python_code | repo_name | file_path
---|---|---|
import torch
import meerkat as mk
from itertools import product
import numpy as np
from typing import List
import pandas as pd
MASK_TOKEN_ID = 103  # id of the [MASK] token in the bert-*-uncased vocabularies
def generate_candidate_descriptions(
templates: List[str],
device: int = 0,
k: int = 2,
bert_size: str = "base",
    num_candidates: int = 30_000,
num_seed_words: int = 10_000,
score_with_gpt: bool = False,
) -> mk.DataPanel:
words_dp = _get_wiki_words(top_k=num_seed_words, eng_only=True)
from transformers import BertForMaskedLM, BertTokenizer
tokenizer = BertTokenizer.from_pretrained(f"bert-{bert_size}-uncased")
model = (
BertForMaskedLM.from_pretrained(f"bert-{bert_size}-uncased").to(device).eval()
)
@torch.no_grad()
def _forward_mlm(words):
input_phrases = [
template.format(word) for word in words for template in templates
]
inputs = tokenizer(input_phrases, return_tensors="pt", padding=True).to(device)
input_ids = inputs["input_ids"]
outputs = model(**inputs) # shape=(num_sents, num_tokens_in_sent, size_vocab)
probs = torch.softmax(outputs.logits, dim=-1).detach()
top_k_out = probs.topk(k=k, dim=-1)
output_phrases = []
output_probs = []
for sent_idx in range(probs.shape[0]):
            mask_mask = input_ids[sent_idx] == MASK_TOKEN_ID
mask_range = torch.arange(mask_mask.sum())
token_ids = top_k_out.indices[sent_idx, mask_mask]
token_probs = top_k_out.values[sent_idx, mask_mask]
for local_idxs in product(np.arange(k), repeat=mask_mask.sum()):
output_ids = torch.clone(input_ids[sent_idx])
output_ids[mask_mask] = token_ids[mask_range, local_idxs]
output_phrases.append(
tokenizer.decode(output_ids, skip_special_tokens=True)
)
output_probs.append(
token_probs[mask_range, local_idxs].mean().cpu().numpy()
)
return {"prob": output_probs, "output_phrase": output_phrases}
candidate_phrases = words_dp["word"].map(
_forward_mlm, is_batched_fn=True, batch_size=16, pbar=True
)
candidate_phrases = (
candidate_phrases.to_pandas()
.dropna()
.sort_values("prob", ascending=False)[:num_candidates]
)
if score_with_gpt:
from transformers import GPT2LMHeadModel, GPT2Tokenizer
gpt_model = GPT2LMHeadModel.from_pretrained("gpt2").to(device)
gpt_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
@torch.no_grad()
def _forward_lm(phrase):
tokens_tensor = gpt_tokenizer.encode(
phrase, add_special_tokens=False, return_tensors="pt"
).to(device)
loss = gpt_model(tokens_tensor, labels=tokens_tensor)[0]
return {
"loss": np.exp(loss.cpu().detach().numpy()),
"output_phrase": phrase,
}
# unclear how to get loss for a batch of sentences
return mk.DataPanel.from_pandas(candidate_phrases)["output_phrase"].map(
_forward_lm, is_batched_fn=False, pbar=True
)
return mk.DataPanel.from_pandas(candidate_phrases)
def _get_wiki_words(top_k: int = 100_000, eng_only: bool = False):
df = pd.read_csv(
"https://raw.githubusercontent.com/IlyaSemenov/wikipedia-word-frequency/master/results/enwiki-2022-08-29.txt",
delimiter=" ",
names=["word", "frequency"],
)
if eng_only:
import nltk
from nltk.corpus import words
nltk.download("words")
eng_words = words.words()
eng_df = pd.DataFrame({"word": eng_words})
df = df.merge(eng_df, how="inner", on="word")
df = df.sort_values("frequency", ascending=False)
df = df.drop_duplicates(subset=["word"])
return mk.DataPanel.from_pandas(df.iloc[: int(top_k)])
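# Usage sketch (illustrative only): a minimal invocation of the function above. The
# template strings are assumptions that follow the expected format, where "{}" is
# filled with a seed word and "[MASK]" is completed by the masked language model; a
# CUDA device at index 0 is assumed to be available.
if __name__ == "__main__":
    example_templates = [
        "a photo of {} [MASK].",
        "a photo of [MASK] {}.",
    ]
    candidates_dp = generate_candidate_descriptions(
        templates=example_templates,
        device=0,
        k=2,
        bert_size="base",
        num_candidates=1_000,
        num_seed_words=100,
    )
    print(candidates_dp["output_phrase"][:5])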
| domino-main | domino/_describe/generate.py |
from typing import Union
import meerkat as mk
import numpy as np
from scipy.stats import mode
from domino.utils import unpack_args
def describe(
data: mk.DataPanel = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = "target",
slices: Union[str, np.ndarray] = "slices",
text: mk.DataPanel = None,
text_embeddings: Union[str, np.ndarray] = "embedding",
phrases: Union[str, np.ndarray] = "output_phrase",
slice_idx: int = 0,
slice_threshold: float = 0.5,
) -> mk.DataPanel:
"""Generate descriptions of a discovered slice.
Args:
data (mk.DataPanel, optional): A `Meerkat DataPanel` with columns for
embeddings, targets, and prediction probabilities. The names of the
columns can be specified with the ``embeddings``, ``targets``, and
``pred_probs`` arguments. Defaults to None.
embeddings (Union[str, np.ndarray], optional): The name of a column in
``data`` holding embeddings. If ``data`` is ``None``, then an np.ndarray
of shape (n_samples, dimension of embedding). Defaults to
"embedding".
targets (Union[str, np.ndarray], optional): The name of a column in
``data`` holding class labels. If ``data`` is ``None``, then an
np.ndarray of shape (n_samples,). Defaults to "target".
        slices (Union[str, np.ndarray], optional): The name of a column in ``data``
            holding discovered slices. If ``data`` is ``None``, then an
            np.ndarray of shape (num_examples, num_slices). Defaults to "slices".
        text (mk.DataPanel, optional): A `Meerkat DataPanel` with columns for text
            phrases and their embeddings. The names of the columns can be specified
            with the ``text_embeddings`` and ``phrases`` arguments. Defaults to None.
        text_embeddings (Union[str, np.ndarray], optional): The name of a column in
            ``text`` holding embeddings. If ``text`` is ``None``, then an np.ndarray
            of shape (n_phrases, dimension of embedding). Defaults to "embedding".
        phrases (Union[str, np.ndarray], optional): The name of a column in ``text``
            holding text phrases. If ``text`` is ``None``, then an np.ndarray of
            shape (n_phrases,). Defaults to "output_phrase".
slice_idx (int, optional): The index of the slice to describe. Defaults to 0.
slice_threshold (float, optional): The probability threshold for inclusion in
the slice. Defaults to 0.5.
Returns:
mk.DataPanel: A `Meerkat DataPanel` with columns for the slice description.
Examples
--------
.. code-block:: python
:name: Example:
        from domino import describe, embed, generate_candidate_descriptions
templates = [
"a photo of [MASK].",
"a photo of {} [MASK].",
"a photo of [MASK] {}.",
"a photo of [MASK] {} [MASK].",
]
text_dp = generate_candidate_descriptions(templates=templates)
text_dp = embed(
text_dp,
input_col="output_phrase",
encoder="clip",
device=0
)
describe(
data=dp,
embeddings="clip(image)",
pred_probs="prob",
targets="target",
slices="domino_slices",
text=text_dp,
text_embeddings="clip(output_phrase)",
)
"""
embeddings, targets, slices = unpack_args(data, embeddings, targets, slices)
text_embeddings, phrases = unpack_args(text, text_embeddings, phrases)
slice_mask = slices[:, slice_idx] > slice_threshold
slice_proto = embeddings[slice_mask].mean(axis=0)
mode_target = mode(targets[slice_mask]).mode[0]
ref_proto = embeddings[targets == mode_target].mean(axis=0)
scores = np.dot(text_embeddings, (slice_proto - ref_proto))
return mk.DataPanel({"score": scores, "phrase": phrases})
| domino-main | domino/_describe/__init__.py |
from typing import Union
import meerkat as mk
import numpy as np
from scipy.stats import mode, pearsonr
from .abstract import Describer
from ..utils import unpack_args
class MeanDescriber(Describer):
"""
Args:
        data (mk.DataPanel, optional): A `Meerkat DataPanel` with columns for
            candidate description phrases and their embeddings. The names of the
            columns can be specified with the ``embeddings`` and ``candidates``
            arguments. Defaults to None.
        embeddings (Union[str, np.ndarray], optional): The name of a column in
            ``data`` holding candidate embeddings. If ``data`` is ``None``, then an
            np.ndarray of shape (n_candidates, dimension of embedding). Defaults to
            "embedding".
        candidates (Union[str, np.ndarray], optional): The name of a column in
            ``data`` holding candidate description phrases. If ``data`` is ``None``,
            then an np.ndarray of shape (n_candidates,). Defaults to "candidates".
        slice_threshold (float, optional): The probability threshold for inclusion in
            a slice. Defaults to 0.5.
        n_descriptions (int, optional): The number of descriptions to return per
            slice. Defaults to 10.
"""
def __init__(
self,
data: mk.DataPanel = None,
embeddings: Union[str, np.ndarray] = "embedding",
candidates: Union[str, np.ndarray] = "candidates",
slice_threshold: float = 0.5,
n_descriptions: int = 10,
):
super().__init__()
embeddings, candidates = unpack_args(data, embeddings, candidates)
self.candidates = candidates
self.candidate_embeddings = embeddings
self.config.slice_threshold = slice_threshold
self.config.n_descriptions = n_descriptions
def describe(
self,
data: mk.DataPanel = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = "target",
slices: Union[str, np.ndarray] = "slices",
):
embeddings, targets, slices = unpack_args(data, embeddings, targets, slices)
result = []
for slice_idx in range(slices.shape[-1]):
slice_mask = slices[:, slice_idx] > self.config.slice_threshold
slice_proto = embeddings[slice_mask].mean(axis=0)
ref_proto = embeddings.mean(axis=0)
scores = np.dot(self.candidate_embeddings, (slice_proto - ref_proto))
idxs = np.argsort(-scores)[:self.config.n_descriptions]
selected_embeddings = self.candidate_embeddings[idxs]
selected_scores = np.dot(selected_embeddings, embeddings.T)
slice_scores = slices[:, slice_idx]
result.append(
[
{
"text": self.candidates[idx],
"score": scores[idx],
"corr": pearsonr(slice_scores, selected_scores[i])[0],
}
for i, idx in enumerate(idxs)
]
)
return result
class ClassifierMeanDescriber(Describer):
"""
Args:
        data (mk.DataPanel, optional): A `Meerkat DataPanel` with columns for
            candidate description phrases and their embeddings. The names of the
            columns can be specified with the ``embeddings`` and ``candidates``
            arguments. Defaults to None.
        embeddings (Union[str, np.ndarray], optional): The name of a column in
            ``data`` holding candidate embeddings. If ``data`` is ``None``, then an
            np.ndarray of shape (n_candidates, dimension of embedding). Defaults to
            "embedding".
        candidates (Union[str, np.ndarray], optional): The name of a column in
            ``data`` holding candidate description phrases. If ``data`` is ``None``,
            then an np.ndarray of shape (n_candidates,). Defaults to "candidates".
        slice_threshold (float, optional): The probability threshold for inclusion in
            a slice. Defaults to 0.5.
        n_descriptions (int, optional): The number of descriptions to return per
            slice. Defaults to 10.
"""
def __init__(
self,
data: mk.DataPanel = None,
embeddings: Union[str, np.ndarray] = "embedding",
candidates: Union[str, np.ndarray] = "candidates",
slice_threshold: float = 0.5,
n_descriptions: int = 10,
):
super().__init__()
embeddings, candidates = unpack_args(data, embeddings, candidates)
self.candidates = candidates
self.candidate_embeddings = embeddings
self.config.slice_threshold = slice_threshold
self.config.n_descriptions = n_descriptions
def describe(
self,
data: mk.DataPanel = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = "target",
slices: Union[str, np.ndarray] = "slices",
):
embeddings, targets, slices = unpack_args(data, embeddings, targets, slices)
result = []
for slice_idx in range(slices.shape[-1]):
slice_mask = slices[:, slice_idx] > self.config.slice_threshold
slice_proto = embeddings[slice_mask].mean(axis=0)
mode_target = mode(targets[slice_mask]).mode[0]
ref_proto = embeddings[targets == mode_target].mean(axis=0)
scores = np.dot(self.candidate_embeddings, (slice_proto - ref_proto))
idxs = np.argsort(-scores)[:self.config.n_descriptions]
result.append(
[
{
"text": self.candidates[idx],
"score": scores[idx],
}
for idx in idxs
]
)
return result
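# Usage sketch with synthetic numpy arrays in place of a DataPanel (illustrative
# only; the candidate phrases and shapes are made up, and it assumes the
# ``Describer`` base class sets up ``self.config`` the way ``Slicer`` does and that
# ``unpack_args`` passes arrays through when ``data`` is None).
def _mean_describer_sketch():
    rng = np.random.default_rng(0)
    candidate_embs = rng.normal(size=(200, 32))
    candidates = np.array([f"candidate phrase {i}" for i in range(200)])
    describer = MeanDescriber(
        embeddings=candidate_embs, candidates=candidates, n_descriptions=5
    )
    embeddings = rng.normal(size=(100, 32))
    targets = rng.integers(0, 2, size=100)
    slices = rng.random(size=(100, 3))  # soft membership scores for 3 slices
    return describer.describe(embeddings=embeddings, targets=targets, slices=slices)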
| domino-main | domino/_describe/mean.py |
from __future__ import annotations
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Dict, Union
import meerkat as mk
import numpy as np
import torch.nn as nn
from sklearn.base import BaseEstimator
@dataclass
class Config:
pass
class Slicer(ABC, BaseEstimator):
def __init__(self, n_slices: int):
super().__init__()
self.config = Config()
self.config.n_slices = n_slices
@abstractmethod
def fit(
self,
model: nn.Module = None,
data_dp: mk.DataPanel = None,
) -> Slicer:
"""
Fit the slicer to data.
Args:
data (mk.DataPanel, optional): A `Meerkat DataPanel` with columns for
embeddings, targets, and prediction probabilities. The names of the
columns can be specified with the ``embeddings``, ``targets``, and
``pred_probs`` arguments. Defaults to None.
embeddings (Union[str, np.ndarray], optional): The name of a column in
``data`` holding embeddings. If ``data`` is ``None``, then an np.ndarray
of shape (n_samples, dimension of embedding). Defaults to
"embedding".
targets (Union[str, np.ndarray], optional): The name of a column in
``data`` holding class labels. If ``data`` is ``None``, then an
np.ndarray of shape (n_samples,). Defaults to "target".
pred_probs (Union[str, np.ndarray], optional): The name of
a column in ``data`` holding model predictions (can either be "soft"
probability scores or "hard" 1-hot encoded predictions). If
``data`` is ``None``, then an np.ndarray of shape (n_samples, n_classes)
or (n_samples,) in the binary case. Defaults to "pred_probs".
losses (Union[str, np.ndarray], optional): The name of a column in ``data``
holding the loss of the model predictions. If ``data`` is ``None``,
then an np.ndarray of shape (n_samples,). Defaults to "loss".
Returns:
Slicer: Returns a fit instance of the slicer.
"""
raise NotImplementedError()
@abstractmethod
def predict(
self,
data: mk.DataPanel,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = "target",
pred_probs: Union[str, np.ndarray] = "pred_probs",
) -> np.ndarray:
"""
Get slice membership for data using the fit slicer.
.. caution::
Must call ``Slicer.fit`` prior to calling ``Slicer.predict``.
Args:
data (mk.DataPanel, optional): A `Meerkat DataPanel` with columns for
embeddings, targets, and prediction probabilities. The names of the
columns can be specified with the ``embeddings``, ``targets``, and
``pred_probs`` arguments. Defaults to None.
            embeddings (Union[str, np.ndarray], optional): The name of a column in
``data`` holding embeddings. If ``data`` is ``None``, then an np.ndarray
of shape (n_samples, dimension of embedding). Defaults to
"embedding".
targets (Union[str, np.ndarray], optional): The name of a column in
``data`` holding class labels. If ``data`` is ``None``, then an
np.ndarray of shape (n_samples,). Defaults to "target".
pred_probs (Union[str, np.ndarray], optional): The name of
a column in ``data`` holding model predictions (can either be "soft"
probability scores or "hard" 1-hot encoded predictions). If
``data`` is ``None``, then an np.ndarray of shape (n_samples, n_classes)
or (n_samples,) in the binary case. Defaults to "pred_probs".
losses (Union[str, np.ndarray], optional): The name of a column in ``data``
holding the loss of the model predictions. If ``data`` is ``None``,
then an np.ndarray of shape (n_samples,). Defaults to "loss".
Returns:
np.ndarray: A binary ``np.ndarray`` of shape (n_samples, n_slices) where
values are either 1 or 0.
"""
raise NotImplementedError()
@abstractmethod
def predict_proba(
self,
data: mk.DataPanel,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = "target",
pred_probs: Union[str, np.ndarray] = "pred_probs",
) -> np.ndarray:
"""
        Get probabilistic (**i.e.** soft) slice membership for data using the fit
slicer.
.. caution::
            Must call ``Slicer.fit`` prior to calling ``Slicer.predict_proba``.
Args:
data (mk.DataPanel, optional): A `Meerkat DataPanel` with columns for
embeddings, targets, and prediction probabilities. The names of the
columns can be specified with the ``embeddings``, ``targets``, and
``pred_probs`` arguments. Defaults to None.
            embeddings (Union[str, np.ndarray], optional): The name of a column in
``data`` holding embeddings. If ``data`` is ``None``, then an np.ndarray
of shape (n_samples, dimension of embedding). Defaults to
"embedding".
targets (Union[str, np.ndarray], optional): The name of a column in
``data`` holding class labels. If ``data`` is ``None``, then an
np.ndarray of shape (n_samples,). Defaults to "target".
pred_probs (Union[str, np.ndarray], optional): The name of
a column in ``data`` holding model predictions (can either be "soft"
probability scores or "hard" 1-hot encoded predictions). If
``data`` is ``None``, then an np.ndarray of shape (n_samples, n_classes)
or (n_samples,) in the binary case. Defaults to "pred_probs".
losses (Union[str, np.ndarray], optional): The name of a column in ``data``
holding the loss of the model predictions. If ``data`` is ``None``,
then an np.ndarray of shape (n_samples,). Defaults to "loss".
Returns:
            np.ndarray: An ``np.ndarray`` of shape (n_samples, n_slices) where
                values are in the range [0, 1].
"""
raise NotImplementedError()
def get_params(self) -> Dict[str, Any]:
"""
Get the parameters of this slicer. Returns a dictionary mapping from the names
of the parameters (as they are defined in the ``__init__``) to their values.
Returns:
Dict[str, Any]: A dictionary of parameters.
"""
return self.config.__dict__
def set_params(self, **params):
raise ValueError(
f"Slicer of type {self.__class__.__name__} does not support `set_params`."
)
def to(self, device: Union[str, int]):
if device != "cpu":
raise ValueError(f"Slicer of type {type(self)} does not support GPU.")
# by default this is a no-op, but subclasses can override
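# Toy subclass sketch showing how the interface above fits together (illustrative
# only; it flags the highest-loss examples rather than discovering real slices, and
# it takes plain numpy arrays instead of a DataPanel).
class _LossThresholdSlicer(Slicer):
    def fit(self, losses: np.ndarray = None, **kwargs) -> "Slicer":
        # remember the loss value above which an example counts as in-slice
        self.threshold_ = float(np.quantile(losses, 0.9))
        return self
    def predict_proba(self, losses: np.ndarray = None, **kwargs) -> np.ndarray:
        # soft membership: 1.0 for high-loss examples, 0.0 otherwise
        probs = (losses > self.threshold_).astype(float)
        return np.tile(probs[:, None], (1, self.config.n_slices))
    def predict(self, losses: np.ndarray = None, **kwargs) -> np.ndarray:
        return (self.predict_proba(losses=losses) > 0.5).astype(np.int32)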
| domino-main | domino/_slice/abstract.py |
from typing import Union
import meerkat as mk
import numpy as np
import torch
import torch.optim as optim
from torch.nn.functional import cross_entropy
from tqdm import tqdm
from domino.utils import unpack_args
from .abstract import Slicer
class SpotlightSlicer(Slicer):
r"""
Slice a dataset with The Spotlight algorithm [deon_2022]_.
TODO: add docstring similar to the Domino one
.. [deon_2022]
d’Eon, G., d’Eon, J., Wright, J. R. & Leyton-Brown, K.
The Spotlight: A General Method for Discovering Systematic Errors in Deep
        Learning Models. arXiv:2107.00758 [cs, stat] (2021)
"""
def __init__(
self,
n_slices: int = 5,
        spotlight_size: float = 0.02,  # recommended in the paper
n_steps: int = 1000,
learning_rate: float = 1e-3, # default from the implementation
device: torch.device = torch.device("cpu"),
pbar: bool = False,
):
super().__init__(n_slices=n_slices)
self.config.spotlight_size = spotlight_size
self.config.n_steps = n_steps
self.config.learning_rate = learning_rate
self.config.device = device
self.means = []
self.precisions = []
self.pbar = pbar
def fit(
self,
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = None,
pred_probs: Union[str, np.ndarray] = None,
losses: Union[str, np.ndarray] = None,
**kwargs
):
embeddings, targets, pred_probs, losses = unpack_args(
data, embeddings, targets, pred_probs, losses
)
losses = self._compute_losses(
targets=targets, pred_probs=pred_probs, losses=losses
)
embeddings = torch.tensor(embeddings).to(
dtype=torch.float, device=self.config.device
)
all_weights = []
weights_unnorm = None
min_weight = losses.shape[0] * self.config.spotlight_size
for slice_idx in tqdm(range(self.config.n_slices), disable=not self.pbar):
if slice_idx != 0:
weights_unnorm = weights_unnorm / max(weights_unnorm)
losses = losses * (1 - weights_unnorm)
(weights, weights_unnorm, mean, log_precision) = run_spotlight(
embeddings=embeddings,
losses=losses,
min_weight=min_weight,
                barrier_x_schedule=np.geomspace(
losses.shape[0] - min_weight,
0.05 * min_weight,
self.config.n_steps,
),
learning_rate=self.config.learning_rate,
device=self.config.device,
**kwargs
)
self.means.append(mean.detach())
self.precisions.append(log_precision.detach())
all_weights.append(weights)
return self
def predict_proba(
self,
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = None,
pred_probs: Union[str, np.ndarray] = None,
losses: Union[str, np.ndarray] = None,
) -> np.ndarray:
embeddings, targets, pred_probs, losses = unpack_args(
data, embeddings, targets, pred_probs, losses
)
losses = self._compute_losses(
pred_probs=pred_probs, targets=targets, losses=losses
)
embeddings = torch.tensor(embeddings).to(
dtype=torch.float, device=self.config.device
)
all_weights = []
for slice_idx in range(self.config.n_slices):
weights, _, _, _ = md_adversary_weights(
mean=self.means[slice_idx],
precision=torch.exp(self.precisions[slice_idx])
* torch.eye(self.means[slice_idx].shape[0], device=self.config.device),
x=embeddings,
losses=losses,
)
all_weights.append(weights.cpu().numpy())
return np.stack(all_weights, axis=1)
def predict(
self,
data: mk.DataPanel,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = None,
pred_probs: Union[str, np.ndarray] = None,
losses: Union[str, np.ndarray] = None,
) -> np.ndarray:
probs = self.predict_proba(
data=data,
embeddings=embeddings,
targets=targets,
pred_probs=pred_probs,
losses=losses,
)
# TODO (Greg): check if this is the preferred way to get hard predictions from
# probabilities
return (probs > 0.5).astype(np.int32)
def _compute_losses(
self, targets: np.ndarray, pred_probs: np.ndarray, losses: np.ndarray
):
error_msg = "Must either provide `losses` or `pred_probs` and `targets`. "
if losses is None:
if (targets is None) or (pred_probs is None):
raise ValueError(error_msg)
pred_probs = (
torch.tensor(pred_probs).to(torch.float32).to(self.config.device)
)
targets = torch.tensor(targets).to(torch.long).to(self.config.device)
losses = cross_entropy(
pred_probs,
targets,
reduction="none",
)
else:
if targets is not None or pred_probs is not None:
raise ValueError(error_msg)
losses = torch.tensor(losses).to(torch.float32).to(self.config.device)
return losses
# Source below copied from spotlight implementation
# https://github.com/gregdeon/spotlight/blob/main/torch_spotlight/spotlight.py
def gaussian_probs(mean, precision, x):
# Similarity kernel: describe how similar each point in x is to mean as number in
# [0, 1]
# - mean: (dims) vector
# - precision: (dims, dims) precision matrix; must be PSD
# - x: (num_points, dims) set of points
dists = torch.sum(((x - mean) @ precision) * (x - mean), axis=1)
return torch.exp(-dists / 2)
def md_adversary_weights(mean, precision, x, losses, counts=None):
# Calculate normalized weights, average loss, and spotlight size for current mean
# and precision settings
# - mean, precision, x: as in gaussian_probs
# - losses: (num_points) vector of losses
# - counts: (num_points) vector of number of copies of each point to include.
# defaults to all-ones.
if counts is None:
counts = torch.ones_like(losses)
weights_unnorm = gaussian_probs(mean, precision, x)
total_weight = weights_unnorm @ counts
weights = weights_unnorm / total_weight
weighted_loss = (weights * counts) @ losses
return (weights, weights_unnorm, weighted_loss, total_weight)
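# Numeric illustration of the kernel above (added for exposition): with a 2-D mean at
# the origin and precision = I, a point at the origin has squared distance 0 and gets
# weight exp(0) = 1, while a point at (2, 0) has squared distance 4 and gets weight
# exp(-4 / 2) ≈ 0.135, so md_adversary_weights concentrates the weighted loss on
# points near the spotlight mean.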
def md_objective(
mean,
precision,
x,
losses,
min_weight,
barrier_x,
barrier_scale,
flip_objective=False,
counts=None,
labels=None,
label_coeff=0.0,
predictions=None,
prediction_coeff=0.0,
):
# main objective
weights, _, weighted_loss, total_weight = md_adversary_weights(
mean, precision, x, losses
)
if flip_objective:
weighted_loss = -weighted_loss
# barrier
if total_weight < (min_weight + barrier_x):
barrier_penalty = (
barrier_scale
* (total_weight - (min_weight + barrier_x)) ** 2
/ barrier_x**2
)
weighted_loss -= barrier_penalty
# regularization
if labels is not None:
categories = torch.arange(max(labels) + 1).reshape(-1, 1)
label_probs = (labels == categories).float() @ weights
label_entropy = torch.distributions.Categorical(
probs=label_probs
).entropy() / np.log(2)
weighted_loss -= label_coeff * label_entropy
if predictions is not None:
categories = torch.arange(max(predictions) + 1).reshape(-1, 1)
prediction_probs = (predictions == categories).float() @ weights
prediction_entropy = torch.distributions.Categorical(
probs=prediction_probs
).entropy() / np.log(2)
weighted_loss -= prediction_coeff * prediction_entropy
return (weighted_loss, total_weight)
class ResetOnPlateau(torch.optim.lr_scheduler.ReduceLROnPlateau):
def _reduce_lr(self, epoch):
super(ResetOnPlateau, self)._reduce_lr(epoch)
self._reset()
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group["lr"]
def run_spotlight(
embeddings,
losses,
min_weight,
barrier_x_schedule,
barrier_scale=1,
learning_rate=1e-3,
scheduler_patience=20,
scheduler_decay=0.5,
print_every=200,
device=0,
flip_objective=False,
labels=None,
counts=None,
label_coeff=0.0,
predictions=None,
prediction_coeff=0.0,
):
x = embeddings
y = losses
dimensions = x.shape[1]
mean = torch.zeros((dimensions,), requires_grad=True, device=device).to(torch.float)
log_precision = torch.tensor(np.log(0.0001), requires_grad=True, device=device)
optimizer = optim.Adam([mean, log_precision], lr=learning_rate)
scheduler = ResetOnPlateau(
optimizer, patience=scheduler_patience, factor=scheduler_decay
)
n_steps = len(barrier_x_schedule)
objective_history = []
total_weight_history = []
lr_history = []
print_every = min(print_every, n_steps)
    for t in tqdm(range(n_steps), disable=True):  # progress bar intentionally disabled
optimizer.zero_grad()
precision = torch.exp(log_precision)
precision_matrix = torch.eye(x.shape[1], device=device) * precision
objective, total_weight = md_objective(
mean,
precision_matrix,
x,
y,
min_weight,
barrier_x_schedule[t],
barrier_scale,
flip_objective,
counts,
labels,
label_coeff,
predictions,
prediction_coeff,
)
neg_objective = -objective
neg_objective.backward()
optimizer.step()
scheduler.step(neg_objective)
objective_history.append(objective.detach().item())
total_weight_history.append(total_weight.detach().item())
lr_history.append(get_lr(optimizer))
if (t + 1) % print_every == 0:
precision_matrix = torch.eye(
dimensions, device=precision.device
) * torch.exp(log_precision)
weights, weights_unnorm, weighted_loss, total_weight = md_adversary_weights(
mean, precision_matrix, x, y
)
final_weights = weights.detach()
final_weights_unnorm = weights_unnorm.detach()
return (
final_weights,
final_weights_unnorm,
mean,
log_precision,
)
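# Usage sketch with synthetic data (illustrative only; real use would pass model
# embeddings and per-example losses). Fitting directly from ``losses`` skips the
# cross-entropy computation in ``_compute_losses``.
def _spotlight_sketch() -> np.ndarray:
    rng = np.random.default_rng(0)
    embeddings = rng.normal(size=(256, 32)).astype(np.float32)
    losses = rng.exponential(size=256).astype(np.float32)
    slicer = SpotlightSlicer(n_slices=2, n_steps=100, pbar=False)
    slicer.fit(embeddings=embeddings, losses=losses)
    return slicer.predict_proba(embeddings=embeddings, losses=losses)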
| domino-main | domino/_slice/spotlight.py |
from __future__ import annotations
import warnings
from functools import wraps
from typing import Union
import meerkat as mk
import numpy as np
import sklearn.cluster as cluster
from scipy import linalg
from scipy.special import logsumexp
from sklearn.decomposition import PCA
from sklearn.exceptions import ConvergenceWarning
from sklearn.mixture import GaussianMixture
from sklearn.mixture._base import _check_X, check_random_state
from sklearn.mixture._gaussian_mixture import (
_compute_precision_cholesky,
_estimate_gaussian_covariances_diag,
_estimate_gaussian_covariances_full,
_estimate_gaussian_covariances_spherical,
_estimate_gaussian_covariances_tied,
)
from sklearn.preprocessing import label_binarize
from sklearn.utils.validation import check_is_fitted
from tqdm.auto import tqdm
from domino.utils import convert_to_numpy, unpack_args
from .abstract import Slicer
class MixtureSlicer(Slicer):
r"""
Slice Discovery based on the Domino Mixture Model.
Discover slices by jointly modeling a mixture of input embeddings (e.g. activations
from a trained model), class labels, and model predictions. This encourages slices
that are homogeneous with respect to error type (e.g. all false positives).
Examples
--------
Suppose you've trained a model and stored its predictions on a dataset in
a `Meerkat DataPanel <https://github.com/robustness-gym/meerkat>`_ with columns
"emb", "target", and "pred_probs". After loading the DataPanel, you can discover
underperforming slices of the validation dataset with the following:
.. code-block:: python
from domino import MixtureSlicer
dp = ... # Load dataset into a Meerkat DataPanel
# split dataset
valid_dp = dp.lz[dp["split"] == "valid"]
test_dp = dp.lz[dp["split"] == "test"]
domino = MixtureSlicer()
domino.fit(
data=valid_dp, embeddings="emb", targets="target", pred_probs="pred_probs"
)
dp["domino_slices"] = domino.predict(
data=test_dp, embeddings="emb", targets="target", pred_probs="pred_probs"
)
Args:
n_slices (int, optional): The number of slices to discover.
Defaults to 5.
covariance_type (str, optional): The type of covariance parameter
:math:`\mathbf{\Sigma}` to use. Same as in sklearn.mixture.GaussianMixture.
Defaults to "diag", which is recommended.
n_pca_components (Union[int, None], optional): The number of PCA components
to use. If ``None``, then no PCA is performed. Defaults to 128.
n_mixture_components (int, optional): The number of clusters in the mixture
model, :math:`\bar{k}`. This differs from ``n_slices`` in that the
``MixtureSlicer`` only returns the top ``n_slices`` with the highest error rate
of the ``n_mixture_components``. Defaults to 25.
y_log_likelihood_weight (float, optional): The weight :math:`\gamma` applied to
the :math:`P(Y=y_{i} | S=s)` term in the log likelihood during the E-step.
Defaults to 1.
y_hat_log_likelihood_weight (float, optional): The weight :math:`\hat{\gamma}`
applied to the :math:`P(\hat{Y} = h_\theta(x_i) | S=s)` term in the log
likelihood during the E-step. Defaults to 1.
max_iter (int, optional): The maximum number of iterations to run. Defaults
to 100.
init_params (str, optional): The initialization method to use. Options are
the same as in sklearn.mixture.GaussianMixture plus one addition,
"confusion". If "confusion", the clusters are initialized such that almost
all of the examples in a cluster come from same cell in the confusion
matrix. See Notes below for more details. Defaults to "confusion".
confusion_noise (float, optional): Only used if ``init_params="confusion"``.
The scale of noise added to the confusion matrix initialization. See notes
below for more details.
Defaults to 0.001.
random_state (Union[int, None], optional): The random seed to use when
initializing the parameters.
Notes
-----
The mixture model is an extension of a standard Gaussian Mixture Model. The model is
based on the assumption that data is generated according to the following generative
process.
* Each example belongs to one of :math:`\bar{k}` slices. This slice
:math:`S` is sampled from a categorical
distribution :math:`S \sim Cat(\mathbf{p}_S)` with parameter :math:`\mathbf{p}_S
\in\{\mathbf{p} \in \mathbb{R}_+^{\bar{k}} : \sum_{i = 1}^{\bar{k}} p_i = 1\}`
(see ``MixtureSlicer.mm.weights_``).
* Given the slice :math:`S'`, the embeddings are normally distributed
:math:`Z | S \sim \mathcal{N}(\mathbf{\mu}, \mathbf{\Sigma}`) with parameters
mean :math:`\mathbf{\mu} \in \mathbb{R}^d` (see ``MixtureSlicer.mm.means_``) and
:math:`\mathbf{\Sigma} \in \mathbb{S}^{d}_{++}`
(see ``MixtureSlicer.mm.covariances_``;
normally this parameter is constrained to the set of symmetric positive definite
      :math:`d \times d` matrices, however the argument ``covariance_type`` allows for
other constraints).
* Given the slice, the labels vary as a categorical
:math:`Y |S \sim Cat(\mathbf{p})` with parameter :math:`\mathbf{p}
\in \{\mathbf{p} \in \mathbb{R}^c_+ : \sum_{i = 1}^c p_i = 1\}` (see
``MixtureSlicer.mm.y_probs``).
* Given the slice, the model predictions also vary as a categorical
:math:`\hat{Y} | S \sim Cat(\mathbf{\hat{p}})` with parameter
:math:`\mathbf{\hat{p}} \in \{\mathbf{\hat{p}} \in \mathbb{R}^c_+ :
\sum_{i = 1}^c \hat{p}_i = 1\}` (see ``MixtureSlicer.mm.y_hat_probs``).
The mixture model is, thus, parameterized by :math:`\phi = [\mathbf{p}_S, \mu,
\Sigma, \mathbf{p}, \mathbf{\hat{p}}]` corresponding to the attributes
``weights_, means_, covariances_, y_probs, y_hat_probs`` respectively. The
log-likelihood over the :math:`n` examples in the validation dataset :math:`D_v` is
    given as follows and maximized using expectation-maximization:
.. math::
\ell(\phi) = \sum_{i=1}^n \log \sum_{s=1}^{\hat{k}} P(S=s)P(Z=z_i| S=s)
P( Y=y_i| S=s)P(\hat{Y} = h_\theta(x_i) | S=s)
We include two optional hyperparameters
:math:`\gamma, \hat{\gamma} \in \mathbb{R}_+`
    (see ``y_log_likelihood_weight`` and ``y_hat_log_likelihood_weight`` above) that
balance the importance of modeling the class labels and predictions against the
importance of modeling the embedding. The modified log-likelihood over :math:`n`
examples is given as follows:
.. math::
\ell(\phi) = \sum_{i=1}^n \log \sum_{s=1}^{\hat{k}} P(S=s)P(Z=z_i| S=s)
P( Y=y_i| S=s)^\gamma P(\hat{Y} = h_\theta(x_i) | S=s)^{\hat{\gamma}}
.. attention::
Although we model the prediction :math:`\hat{Y}` as a categorical random
variable, in practice predictions are sometimes "soft" (e.g. the output
of a softmax layer is a probability distribution over labels, not a single
label). In these cases, the prediction :math:`\hat{Y}` is technically a
        Dirichlet random variable (i.e. a distribution over distributions).
However, to keep the implementation simple while still leveraging the extra
information provided by "soft" predictions, we naïvely plug the "soft"
predictions directly into the categorical PMF in the E-step and the update in
the M-step. Specifically, during the E-step, instead of computing the
categorical PMF :math:`P(\hat{Y}=\hat{y_i} | S=s)` we compute
:math:`\sum_{j=1}^c \hat{y_i}(j) P(\hat{Y}=j | S=s)` where :math:`\hat{y_i}(j)`
is the "soft" prediction for class :math:`j` (we can
think of this like we're marginalizing out the uncertainty in the prediction).
During the M-step, we compute a "soft" update for the categorical parameters
:math:`p_j^{(s)} = \sum_{i=1}^n Q(s,i) \hat{y_i}(j)` where :math:`Q(s,i)`
is the "responsibility" of slice :math:`s` towards the data point :math:`i`.
When using ``"confusion"`` initialization, each slice $s^{(j)}$ is assigned a
:math:`y^{(j)}\in \mathcal{Y}` and :math:`\hat{y}^{(j)} \in \mathcal{Y}` (*i.e.*
each slice is assigned a cell in the confusion matrix). This is typically done in a
round-robin fashion so that there are at least
    :math:`\lfloor \hat{k} / |\mathcal{Y}|^2 \rfloor`
slices assigned to each cell in the confusion matrix. Then, we fill in the initial
responsibility matrix :math:`Q \in \mathbb{R}^{n \times \hat{k}}`, where each cell
:math:`Q_{ij}` corresponds to our model's initial estimate of
:math:`P(S=s^{(j)}|Y=y_i,
\hat{Y}=\hat{y}_i)`. We do this according to
.. math::
\bar{Q}_{ij} \leftarrow
\begin{cases}
1 + \epsilon & y_i=y^{(j)} \land \hat{y}_i = \hat{y}^{(j)} \\
\epsilon & \text{otherwise}
\end{cases}
.. math::
Q_{ij} \leftarrow \frac{\bar{Q}_{ij} } {\sum_{l=1}^{\hat{k}} \bar{Q}_{il}}
where :math:`\epsilon` is random noise which ensures that slices assigned to the
same confusion matrix cell won't have the exact same initialization. We sample
:math:`\epsilon` uniformly from the range ``(0, confusion_noise]``.
"""
def __init__(
self,
n_slices: int = 5,
covariance_type: str = "diag",
n_pca_components: Union[int, None] = 128,
n_mixture_components: int = 25,
y_log_likelihood_weight: float = 1,
y_hat_log_likelihood_weight: float = 1,
max_iter: int = 100,
init_params: str = "confusion",
confusion_noise: float = 1e-3,
random_state: int = None,
pbar: bool = True,
):
super().__init__(n_slices=n_slices)
self.config.covariance_type = covariance_type
self.config.n_pca_components = n_pca_components
self.config.n_mixture_components = n_mixture_components
self.config.init_params = init_params
self.config.confusion_noise = confusion_noise
self.config.y_log_likelihood_weight = y_log_likelihood_weight
self.config.y_hat_log_likelihood_weight = y_hat_log_likelihood_weight
self.config.max_iter = max_iter
if self.config.n_pca_components is None:
self.pca = None
else:
self.pca = PCA(n_components=self.config.n_pca_components)
self.mm = DominoMixture(
n_components=self.config.n_mixture_components,
y_log_likelihood_weight=self.config.y_log_likelihood_weight,
y_hat_log_likelihood_weight=self.config.y_hat_log_likelihood_weight,
covariance_type=self.config.covariance_type,
init_params=self.config.init_params,
max_iter=self.config.max_iter,
confusion_noise=self.config.confusion_noise,
random_state=random_state,
pbar=pbar,
)
def fit(
self,
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = "target",
pred_probs: Union[str, np.ndarray] = "pred_probs",
losses: Union[str, np.ndarray] = None,
) -> MixtureSlicer:
"""
Fit the mixture model to data.
Args:
data (mk.DataPanel, optional): A `Meerkat DataPanel` with columns for
embeddings, targets, and prediction probabilities. The names of the
columns can be specified with the ``embeddings``, ``targets``, and
``pred_probs`` arguments. Defaults to None.
            embeddings (Union[str, np.ndarray], optional): The name of a column in
``data`` holding embeddings. If ``data`` is ``None``, then an np.ndarray
of shape (n_samples, dimension of embedding). Defaults to
"embedding".
targets (Union[str, np.ndarray], optional): The name of a column in
``data`` holding class labels. If ``data`` is ``None``, then an
np.ndarray of shape (n_samples,). Defaults to "target".
pred_probs (Union[str, np.ndarray], optional): The name of
a column in ``data`` holding model predictions (can either be "soft"
probability scores or "hard" 1-hot encoded predictions). If
``data`` is ``None``, then an np.ndarray of shape (n_samples, n_classes)
or (n_samples,) in the binary case. Defaults to "pred_probs".
Returns:
MixtureSlicer: Returns a fit instance of MixtureSlicer.
"""
embeddings, targets, pred_probs = unpack_args(
data, embeddings, targets, pred_probs
)
embeddings, targets, pred_probs = convert_to_numpy(
embeddings, targets, pred_probs
)
if self.pca is not None:
self.pca.fit(X=embeddings)
embeddings = self.pca.transform(X=embeddings)
self.mm.fit(X=embeddings, y=targets, y_hat=pred_probs)
self.slice_cluster_indices = (
-np.abs((self.mm.y_hat_probs - self.mm.y_probs).max(axis=1))
).argsort()[: self.config.n_slices]
return self
def predict(
self,
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = "target",
pred_probs: Union[str, np.ndarray] = "pred_probs",
losses: Union[str, np.ndarray] = "losses",
) -> np.ndarray:
"""
        Get hard (one-hot) slice membership for data using a fit mixture model.
.. caution::
Must call ``MixtureSlicer.fit`` prior to calling ``MixtureSlicer.predict``.
Args:
data (mk.DataPanel, optional): A `Meerkat DataPanel` with columns for
embeddings, targets, and prediction probabilities. The names of the
columns can be specified with the ``embeddings``, ``targets``, and
``pred_probs`` arguments. Defaults to None.
            embeddings (Union[str, np.ndarray], optional): The name of a column in
``data`` holding embeddings. If ``data`` is ``None``, then an np.ndarray
of shape (n_samples, dimension of embedding). Defaults to
"embedding".
targets (Union[str, np.ndarray], optional): The name of a column in
``data`` holding class labels. If ``data`` is ``None``, then an
np.ndarray of shape (n_samples,). Defaults to "target".
pred_probs (Union[str, np.ndarray], optional): The name of
a column in ``data`` holding model predictions (can either be "soft"
probability scores or "hard" 1-hot encoded predictions). If
``data`` is ``None``, then an np.ndarray of shape (n_samples, n_classes)
or (n_samples,) in the binary case. Defaults to "pred_probs".
losses (Union[str, np.ndarray], optional): Ignored.
Returns:
np.ndarray: A binary ``np.ndarray`` of shape (n_samples, n_slices) where
values are either 1 or 0.
"""
probs = self.predict_proba(
data=data,
embeddings=embeddings,
targets=targets,
pred_probs=pred_probs,
)
preds = np.zeros_like(probs, dtype=np.int32)
preds[np.arange(preds.shape[0]), probs.argmax(axis=-1)] = 1
return preds
def predict_proba(
self,
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = "target",
pred_probs: Union[str, np.ndarray] = "pred_probs",
losses: Union[str, np.ndarray] = "loss"
) -> np.ndarray:
"""
Get probabilistic slice membership for data using a fit mixture model.
.. caution::
Must call ``MixtureSlicer.fit`` prior to calling
``MixtureSlicer.predict_proba``.
Args:
data (mk.DataPanel, optional): A `Meerkat DataPanel` with columns for
embeddings, targets, and prediction probabilities. The names of the
columns can be specified with the ``embeddings``, ``targets``, and
``pred_probs`` arguments. Defaults to None.
            embeddings (Union[str, np.ndarray], optional): The name of a column in
``data`` holding embeddings. If ``data`` is ``None``, then an np.ndarray
of shape (n_samples, dimension of embedding). Defaults to
"embedding".
targets (Union[str, np.ndarray], optional): The name of a column in
``data`` holding class labels. If ``data`` is ``None``, then an
np.ndarray of shape (n_samples,). Defaults to "target".
pred_probs (Union[str, np.ndarray], optional): The name of
a column in ``data`` holding model predictions (can either be "soft"
probability scores or "hard" 1-hot encoded predictions). If
``data`` is ``None``, then an np.ndarray of shape (n_samples, n_classes)
or (n_samples,) in the binary case. Defaults to "pred_probs".
losses (Union[str, np.ndarray], optional): Ignored.
Returns:
            np.ndarray: An ``np.ndarray`` of shape (n_samples, n_slices) where
                values are in the range [0, 1] and rows sum to 1.
"""
embeddings, targets, pred_probs = unpack_args(
data, embeddings, targets, pred_probs
)
embeddings, targets, pred_probs = convert_to_numpy(
embeddings, targets, pred_probs
)
if self.pca is not None:
embeddings = self.pca.transform(X=embeddings)
clusters = self.mm.predict_proba(embeddings, y=targets, y_hat=pred_probs)
return clusters[:, self.slice_cluster_indices]
class DominoMixture(GaussianMixture):
@wraps(GaussianMixture.__init__)
def __init__(
self,
*args,
y_log_likelihood_weight: float = 1,
y_hat_log_likelihood_weight: float = 1,
confusion_noise: float = 1e-3,
pbar: bool = True,
**kwargs,
):
self.y_log_likelihood_weight = y_log_likelihood_weight
self.y_hat_log_likelihood_weight = y_hat_log_likelihood_weight
self.confusion_noise = confusion_noise
self.pbar = pbar
super().__init__(*args, **kwargs)
def _initialize_parameters(self, X, y, y_hat, random_state):
"""Initialize the model parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
random_state : RandomState
A random number generator instance that controls the random seed
used for the method chosen to initialize the parameters.
"""
n_samples, _ = X.shape
if self.init_params == "kmeans":
resp = np.zeros((n_samples, self.n_components))
label = (
cluster.KMeans(
n_clusters=self.n_components, n_init=1, random_state=random_state
)
.fit(X)
.labels_
)
resp[np.arange(n_samples), label] = 1
elif self.init_params == "random":
resp = random_state.rand(n_samples, self.n_components)
resp /= resp.sum(axis=1)[:, np.newaxis]
elif self.init_params == "confusion":
num_classes = y.shape[-1]
if self.n_components < num_classes**2:
raise ValueError(
"Can't use 'init_params=\"confusion\"' when "
"`n_components` < `num_classes **2`"
)
resp = np.matmul(y[:, :, np.newaxis], y_hat[:, np.newaxis, :]).reshape(
len(y), -1
)
resp = np.concatenate(
[resp]
* (
int(self.n_components / (num_classes**2))
+ (self.n_components % (num_classes**2) > 0)
),
axis=1,
)[:, : self.n_components]
resp /= resp.sum(axis=1)[:, np.newaxis]
resp += (
random_state.rand(n_samples, self.n_components) * self.confusion_noise
)
resp /= resp.sum(axis=1)[:, np.newaxis]
else:
raise ValueError(
"Unimplemented initialization method '%s'" % self.init_params
)
self._initialize(X, y, y_hat, resp)
def _initialize(self, X, y, y_hat, resp):
"""Initialization of the Gaussian mixture parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
resp : array-like of shape (n_samples, n_components)
"""
n_samples, _ = X.shape
weights, means, covariances, y_probs, y_hat_probs = _estimate_parameters(
X, y, y_hat, resp, self.reg_covar, self.covariance_type
)
weights /= n_samples
self.weights_ = weights if self.weights_init is None else self.weights_init
self.means_ = means if self.means_init is None else self.means_init
self.y_probs, self.y_hat_probs = y_probs, y_hat_probs
if self.precisions_init is None:
self.covariances_ = covariances
self.precisions_cholesky_ = _compute_precision_cholesky(
covariances, self.covariance_type
)
elif self.covariance_type == "full":
self.precisions_cholesky_ = np.array(
[
linalg.cholesky(prec_init, lower=True)
for prec_init in self.precisions_init
]
)
elif self.covariance_type == "tied":
self.precisions_cholesky_ = linalg.cholesky(
self.precisions_init, lower=True
)
else:
self.precisions_cholesky_ = self.precisions_init
def fit(self, X, y, y_hat):
self.fit_predict(X, y, y_hat)
return self
def _preprocess_ys(self, y: np.ndarray = None, y_hat: np.ndarray = None):
if y is not None:
# we want to support continuous binary labels as well
if y.dtype == np.dtype(int):
y = label_binarize(y, classes=np.arange(np.max(y) + 1))
if y.ndim == 1:
y = y[:, np.newaxis]
if y.shape[-1] == 1:
# binary targets transform to a column vector with label_binarize
y = np.array([1 - y[:, 0], y[:, 0]]).T
if y_hat is not None:
if len(y_hat.shape) == 1:
y_hat = np.array([1 - y_hat, y_hat]).T
return y, y_hat
def fit_predict(self, X, y, y_hat):
y, y_hat = self._preprocess_ys(y, y_hat)
X = _check_X(X, self.n_components, ensure_min_samples=2)
self._check_n_features(X, reset=True)
self._check_initial_parameters(X)
# if we enable warm_start, we will have a unique initialisation
do_init = not (self.warm_start and hasattr(self, "converged_"))
n_init = self.n_init if do_init else 1
max_lower_bound = -np.infty
self.converged_ = False
random_state = check_random_state(self.random_state)
n_samples, _ = X.shape
best_params = None
for init in range(n_init):
self._print_verbose_msg_init_beg(init)
if do_init:
self._initialize_parameters(X, y, y_hat, random_state)
lower_bound = -np.infty if do_init else self.lower_bound_
for n_iter in tqdm(
range(1, self.max_iter + 1), colour="#f17a4a", disable=not self.pbar
):
prev_lower_bound = lower_bound
log_prob_norm, log_resp = self._e_step(X, y, y_hat)
                if np.isnan(log_resp).any():
                    raise RuntimeError(
                        "NaN values encountered in the responsibilities during "
                        "the E-step; check the embeddings and predictions for "
                        "degenerate values."
                    )
self._m_step(X, y, y_hat, log_resp)
lower_bound = self._compute_lower_bound(log_resp, log_prob_norm)
change = lower_bound - prev_lower_bound
self._print_verbose_msg_iter_end(n_iter, change)
if abs(change) < self.tol:
self.converged_ = True
break
self._print_verbose_msg_init_end(lower_bound)
if lower_bound > max_lower_bound:
max_lower_bound = lower_bound
best_params = self._get_parameters()
best_n_iter = n_iter
if not self.converged_:
warnings.warn(
"Initialization %d did not converge. "
"Try different init parameters, "
"or increase max_iter, tol "
"or check for degenerate data." % (init + 1),
ConvergenceWarning,
)
if best_params is None:
self._initialize_parameters(X, y, y_hat, random_state)
self._set_parameters(best_params)
self.n_iter_ = best_n_iter
self.lower_bound_ = max_lower_bound
# Always do a final e-step to guarantee that the labels returned by
# fit_predict(X) are always consistent with fit(X).predict(X)
# for any value of max_iter and tol (and any random_state).
_, log_resp = self._e_step(X, y, y_hat)
return log_resp.argmax(axis=1)
def predict_proba(
self, X: np.ndarray, y: np.ndarray = None, y_hat: np.ndarray = None
):
y, y_hat = self._preprocess_ys(y, y_hat)
check_is_fitted(self)
X = _check_X(X, None, self.means_.shape[1])
_, log_resp = self._estimate_log_prob_resp(X, y, y_hat)
return np.exp(log_resp)
def _m_step(self, X, y, y_hat, log_resp):
"""M step.
Parameters
----------
X : array-like of shape (n_samples, n_features)
log_resp : array-like of shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in X.
"""
resp = np.exp(log_resp)
n_samples, _ = X.shape
(
self.weights_,
self.means_,
self.covariances_,
self.y_probs,
self.y_hat_probs,
) = _estimate_parameters(
X, y, y_hat, resp, self.reg_covar, self.covariance_type
)
self.weights_ /= n_samples
self.precisions_cholesky_ = _compute_precision_cholesky(
self.covariances_, self.covariance_type
)
def _e_step(self, X, y, y_hat):
"""E step.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
log_prob_norm : float
Mean of the logarithms of the probabilities of each sample in X
log_responsibility : array, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in X.
"""
log_prob_norm, log_resp = self._estimate_log_prob_resp(X, y, y_hat)
return np.mean(log_prob_norm), log_resp
def _estimate_log_prob_resp(self, X, y=None, y_hat=None):
"""Estimate log probabilities and responsibilities for each sample.
Compute the log probabilities, weighted log probabilities per
component and responsibilities for each sample in X with respect to
the current state of the model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
log_prob_norm : array, shape (n_samples,)
log p(X)
log_responsibilities : array, shape (n_samples, n_components)
logarithm of the responsibilities
"""
weighted_log_prob = self._estimate_weighted_log_prob(X, y, y_hat)
log_prob_norm = logsumexp(weighted_log_prob, axis=1)
with np.errstate(under="ignore"):
# ignore underflow
log_resp = weighted_log_prob - log_prob_norm[:, np.newaxis]
return log_prob_norm, log_resp
def _estimate_weighted_log_prob(self, X, y=None, y_hat=None):
log_prob = self._estimate_log_prob(X) + self._estimate_log_weights()
if y is not None:
log_prob += self._estimate_y_log_prob(y) * self.y_log_likelihood_weight
if y_hat is not None:
log_prob += (
self._estimate_y_hat_log_prob(y_hat) * self.y_hat_log_likelihood_weight
)
return log_prob
def _get_parameters(self):
return (
self.weights_,
self.means_,
self.covariances_,
self.y_probs,
self.y_hat_probs,
self.precisions_cholesky_,
)
def _set_parameters(self, params):
(
self.weights_,
self.means_,
self.covariances_,
self.y_probs,
self.y_hat_probs,
self.precisions_cholesky_,
) = params
# Attributes computation
_, n_features = self.means_.shape
if self.covariance_type == "full":
self.precisions_ = np.empty(self.precisions_cholesky_.shape)
for k, prec_chol in enumerate(self.precisions_cholesky_):
self.precisions_[k] = np.dot(prec_chol, prec_chol.T)
elif self.covariance_type == "tied":
self.precisions_ = np.dot(
self.precisions_cholesky_, self.precisions_cholesky_.T
)
else:
self.precisions_ = self.precisions_cholesky_**2
def _n_parameters(self):
"""Return the number of free parameters in the model."""
return super()._n_parameters() + 2 * self.n_components
def _estimate_y_log_prob(self, y):
"""Estimate the Gaussian distribution parameters.
Parameters
----------
y: array-like of shape (n_samples, n_classes)
y_hat: array-like of shpae (n_samples, n_classes)
"""
# add epsilon to avoid "RuntimeWarning: divide by zero encountered in log"
return np.log(np.dot(y, self.y_probs.T) + np.finfo(self.y_probs.dtype).eps)
def _estimate_y_hat_log_prob(self, y_hat):
"""Estimate the Gaussian distribution parameters.
Parameters
----------
y: array-like of shape (n_samples, n_classes)
y_hat: array-like of shpae (n_samples, n_classes)
"""
# add epsilon to avoid "RuntimeWarning: divide by zero encountered in log"
return np.log(
np.dot(y_hat, self.y_hat_probs.T) + np.finfo(self.y_hat_probs.dtype).eps
)
def _estimate_parameters(X, y, y_hat, resp, reg_covar, covariance_type):
"""Estimate the Gaussian distribution parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data array.
y: array-like of shape (n_samples, n_classes)
    y_hat: array-like of shape (n_samples, n_classes)
resp : array-like of shape (n_samples, n_components)
The responsibilities for each data sample in X.
reg_covar : float
The regularization added to the diagonal of the covariance matrices.
covariance_type : {'full', 'tied', 'diag', 'spherical'}
The type of precision matrices.
Returns
-------
nk : array-like of shape (n_components,)
The numbers of data samples in the current components.
means : array-like of shape (n_components, n_features)
The centers of the current components.
covariances : array-like
The covariance matrix of the current components.
The shape depends of the covariance_type.
"""
nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps # (n_components, )
means = np.dot(resp.T, X) / nk[:, np.newaxis]
covariances = {
"full": _estimate_gaussian_covariances_full,
"tied": _estimate_gaussian_covariances_tied,
"diag": _estimate_gaussian_covariances_diag,
"spherical": _estimate_gaussian_covariances_spherical,
}[covariance_type](resp, X, nk, means, reg_covar)
y_probs = np.dot(resp.T, y) / nk[:, np.newaxis] # (n_components, n_classes)
y_hat_probs = np.dot(resp.T, y_hat) / nk[:, np.newaxis] # (n_components, n_classes)
return nk, means, covariances, y_probs, y_hat_probs
DominoSlicer = MixtureSlicer
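# Usage sketch with synthetic numpy arrays in place of a DataPanel (illustrative
# only; the shapes and class count are made up). ``n_pca_components`` is kept below
# the embedding dimension and ``n_mixture_components`` at or above
# ``num_classes ** 2`` so the default "confusion" initialization is valid.
def _mixture_slicer_sketch() -> np.ndarray:
    rng = np.random.default_rng(0)
    embeddings = rng.normal(size=(500, 64))
    targets = rng.integers(0, 2, size=500)
    pred_probs = rng.random(size=(500, 2))
    pred_probs /= pred_probs.sum(axis=1, keepdims=True)
    slicer = MixtureSlicer(
        n_slices=3, n_pca_components=16, n_mixture_components=8, pbar=False
    )
    slicer.fit(embeddings=embeddings, targets=targets, pred_probs=pred_probs)
    return slicer.predict_proba(
        embeddings=embeddings, targets=targets, pred_probs=pred_probs
    )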
| domino-main | domino/_slice/mixture.py |
from __future__ import annotations
from typing import Union
from domino._slice.abstract import Slicer
from torch import nn
from torch.nn import functional as F
from torch.nn.functional import cross_entropy
import torch
from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge, Lasso
from sklearn.preprocessing import MinMaxScaler
from tqdm import tqdm
import numpy as np
import meerkat as mk
from ..utils import convert_to_torch, unpack_args, convert_to_numpy
class FusedSlicer(Slicer, nn.Module):
def __init__(
self,
n_slices: int = 5,
candidate_text: mk.DataPanel = None,
        text_column: Union[str, np.ndarray] = "text",
        text_embedding_column: Union[str, np.ndarray] = "embedding",
device: Union[int, str] = "cpu",
):
super().__init__(n_slices=n_slices)
self.candidate_text, self.candidate_text_embeddings = unpack_args(
candidate_text, text_column, text_embedding_column
)
(self.candidate_text_embeddings,) = convert_to_torch(
self.candidate_text_embeddings
)
self.device = device
self.text_idxs = None
self.text_embeddings = None
self.text = None
def _prepare_embs(self, *args):
return [inp.to(device=self.device, dtype=torch.float) for inp in args]
def fit(
self,
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = None,
pred_probs: Union[str, np.ndarray] = None,
) -> FusedSlicer:
embeddings, targets, pred_probs = unpack_args(
data, embeddings, targets, pred_probs
)
(embeddings,) = convert_to_torch(embeddings)
targets, pred_probs = convert_to_numpy(targets, pred_probs)
embeddings, candidate_text_embeddings = self._prepare_embs(
embeddings, self.candidate_text_embeddings
)
with torch.no_grad():
slice_scores = torch.matmul(embeddings, candidate_text_embeddings.T)
slice_scores = slice_scores.cpu().numpy()
        residuals = targets - pred_probs
        # NOTE: `normalize=True` was removed from sklearn's Ridge in v1.2; with newer
        # versions, standardize `slice_scores` before fitting an unnormalized Ridge.
        lr = Ridge(normalize=True).fit(slice_scores, residuals)
coef = lr.coef_.squeeze()
        self.text_idxs = np.argsort(-np.abs(coef))[: self.config.n_slices]
self.text_embeddings = candidate_text_embeddings[self.text_idxs]
self.text = self.candidate_text[self.text_idxs]
self.text_coefs = coef[self.text_idxs]
        return self
def predict(
self,
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = None,
pred_probs: Union[str, np.ndarray] = None,
losses: Union[str, np.ndarray] = None,
):
        return (
            self.predict_proba(data, embeddings, targets, pred_probs, losses) > 0.5
        ).astype(int)
def predict_proba(
self,
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = None,
pred_probs: Union[str, np.ndarray] = None,
losses: Union[str, np.ndarray] = None,
):
if self.text_embeddings is None:
raise ValueError("Must call `fit` before `predict`.")
(embeddings,) = unpack_args(data, embeddings)
(embeddings,) = convert_to_torch(embeddings)
(embeddings,) = self._prepare_embs(embeddings)
slice_scores = torch.matmul(embeddings, self.text_embeddings.T)
return slice_scores.cpu().numpy()
def describe(
self,
text_data: Union[dict, mk.DataPanel] = None,
text_embeddings: Union[str, np.ndarray] = "embedding",
text_descriptions: Union[str, np.ndarray] = "description",
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
num_descriptions: int = 3,
):
output = []
for pred_slice_idx in range(self.config.n_slices):
output.append(
{
"pred_slice_idx": pred_slice_idx,
"scores": [1],
"phrases": [self.text[pred_slice_idx]],
}
)
return output
def to(self, *args, **kwargs):
"""Intercept to on a device and set the self.device."""
if isinstance(args[0], (int, str, torch.device)):
self.device = args[0]
return super().to(*args, **kwargs)
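# Usage sketch with synthetic numpy arrays (illustrative only). The candidate text
# and embeddings stand in for CLIP-style phrase embeddings; ``fit`` regresses the
# residual between targets and predicted probabilities onto the phrase scores. It
# assumes ``unpack_args`` accepts a plain dict (as the ``Union[dict, mk.DataPanel]``
# annotations suggest) and that scikit-learn < 1.2 is installed, since ``fit`` uses
# ``Ridge(normalize=True)``.
def _fused_slicer_sketch() -> np.ndarray:
    rng = np.random.default_rng(0)
    candidate_text = np.array([f"a photo of object {i}" for i in range(50)])
    candidate_embs = rng.normal(size=(50, 32)).astype(np.float32)
    slicer = FusedSlicer(
        n_slices=3,
        candidate_text={"text": candidate_text, "embedding": candidate_embs},
    )
    embeddings = rng.normal(size=(200, 32)).astype(np.float32)
    targets = rng.integers(0, 2, size=200)
    pred_probs = rng.random(size=200)
    slicer.fit(embeddings=embeddings, targets=targets, pred_probs=pred_probs)
    return slicer.predict_proba(embeddings=embeddings)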
| domino-main | domino/_slice/fused.py |
| domino-main | domino/_slice/__init__.py |
from typing import Union
import meerkat as mk
import numpy as np
import torch
import torch.optim as optim
from torch.nn.functional import cross_entropy
from tqdm import tqdm
from domino.utils import convert_to_numpy, unpack_args
from .abstract import Slicer
## PlaneSpot imports
import os
import glob
from collections import defaultdict
from sklearn import mixture
import pandas as pd
class PlaneSpotSlicer(Slicer):
r"""
Implements PlaneSpot [plumb_2023], a simple SDM that fits a GMM to a 2D model
embedding, fit using scvis [ding_2018].
.. [plumb_2023]
Gregory Plumb*, Nari Johnson*, Ángel Alexander Cabrera, Ameet Talwalkar.
Towards a More Rigorous Science of Blindspot Discovery in Image
Classification Models. arXiv:2207.04104 [cs] (2023)
.. [ding_2018]
Jiarui Ding, Anne Condon, and Sohrab P Shah.
Interpretable dimensionality reduction of single cell transcriptome
data with deep generative models.
Nature communications, 9(1):1–13. (2018)
PREREQUISITES: Assumes that scvis is installed in the conda environment
at scvis_conda_env, using the instructions here:
https://github.com/shahcompbio/scvis
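    Examples
    --------
    A minimal usage sketch (the column names "emb", "target", and "pred_probs" and
    the conda environment name "scvis" are placeholders for this example):
    .. code-block:: python
        from domino._slice.planespot import PlaneSpotSlicer
        dp = ...  # Load dataset into a Meerkat DataPanel
        slicer = PlaneSpotSlicer(scvis_conda_env="scvis")
        slicer.fit(
            data=dp, embeddings="emb", targets="target", pred_probs="pred_probs"
        )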
"""
def __init__(
self,
scvis_conda_env: str, # name of conda environment where scvis is installed
n_slices: int = 10,
n_max_mixture_components: int = 33, # maximum number of mixture components
weight: float = 0.025, # weight hyperparameter
scvis_config_path = None, # custom scvis config path
scvis_output_dir = 'scvis', # path to output directory for scvis
        fit_scvis = True # if False, load a precomputed scvis embedding instead of re-fitting scvis
):
super().__init__(n_slices=n_slices)
# scvis hyper-parameters
self.scvis_conda_env = scvis_conda_env
self.config.scvis_config_path = scvis_config_path
self.config.scvis_output_dir = scvis_output_dir
self.fit_scvis = fit_scvis
# GMM hyper-parameters
self.config.n_max_mixture_components = n_max_mixture_components
self.config.weight = weight
self.gmm = None
def fit(
self,
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = None,
pred_probs: Union[str, np.ndarray] = None,
losses: Union[str, np.ndarray] = None,
verbose: bool = True,
random_state: int = 0, # random state for GMM
**kwargs
):
embeddings, targets, pred_probs, losses = unpack_args(
data, embeddings, targets, pred_probs, losses
)
embeddings, targets, pred_probs = convert_to_numpy(
embeddings, targets, pred_probs
)
# 1. Fit scvis.
if verbose:
print('Fitting scvis...')
scvis_embeddings = self._fit_scvis(embeddings.reshape(embeddings.shape[0], embeddings.shape[1]))
# 2. Fit GMM.
if verbose:
print('Fitting GMM...')
self._fit_gmm(scvis_embeddings,
pred_probs,
random_state,
verbose,)
def predict_proba(
self,
data: mk.DataPanel,
scvis_embeddings: str, # scvis column name
pred_probs: str, # predicted probabilities column name
) -> np.ndarray:
''' Returns the probability that each datapoint belongs to the
top self.n_slices slices.
Note that the probabilities may not sum to 1.
'''
# Append the scvis embedding and predicted probabilities; normalize
X = self._combine_embedding(data[scvis_embeddings], data[pred_probs])
probs_all_components = self.gmm.predict_proba(X)
probs_slices = probs_all_components[:, self.slice_indices]
return probs_slices
def predict(
self,
data: mk.DataPanel,
scvis_embeddings: str, # scvis column name
pred_probs: str, # predicted probabilities column name
) -> np.ndarray:
''' Assigns (or does not assign) each datapoint in data to a slice.
Datapoints that are not assigned to a slice have a returned label
of np.nan.
'''
# Append the scvis embedding and predicted probabilities; normalize
X = self._combine_embedding(data[scvis_embeddings], data[pred_probs])
hard_predictions = self.gmm.predict(X)
        # Map GMM component labels to slice indices (np.nan for components outside the top slices)
return np.array([self._gmm_label_to_slice_label(l) for l in hard_predictions])
def _fit_scvis(
self, embeddings: np.ndarray
):
''' Fits an scvis model to the input embedding(s).
'''
if self.fit_scvis:
### Fit scvis
# Make output directory
os.system(f'rm -rf {self.config.scvis_output_dir}')
os.system(f'mkdir {self.config.scvis_output_dir}')
# Dump the embeddings as a CSV file
embedding_filepath = f'{self.config.scvis_output_dir}/tmp.tsv'
embedding_df = pd.DataFrame(embeddings)
embedding_df.to_csv(embedding_filepath, sep = '\t', index = False)
# Run scvis using the command line
# source: https://github.com/shahcompbio/scvis
command = f'conda run -n {self.scvis_conda_env} scvis train --data_matrix_file {embedding_filepath} --out_dir {self.config.scvis_output_dir}'
if self.config.scvis_config_path is not None:
print(self.config.scvis_config_path)
# Add optional scvis config
command += f' --config_file {self.config.scvis_config_path}'
# Run the command (blocking)
print(command)
os.system(command)
print('done')
# Cleanup
os.system('rm -rf {}'.format(embedding_filepath))
### Load and return the scvis embeddings
return self._load_scvis_embeddings()
def _fit_gmm(
self,
reduced_embeddings: np.ndarray,
pred_probs: np.ndarray,
random_state: int, # random state for sklearn
verbose: bool = False,
):
''' Fits an error-aware Gaussian Mixture model to the scvis embeddings
and model predictions.
'''
# Store the min and max column values to normalize in the future.
self.min_scvis_vals = np.min(reduced_embeddings, axis = 0)
self.max_scvis_vals = np.max(reduced_embeddings, axis = 0)
X = self._combine_embedding(reduced_embeddings, pred_probs)
        lowest_bic = np.inf
bic = []
n_components_range = range(self.config.n_slices, self.config.n_max_mixture_components)
for n_components in n_components_range:
# Fit a GMM with n_components components
gmm = mixture.GaussianMixture(n_components = n_components,
covariance_type = 'full',
random_state = random_state)
gmm.fit(X)
# Calculate the Bayesian Information Criteria
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
self.gmm = best_gmm
# Assign a score to each mixture component to find the top-k slices
# Create the map from "group" to "set of points" (recorded as indices)
hard_preds = self.gmm.predict(X)
cluster_map = defaultdict(list)
for i, v in enumerate(hard_preds):
cluster_map[v].append(i)
if verbose:
print(f'The best GMM has {len(cluster_map)} components.')
# Score each of those groups
scores = []
errors = (1. - pred_probs)
for i in cluster_map:
indices = cluster_map[i]
score = len(indices) * np.mean(errors[indices]) ** 2 # Equivalent to 'number of errors * error rate'
scores.append((i, score))
scores = sorted(scores, key = lambda x: -1 * x[1])
# Store the indices of the mixture components with the highest scores
self.slice_indices = np.array([t[0] for t in scores[:self.config.n_slices]])
if verbose:
print('Scores:')
for i, score in scores:
indices = cluster_map[i]
print(i, score, len(indices) * np.mean(errors[indices]), np.mean(errors[indices]))
print()
def _gmm_label_to_slice_label(self, gmm_label: int):
''' Returns the slice index corresponding to the GMM component
index gmm_label.
If the datapoint's GMM component is not in the top self.n_slices
slices, returns np.nan instead.
'''
slice_idxs = np.argwhere(self.slice_indices == gmm_label)
if len(slice_idxs) > 0:
return slice_idxs.item()
else:
return np.nan
def _load_scvis_embeddings(self) -> np.ndarray:
''' Loads and returns pre-computed scvis embeddings from
self.config.scvis_output_dir.
'''
### Load and return the scvis embeddings
search_string = f'{self.config.scvis_output_dir}/*.tsv'
scvis_embedding_filepath = sorted(glob.glob(search_string), key = len)[0]
return pd.read_csv(scvis_embedding_filepath, sep = '\t', index_col = 0).values
def _combine_embedding(self,
scvis_reps: np.ndarray,
pred_probs: np.ndarray) -> np.ndarray:
''' Normalizes the scvis_reps and appends the predicted probabilities.
'''
# Normalize the embeddings using the minimum and maximum column values
X = np.copy(scvis_reps)
X -= self.min_scvis_vals
X /= self.max_scvis_vals
# Append (weighted) predicted probabilities to the embedding
return np.concatenate((X, self.config.weight * pred_probs.reshape(-1, 1)), axis = 1)
|
domino-main
|
domino/_slice/planespot.py
|
from __future__ import annotations
from typing import Union
from domino._slice.abstract import Slicer
from torch import nn
from torch.nn import functional as F
from torch.nn.functional import cross_entropy
import torch
from tqdm import tqdm
import numpy as np
import meerkat as mk
from ..utils import convert_to_torch, unpack_args, convert_to_numpy
class MLPSlicer(Slicer, nn.Module):
def __init__(
self,
n_slices: int = 5,
lr: float = 1e-2,
alpha: float = 1e-2,
max_epochs: int = 100,
batch_size: int = 1024,
device: str = "cpu",
pbar: bool = True,
return_losses: bool = False,
):
super().__init__(n_slices=n_slices)
self.mlp = None
self.config.lr = lr
self.config.alpha = alpha
self.config.max_epochs = max_epochs
self.config.batch_size = batch_size
self.device = device
self.pbar = pbar
self.return_losses = return_losses
self.encoder = None
self.decoder = None
def fit(
self,
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = None,
pred_probs: Union[str, np.ndarray] = None,
) -> MLPSlicer:
embeddings, targets, pred_probs = unpack_args(
data, embeddings, targets, pred_probs
)
embeddings, targets, pred_probs = convert_to_torch(
embeddings, targets, pred_probs
)
# l = torch.stack([targets, pred_probs], dim=-1)
l = targets - pred_probs
        self._fit(x=embeddings, l=l)
        return self
def predict(
self,
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = None,
pred_probs: Union[str, np.ndarray] = None,
losses: Union[str, np.ndarray] = None,
):
if self.encoder is None:
raise ValueError("Must call `fit` before `predict`.")
(embeddings,) = unpack_args(data, embeddings)
(embeddings,) = convert_to_torch(embeddings)
return self._predict(x=embeddings, return_probs=False)
def predict_proba(
self,
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = None,
pred_probs: Union[str, np.ndarray] = None,
losses: Union[str, np.ndarray] = None,
):
if self.encoder is None:
raise ValueError("Must call `fit` before `predict`.")
(embeddings,) = unpack_args(data, embeddings)
(embeddings,) = convert_to_torch(embeddings)
return self._predict_proba(x=embeddings).cpu().numpy()
def describe(
self,
text_data: Union[dict, mk.DataPanel],
text_embeddings: Union[str, np.ndarray] = "embedding",
text_descriptions: Union[str, np.ndarray] = "description",
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
num_descriptions: int = 3,
):
text_embeddings, text_descriptions = unpack_args(
text_data, text_embeddings, text_descriptions
)
(text_embeddings,) = convert_to_torch(text_embeddings)
# also produce predictions for the inverse
probs = np.concatenate(
[
self._predict_proba(text_embeddings).cpu().numpy(),
self._predict_proba(-text_embeddings).cpu().numpy(),
],
axis=0,
)
text_descriptions = np.concatenate(
[
text_descriptions,
"not " + text_descriptions,
],
axis=0,
)
output = []
for pred_slice_idx in range(self.config.n_slices):
scores = probs[:, pred_slice_idx]
idxs = np.argsort(-scores)[:num_descriptions]
output.append(
{
"pred_slice_idx": pred_slice_idx,
"scores": list(scores[idxs]),
"phrases": list(text_descriptions[idxs]),
}
)
return output
def to(self, *args, **kwargs):
"""Intercept to on a device and set the self.device."""
if isinstance(args[0], (int, str, torch.device)):
self.device = args[0]
return super().to(*args, **kwargs)
def _prepare_inputs(self, *args):
return [inp.to(device=self.device, dtype=torch.float) for inp in args]
def forward(self, x: torch.Tensor):
return torch.sigmoid(self.encoder(x))
def _fit(self, x: torch.Tensor, l: torch.Tensor):
x, l = self._prepare_inputs(x, l)
if len(l.shape) == 1:
l = l.unsqueeze(1)
self.embedding_dim = x.shape[1]
self.response_dim = l.shape[1]
self.encoder = nn.Linear(self.embedding_dim, self.config.n_slices).to(
self.device
)
self.decoder = nn.Linear(self.config.n_slices, self.response_dim).to(
self.device
)
optimizer = torch.optim.Adam(self.parameters(), lr=self.config.lr)
losses = []
with tqdm(total=self.config.max_epochs, disable=not self.pbar) as pbar:
for epoch in range(self.config.max_epochs):
batcher = lambda data: torch.split(data, self.config.batch_size, dim=0)
for x_batch, l_batch in zip(batcher(x), batcher(l)):
s_batch = self.forward(x_batch)
l_hat = self.decoder(s_batch)
loss = F.mse_loss(l_hat, l_batch)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if self.pbar:
pbar.update()
pbar.set_postfix(epoch=epoch, loss=loss.detach().cpu().item())
                    if self.return_losses:
                        # only the total reconstruction loss is tracked here
                        losses.append(
                            {
                                "value": loss.detach().cpu().item(),
                                "name": "total",
                                "epoch": epoch,
                            }
                        )
return losses
@torch.no_grad()
def _predict_proba(self, x: torch.Tensor):
x = self._prepare_inputs(x)[0]
return self.forward(x)
@torch.no_grad()
def _predict(self, x: torch.Tensor, return_probs: bool = False):
probs = self._predict_proba(x)
preds = (probs > 0.5).to(int)
if return_probs:
return preds, probs
else:
return preds
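# Illustrative usage sketch (not part of the original module). The column names
# "emb", "target", and "pred_probs" are assumptions for this example only:
#
#   from domino._slice.mlp import MLPSlicer
#   slicer = MLPSlicer(n_slices=5, max_epochs=100)
#   slicer.fit(data=valid_dp, embeddings="emb", targets="target", pred_probs="pred_probs")
#   test_dp["slice_probs"] = slicer.predict_proba(data=test_dp, embeddings="emb")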
|
domino-main
|
domino/_slice/mlp.py
|
from __future__ import annotations
import datetime
from dataclasses import dataclass
from typing import Union
import meerkat as mk
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from meerkat.columns.tensor_column import TensorColumn
from sklearn.linear_model import Ridge
from sklearn.metrics import roc_auc_score
from torch.nn.functional import cross_entropy
from tqdm import tqdm
from domino.utils import VariableColumn, requires_columns
from domino.utils import convert_to_numpy, unpack_args
from .abstract import Slicer
class MultiaccuracySlicer(Slicer):
r"""
Slice discovery based on MultiAccuracy auditing [kim_2019].
Discover slices by learning a simple function (e.g. ridge regression) that
correlates with the residual.
Examples
--------
Suppose you've trained a model and stored its predictions on a dataset in
a `Meerkat DataPanel <https://github.com/robustness-gym/meerkat>`_ with columns
"emb", "target", and "pred_probs". After loading the DataPanel, you can discover
underperforming slices of the validation dataset with the following:
.. code-block:: python
from domino import MultiaccuracySlicer
dp = ... # Load dataset into a Meerkat DataPanel
# split dataset
valid_dp = dp.lz[dp["split"] == "valid"]
test_dp = dp.lz[dp["split"] == "test"]
slicer = MultiaccuracySlicer()
slicer.fit(
data=valid_dp, embeddings="emb", targets="target", pred_probs="pred_probs"
)
dp["slicer"] = slicer.predict(
data=test_dp, embeddings="emb", targets="target", pred_probs="pred_probs"
)
Args:
n_slices (int, optional): The number of slices to discover.
Defaults to 5.
        eta (float, optional): Step size for the logits update; see the final line
            of Algorithm 1 in [kim_2019]. Defaults to 0.1.
        dev_valid_frac (float, optional): The fraction of data held out for computing
            the correlation. Defaults to 0.1.
.. [kim_2019]
@inproceedings{kim2019multiaccuracy,
title={Multiaccuracy: Black-box post-processing for fairness in classification},
author={Kim, Michael P and Ghorbani, Amirata and Zou, James},
booktitle={Proceedings of the 2019 AAAI/ACM Conference on AI, Ethics, and Society},
pages={247--254},
year={2019}
}
"""
def __init__(
self,
n_slices: int = 5,
eta: float = 0.1,
dev_valid_frac: float = 0.1,
partition_size_threshold: int = 10,
pbar: bool = False,
):
super().__init__(n_slices=n_slices)
self.config.eta = eta
self.config.dev_valid_frac = dev_valid_frac
self.config.partition_size_threshold = partition_size_threshold
self.auditors = []
self.pbar = pbar
def fit(
self,
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = "target",
pred_probs: Union[str, np.ndarray] = "pred_probs",
) -> MultiaccuracySlicer:
"""
        Fit the slicer to the data.
Args:
data (mk.DataPanel, optional): A `Meerkat DataPanel` with columns for
embeddings, targets, and prediction probabilities. The names of the
columns can be specified with the ``embeddings``, ``targets``, and
``pred_probs`` arguments. Defaults to None.
            embeddings (Union[str, np.ndarray], optional): The name of a column in
``data`` holding embeddings. If ``data`` is ``None``, then an np.ndarray
of shape (n_samples, dimension of embedding). Defaults to
"embedding".
targets (Union[str, np.ndarray], optional): The name of a column in
``data`` holding class labels. If ``data`` is ``None``, then an
np.ndarray of shape (n_samples,). Defaults to "target".
pred_probs (Union[str, np.ndarray], optional): The name of
a column in ``data`` holding model predictions (can either be "soft"
probability scores or "hard" 1-hot encoded predictions). If
``data`` is ``None``, then an np.ndarray of shape (n_samples, n_classes)
or (n_samples,) in the binary case. Defaults to "pred_probs".
Returns:
MultiaccuracySlicer: Returns a fit instance of MultiaccuracySlicer.
"""
embeddings, targets, pred_probs = unpack_args(
data, embeddings, targets, pred_probs
)
embeddings, targets, pred_probs = convert_to_numpy(
embeddings, targets, pred_probs
)
pred_probs = pred_probs[:, 1] if pred_probs.ndim > 1 else pred_probs
# inverse of sigmoid
logits = np.log(pred_probs / (1 - pred_probs))
dev_train_idxs, dev_valid_idxs = self._split_data(np.arange(len(targets)))
for t in tqdm(range(self.config.n_slices), disable=not self.pbar):
# partitioning the input space X based on the initial classifier predictions
preds = (pred_probs > 0.5).astype(int)
partitions = [1 - preds, preds, np.ones_like(preds)]
# compute the partial derivative of the cross-entropy loss with respect to
# the predictions
delta = self._compute_partial_derivative(pred_probs, targets)
residual = pred_probs - targets
corrs = []
candidate_auditors = []
for partition in partitions:
# for each partition, train a classifier to predict the partial
# derivative of the cross entropy loss with respect to predictions
partition_dev_train = np.where(partition[dev_train_idxs] == 1)[0]
partition_dev_valid = np.where(partition[dev_valid_idxs] == 1)[0]
if (
len(partition_dev_train) < self.config.partition_size_threshold
) or (len(partition_dev_valid) < self.config.partition_size_threshold):
continue
rr = Ridge(alpha=1)
rr.fit(
embeddings[dev_train_idxs][partition_dev_train],
delta[dev_train_idxs][partition_dev_train],
)
rr_prediction = rr.predict(
embeddings[dev_valid_idxs][partition_dev_valid]
)
candidate_auditors.append(rr)
corrs.append(
np.mean(
rr_prediction
* np.abs(residual[dev_valid_idxs][partition_dev_valid])
)
)
partition_idx = np.argmax(corrs)
auditor = candidate_auditors[partition_idx]
h = (
np.matmul(embeddings, np.expand_dims(auditor.coef_, -1))[:, 0]
+ auditor.intercept_
)
if partition_idx == 0:
logits += self.config.eta * h * partitions[partition_idx]
else:
logits -= self.config.eta * h * partitions[partition_idx]
pred_probs = torch.sigmoid(torch.tensor(logits)).numpy()
self.auditors.append(auditor)
return self
def predict(
self,
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = "target",
pred_probs: Union[str, np.ndarray] = "pred_probs",
) -> np.ndarray:
"""
        Get slice membership for data using the fit slicer.
.. caution::
Must call ``MultiaccuracySlicer.fit`` prior to calling ``MultiaccuracySlicer.predict``.
Args:
data (mk.DataPanel, optional): A `Meerkat DataPanel` with columns for
embeddings, targets, and prediction probabilities. The names of the
columns can be specified with the ``embeddings``, ``targets``, and
``pred_probs`` arguments. Defaults to None.
            embeddings (Union[str, np.ndarray], optional): The name of a column in
``data`` holding embeddings. If ``data`` is ``None``, then an np.ndarray
of shape (n_samples, dimension of embedding). Defaults to
"embedding".
targets (Union[str, np.ndarray], optional): The name of a column in
``data`` holding class labels. If ``data`` is ``None``, then an
np.ndarray of shape (n_samples,). Defaults to "target".
pred_probs (Union[str, np.ndarray], optional): The name of
a column in ``data`` holding model predictions (can either be "soft"
probability scores or "hard" 1-hot encoded predictions). If
``data`` is ``None``, then an np.ndarray of shape (n_samples, n_classes)
or (n_samples,) in the binary case. Defaults to "pred_probs".
Returns:
np.ndarray: A binary ``np.ndarray`` of shape (n_samples, n_slices) where
values are either 1 or 0.
"""
probs = self.predict_proba(
data=data,
embeddings=embeddings,
targets=targets,
pred_probs=pred_probs,
)
return (probs > 0.5).astype(int)
def predict_proba(
self,
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = "target",
pred_probs: Union[str, np.ndarray] = "pred_probs",
) -> np.ndarray:
"""
        Get probabilistic slice membership for data using the fit slicer.
.. caution::
Must call ``MultiaccuracySlicer.fit`` prior to calling
``MultiaccuracySlicer.predict_proba``.
Args:
data (mk.DataPanel, optional): A `Meerkat DataPanel` with columns for
embeddings, targets, and prediction probabilities. The names of the
columns can be specified with the ``embeddings``, ``targets``, and
``pred_probs`` arguments. Defaults to None.
            embeddings (Union[str, np.ndarray], optional): The name of a column in
``data`` holding embeddings. If ``data`` is ``None``, then an np.ndarray
of shape (n_samples, dimension of embedding). Defaults to
"embedding".
targets (Union[str, np.ndarray], optional): The name of a column in
``data`` holding class labels. If ``data`` is ``None``, then an
np.ndarray of shape (n_samples,). Defaults to "target".
pred_probs (Union[str, np.ndarray], optional): The name of
a column in ``data`` holding model predictions (can either be "soft"
probability scores or "hard" 1-hot encoded predictions). If
``data`` is ``None``, then an np.ndarray of shape (n_samples, n_classes)
or (n_samples,) in the binary case. Defaults to "pred_probs".
Returns:
            np.ndarray: A ``np.ndarray`` of shape (n_samples, n_slices) where values
            are in the range [0, 1].
"""
(embeddings,) = unpack_args(data, embeddings)
(embeddings,) = convert_to_numpy(embeddings)
all_weights = []
for slice_idx in range(self.config.n_slices):
auditor = self.auditors[slice_idx]
h = (
np.matmul(embeddings, np.expand_dims(auditor.coef_, -1))[:, 0]
+ auditor.intercept_
)
all_weights.append(h)
pred_slices = np.stack(all_weights, axis=1)
max_scores = np.max(pred_slices, axis=0)
return pred_slices / max_scores[None, :]
def _compute_partial_derivative(self, p, y):
"""
        Compute a smoothed version of the partial derivative of the cross-entropy
        loss with respect to the predictions.
        To help with numerical stability, the derivative is replaced by a linear
        extension for extreme probabilities (p >= 0.9 when y = 0, p < 0.1 when y = 1).
        """
y0 = (1 - y) * ((p < 0.9) / (1 - p + 1e-20) + (p >= 0.9) * (100 * p - 80))
y1 = y * ((p >= 0.1) / (p + 1e-20) + (p < 0.1) * (20 - 100 * p))
return y0 + y1
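    # In other words, the returned value is a clipped |d/dp CE(p, y)|:
    #   y = 0: 1 / (1 - p) for p < 0.9, with the linear extension 100 * p - 80 for p >= 0.9
    #   y = 1: 1 / p for p >= 0.1, with the linear extension 20 - 100 * p for p < 0.1
    # Both branches meet at a value of 10, so the signal stays finite near p = 0 and p = 1.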
def _split_data(self, data):
ratio = [1 - self.config.dev_valid_frac, self.config.dev_valid_frac]
num = (
data[0].shape[0]
if type(data) == list or type(data) == tuple
else data.shape[0]
)
idx = np.arange(num)
idx_train = idx[: int(ratio[0] * num)]
idx_val = idx[int(ratio[0] * num) : int((ratio[0] + ratio[1]) * num)]
train = data[idx_train]
val = data[idx_val]
return train, val
|
domino-main
|
domino/_slice/multiaccuracy.py
|
from __future__ import annotations
from collections import defaultdict
from multiprocessing.sharedctypes import Value
from typing import Union
import meerkat as mk
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from domino.utils import convert_to_numpy, unpack_args
from .abstract import Slicer
class BarlowSlicer(Slicer):
r"""
    Slice discovery based on Barlow [singla_2021]_.
Discover slices using a decision tree. TODO(singlasahil14): add any more details
describing your method
.. note:
The authors of the Barlow paper use this slicer with embeddings from a
classifier trained using an adversarially robust loss [engstrom_2019]_.
To compute embeddings using such a classifier, pass ``encoder="robust"`` to
``domino.embed``.
Examples
--------
Suppose you've trained a model and stored its predictions on a dataset in
a `Meerkat DataPanel <https://github.com/robustness-gym/meerkat>`_ with columns
"emb", "target", and "pred_probs". After loading the DataPanel, you can discover
underperforming slices of the validation dataset with the following:
.. code-block:: python
from domino import BarlowSlicer
dp = ... # Load dataset into a Meerkat DataPanel
# split dataset
valid_dp = dp.lz[dp["split"] == "valid"]
test_dp = dp.lz[dp["split"] == "test"]
barlow = BarlowSlicer()
barlow.fit(
data=valid_dp, embeddings="emb", targets="target", pred_probs="pred_probs"
)
dp["barlow_slices"] = barlow.transform(
data=test_dp, embeddings="emb", targets="target", pred_probs="pred_probs"
)
Args:
n_slices (int, optional): The number of slices to discover.
Defaults to 5.
        max_depth (int, optional): The maximum depth of the decision tree. Defaults to
            3. If None, then nodes are expanded until all leaves are pure or until all
            leaves contain fewer than 2 samples. See the scikit-learn documentation for
            more information.
        n_features (int, optional): The number of features from the embedding
            to use. Defaults to 128. Features are selected using a mutual information
            estimate.
pbar (bool, optional): Whether to show a progress bar. Ignored for barlow.
.. [singla_2021]
Singla, Sahil, et al. "Understanding failures of deep networks via robust
feature extraction." Proceedings of the IEEE/CVF Conference on Computer Vision
and Pattern Recognition. 2021.
.. [engstrom_2019]
@misc{robustness,
title={Robustness (Python Library)},
author={Logan Engstrom and Andrew Ilyas and Hadi Salman and Shibani
Santurkar and Dimitris Tsipras},
year={2019},
url={https://github.com/MadryLab/robustness}
}
"""
def __init__(
self,
n_slices: int = 5,
max_depth: int = 3, # TODO(singlasahil14): confirm this default
n_features: int = 128, # TODO(singlasahil14): confirm this default
pbar: bool = True,
):
super().__init__(n_slices=n_slices)
self.config.max_depth = max_depth
self.config.n_features = n_features
# parameters set after a call to fit
self._feature_indices = None
self._important_leaf_ids = None
self._decision_tree = None
def fit(
self,
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = "target",
pred_probs: Union[str, np.ndarray] = "pred_probs",
) -> BarlowSlicer:
"""
Fit the decision tree to data.
Args:
data (mk.DataPanel, optional): A `Meerkat DataPanel` with columns for
embeddings, targets, and prediction probabilities. The names of the
columns can be specified with the ``embeddings``, ``targets``, and
``pred_probs`` arguments. Defaults to None.
            embeddings (Union[str, np.ndarray], optional): The name of a column in
``data`` holding embeddings. If ``data`` is ``None``, then an np.ndarray
of shape (n_samples, dimension of embedding). Defaults to
"embedding".
targets (Union[str, np.ndarray], optional): The name of a column in
``data`` holding class labels. If ``data`` is ``None``, then an
np.ndarray of shape (n_samples,). Defaults to "target".
pred_probs (Union[str, np.ndarray], optional): The name of
a column in ``data`` holding model predictions (can either be "soft"
probability scores or "hard" 1-hot encoded predictions). If
``data`` is ``None``, then an np.ndarray of shape (n_samples, n_classes)
or (n_samples,) in the binary case. Defaults to "pred_probs".
Returns:
BarlowSlicer: Returns a fit instance of BarlowSlicer.
"""
embeddings, targets, pred_probs = unpack_args(
data, embeddings, targets, pred_probs
)
embeddings, targets, pred_probs = convert_to_numpy(
embeddings, targets, pred_probs
)
if pred_probs.ndim > 1:
preds = pred_probs.argmax(axis=-1)
else:
preds = pred_probs > 0.5
success = preds == targets
failure = np.logical_not(success)
sparse_features, feature_indices = _select_important_features(
embeddings,
failure,
num_features=self.config.n_features,
method="mutual_info",
)
self._feature_indices = feature_indices
decision_tree = _train_decision_tree(
sparse_features,
failure,
max_depth=self.config.max_depth,
criterion="entropy",
)
(
error_rate_array,
error_coverage_array,
) = decision_tree.compute_leaf_error_rate_coverage(sparse_features, failure)
important_leaf_ids = _important_leaf_nodes(
decision_tree, error_rate_array, error_coverage_array
)
self._decision_tree = decision_tree
self._important_leaf_ids = important_leaf_ids
return self
def predict(
self,
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = "target",
pred_probs: Union[str, np.ndarray] = "pred_probs",
) -> np.ndarray:
"""
Predict slice membership according to the learnt decision tree.
.. caution::
Must call ``BarlowSlicer.fit`` prior to calling ``BarlowSlicer.predict``.
Args:
data (mk.DataPanel, optional): A `Meerkat DataPanel` with columns for
embeddings, targets, and prediction probabilities. The names of the
columns can be specified with the ``embeddings``, ``targets``, and
``pred_probs`` arguments. Defaults to None.
            embeddings (Union[str, np.ndarray], optional): The name of a column in
``data`` holding embeddings. If ``data`` is ``None``, then an np.ndarray
of shape (n_samples, dimension of embedding). Defaults to
"embedding".
targets (Union[str, np.ndarray], optional): The name of a column in
``data`` holding class labels. If ``data`` is ``None``, then an
np.ndarray of shape (n_samples,). Defaults to "target".
pred_probs (Union[str, np.ndarray], optional): The name of
a column in ``data`` holding model predictions (can either be "soft"
probability scores or "hard" 1-hot encoded predictions). If
``data`` is ``None``, then an np.ndarray of shape (n_samples, n_classes)
or (n_samples,) in the binary case. Defaults to "pred_probs".
Returns:
np.ndarray: A binary ``np.ndarray`` of shape (n_samples, n_slices) where
values are either 1 or 0.
"""
if self._decision_tree is None:
raise ValueError(
"Must call `fit` prior to calling `predict` or `predict_proba`."
)
(embeddings,) = unpack_args(data, embeddings)
(embeddings,) = convert_to_numpy(embeddings)
embeddings = embeddings[:, self._feature_indices]
leaves = self._decision_tree.apply(embeddings) # (n_samples,)
# convert to 1-hot encoding of size (n_samples, n_slices) using broadcasting
slices = (
leaves[:, np.newaxis] == self._important_leaf_ids[np.newaxis, :]
).astype(int)
return slices
def predict_proba(
self,
data: Union[dict, mk.DataPanel] = None,
embeddings: Union[str, np.ndarray] = "embedding",
targets: Union[str, np.ndarray] = "target",
pred_probs: Union[str, np.ndarray] = "pred_probs",
) -> np.ndarray:
"""
Predict slice membership according to the learnt decision tree.
.. warning::
Because the decision tree does not produce probabilistic leaf assignments,
this method is equivalent to `predict`
.. caution::
Must call ``BarlowSlicer.fit`` prior to calling
``BarlowSlicer.predict_proba``.
Args:
data (mk.DataPanel, optional): A `Meerkat DataPanel` with columns for
embeddings, targets, and prediction probabilities. The names of the
columns can be specified with the ``embeddings``, ``targets``, and
``pred_probs`` arguments. Defaults to None.
            embeddings (Union[str, np.ndarray], optional): The name of a column in
``data`` holding embeddings. If ``data`` is ``None``, then an np.ndarray
of shape (n_samples, dimension of embedding). Defaults to
"embedding".
targets (Union[str, np.ndarray], optional): The name of a column in
``data`` holding class labels. If ``data`` is ``None``, then an
np.ndarray of shape (n_samples,). Defaults to "target".
pred_probs (Union[str, np.ndarray], optional): The name of
a column in ``data`` holding model predictions (can either be "soft"
probability scores or "hard" 1-hot encoded predictions). If
``data`` is ``None``, then an np.ndarray of shape (n_samples, n_classes)
or (n_samples,) in the binary case. Defaults to "pred_probs".
Returns:
            np.ndarray: A ``np.ndarray`` of shape (n_samples, n_slices) where values
            are in the range [0, 1].
"""
return self.predict(data, embeddings, targets, pred_probs)
def _mutual_info_select(train_features_class, train_failure_class, num_features=20):
from sklearn.feature_selection import mutual_info_classif
mi = mutual_info_classif(train_features_class, train_failure_class, random_state=0)
important_features_indices = np.argsort(mi)[-num_features:]
important_features_values = mi[important_features_indices]
return important_features_indices, important_features_values
def _feature_importance_select(train_features_class, num_features=20):
fi = np.mean(train_features_class, axis=0)
important_features_indices = np.argsort(fi)[-num_features:]
important_features_values = fi[important_features_indices]
return important_features_indices, important_features_values
def _select_important_features(
train_features, train_failure, num_features=20, method="mutual_info"
):
"""Perform feature selection using some prespecified method such as
mutual information.
Args:
train_features (_type_): _description_
train_failure (_type_): _description_
num_features (int, optional): _description_. Defaults to 20.
method (str, optional): _description_. Defaults to 'mutual_info'.
Raises:
ValueError: _description_
Returns:
_type_: _description_
"""
if method == "mutual_info":
important_indices, _ = _mutual_info_select(
train_features, train_failure, num_features=num_features
)
elif method == "feature_importance":
important_indices, _ = _feature_importance_select(
train_features, num_features=num_features
)
else:
raise ValueError("Unknown feature selection method")
train_sparse_features = train_features[:, important_indices]
return train_sparse_features, important_indices
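# Illustrative example (not part of the original module): with an embedding matrix
# `embs` of shape (n, d) and a boolean `failure` vector of shape (n,), keep the 128
# dimensions most predictive of failure:
#
#   sparse_embs, idxs = _select_important_features(embs, failure, num_features=128)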
class BarlowDecisionTreeClassifier(DecisionTreeClassifier):
"""Extension of scikit-learn's DecisionTreeClassifier"""
def fit_tree(self, train_data, train_labels):
"""Learn decision tree using features 'train_data' and labels 'train_labels"""
num_true = np.sum(train_labels)
num_false = np.sum(np.logical_not(train_labels))
if self.class_weight == "balanced":
self.float_class_weight = num_false / num_true
elif isinstance(self.class_weight, dict):
keys_list = list(self.class_weight.keys())
assert len(keys_list) == 2
assert 0 in keys_list
assert 1 in keys_list
self.float_class_weight = self.class_weight[1]
self.fit(train_data, train_labels)
true_dict, false_dict = self.compute_TF_dict(train_data, train_labels)
self.train_true_dict = dict(true_dict)
self.train_false_dict = dict(false_dict)
self._compute_parent()
true_array = np.array(list(true_dict))
false_array = np.array(list(false_dict))
unique_leaf_ids = np.union1d(true_array, false_array)
self.leaf_ids = unique_leaf_ids
true_leaves = []
for leaf_id in unique_leaf_ids:
true_count = true_dict[leaf_id]
false_count = false_dict[leaf_id]
if true_count * self.float_class_weight > false_count:
true_leaves.append(leaf_id)
self.true_leaves = true_leaves
return self
def _compute_parent(self):
"""Find the parent of every leaf node"""
n_nodes = self.tree_.node_count
children_left = self.tree_.children_left
children_right = self.tree_.children_right
self.parent = np.zeros(shape=n_nodes, dtype=np.int64)
stack = [0]
while len(stack) > 0:
node_id = stack.pop()
child_left = children_left[node_id]
child_right = children_right[node_id]
if child_left != child_right:
self.parent[child_left] = node_id
self.parent[child_right] = node_id
stack.append(child_left)
stack.append(child_right)
def compute_leaf_data(self, data, leaf_id):
"""Find which of the data points lands in the leaf node with identifier 'leaf_id'"""
leaf_ids = self.apply(data)
return np.nonzero(leaf_ids == leaf_id)[0]
def compute_leaf_truedata(self, data, labels, leaf_id):
"""Find which of the data points lands in the leaf node with identifier
'leaf_id' and for which the prediction is 'true'."""
leaf_ids = self.apply(data)
leaf_data_indices = np.nonzero(leaf_ids == leaf_id)[0]
leaf_failure_labels = labels[leaf_data_indices]
leaf_failure_indices = leaf_data_indices[leaf_failure_labels]
return leaf_failure_indices
def compute_TF_dict(self, data, labels):
"""
Returns two dictionaries 'true_dict' and 'false_dict'.
true_dict maps every leaf_id to the number of correctly classified
data points in the leaf with that leaf_id.
false_dict maps every leaf_id to the number of incorrectly classified
data points in the leaf with that leaf_id.
"""
def create_dict(unique, counts, dtype=int):
count_dict = defaultdict(dtype)
for u, c in zip(unique, counts):
count_dict[u] = count_dict[u] + c
return count_dict
leaf_ids = self.apply(data)
true_leaf_ids = leaf_ids[np.nonzero(labels)]
false_leaf_ids = leaf_ids[np.nonzero(np.logical_not(labels))]
true_unique, _, true_unique_counts = np.unique(
true_leaf_ids, return_index=True, return_counts=True
)
true_dict = create_dict(true_unique, true_unique_counts)
false_unique, _, false_unique_counts = np.unique(
false_leaf_ids, return_index=True, return_counts=True
)
false_dict = create_dict(false_unique, false_unique_counts)
return true_dict, false_dict
def compute_precision_recall(self, data, labels, compute_ALER=True):
"""
Compute precision and recall for the tree. Also compute
Average Leaf Error Rate if compute_ALER is True.
"""
true_dict, false_dict = self.compute_TF_dict(data, labels)
total_true = np.sum(labels)
total_pred = 0
total = 0
for leaf_id in self.true_leaves:
true_count = true_dict[leaf_id]
false_count = false_dict[leaf_id]
total_pred += true_count
total += true_count + false_count
precision = total_pred / total
recall = total_pred / total_true
if compute_ALER:
average_precision = self.compute_average_leaf_error_rate(data, labels)
return precision, recall, average_precision
else:
return precision, recall
def compute_average_leaf_error_rate(self, data, labels):
"""Compute Average Leaf Error Rate using the trained decision tree"""
num_true = np.sum(labels)
true_dict, false_dict = self.compute_TF_dict(data, labels)
avg_leaf_error_rate = 0
for leaf_id in self.leaf_ids:
true_count = true_dict[leaf_id]
false_count = false_dict[leaf_id]
if true_count + false_count > 0:
curr_error_coverage = true_count / num_true
curr_error_rate = true_count / (true_count + false_count)
avg_leaf_error_rate += curr_error_coverage * curr_error_rate
return avg_leaf_error_rate
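    # Written out, the quantity accumulated above is
    #   ALER = sum over leaves of (error coverage of leaf) * (error rate of leaf)
    # where error coverage = (# failures in leaf) / (total # failures) and
    # error rate = (# failures in leaf) / (# points in leaf).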
def compute_decision_path(self, leaf_id, important_features_indices=None):
"""Compute decision_path (the set of decisions used to arrive at a certain
leaf)
"""
assert leaf_id in self.leaf_ids
features_arr = self.tree_.feature
thresholds_arr = self.tree_.threshold
children_left = self.tree_.children_left
children_right = self.tree_.children_right
path = []
curr_node = leaf_id
while curr_node > 0:
parent_node = self.parent[curr_node]
is_left_child = children_left[parent_node] == curr_node
is_right_child = children_right[parent_node] == curr_node
assert is_left_child ^ is_right_child
if is_left_child:
direction = "left"
else:
direction = "right"
curr_node = parent_node
curr_feature = features_arr[curr_node]
curr_threshold = np.round(thresholds_arr[curr_node], 6)
if important_features_indices is not None:
curr_feature_original = important_features_indices[curr_feature]
else:
curr_feature_original = curr_feature
path.insert(
0, (curr_node, curr_feature_original, curr_threshold, direction)
)
return path
def compute_leaf_error_rate_coverage(self, data, labels):
"""Compute error rate and error coverage for every node in the tree."""
total_failures = np.sum(labels)
true_dict, false_dict = self.compute_TF_dict(data, labels)
n_nodes = self.tree_.node_count
children_left = self.tree_.children_left
children_right = self.tree_.children_right
error_rate_array = np.zeros(shape=n_nodes, dtype=float)
error_coverage_array = np.zeros(shape=n_nodes, dtype=float)
stack = [(0, True)]
while len(stack) > 0:
node_id, traverse = stack.pop()
child_left = children_left[node_id]
child_right = children_right[node_id]
if traverse:
if child_left != child_right:
stack.append((node_id, False))
stack.append((child_left, True))
stack.append((child_right, True))
else:
num_true_in_node = true_dict[node_id]
num_false_in_node = false_dict[node_id]
num_total_in_node = num_true_in_node + num_false_in_node
if num_total_in_node > 0:
leaf_error_rate = num_true_in_node / num_total_in_node
else:
leaf_error_rate = 0.0
leaf_error_coverage = num_true_in_node / total_failures
error_coverage_array[node_id] = leaf_error_coverage
error_rate_array[node_id] = leaf_error_rate
else:
child_left_ER = error_rate_array[child_left]
child_right_ER = error_rate_array[child_right]
child_left_EC = error_coverage_array[child_left]
child_right_EC = error_coverage_array[child_right]
child_ER = (
child_left_ER * child_left_EC + child_right_ER * child_right_EC
)
child_EC = child_left_EC + child_right_EC
if child_EC > 0:
error_rate_array[node_id] = child_ER / child_EC
else:
error_rate_array[node_id] = 0.0
error_coverage_array[node_id] = child_EC
return error_rate_array, error_coverage_array
def _train_decision_tree(
train_sparse_features, train_failure, max_depth=1, criterion="entropy"
):
num_true = np.sum(train_failure)
num_false = np.sum(np.logical_not(train_failure))
rel_weight = num_false / num_true
class_weight_dict = {0: 1, 1: rel_weight}
decision_tree = BarlowDecisionTreeClassifier(
max_depth=max_depth, criterion=criterion, class_weight=class_weight_dict
)
decision_tree.fit_tree(train_sparse_features, train_failure)
return decision_tree
def _important_leaf_nodes(decision_tree, precision_array, recall_array):
"""
Select leaf nodes with highest importance value i.e highest contribution to
average leaf error rate.
"""
leaf_ids = decision_tree.leaf_ids
leaf_precision = precision_array[leaf_ids]
leaf_recall = recall_array[leaf_ids]
leaf_precision_recall = leaf_precision * leaf_recall
important_leaves = np.argsort(-leaf_precision_recall)
return leaf_ids[important_leaves]
|
domino-main
|
domino/_slice/barlow.py
|
"""Factor Analysis.
A latent linear variable model.
FactorAnalysis is similar to probabilistic PCA implemented by PCA.score
While PCA assumes Gaussian noise with the same variance for each
feature, the FactorAnalysis model assumes different variances for
each of them.
This implementation is based on David Barber's Book,
Bayesian Reasoning and Machine Learning,
http://www.cs.ucl.ac.uk/staff/d.barber/brml,
Algorithm 21.1
"""
from sklearn.decomposition import FactorAnalysis
# Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis A. Engemann <denis-alexander.engemann@inria.fr>
# License: BSD3
import warnings
from math import sqrt, log
import numpy as np
from scipy import linalg
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_random_state
from sklearn.utils.extmath import fast_logdet, randomized_svd, squared_norm
from sklearn.utils.validation import check_is_fitted, _deprecate_positional_args
from sklearn.exceptions import ConvergenceWarning
class DominoFactorAnalysis(TransformerMixin, BaseEstimator):
"""Factor Analysis (FA).
A simple linear generative model with Gaussian latent variables.
The observations are assumed to be caused by a linear transformation of
lower dimensional latent factors and added Gaussian noise.
Without loss of generality the factors are distributed according to a
Gaussian with zero mean and unit covariance. The noise is also zero mean
and has an arbitrary diagonal covariance matrix.
If we would restrict the model further, by assuming that the Gaussian
noise is even isotropic (all diagonal entries are the same) we would obtain
:class:`PPCA`.
FactorAnalysis performs a maximum likelihood estimate of the so-called
`loading` matrix, the transformation of the latent variables to the
observed ones, using SVD based approach.
Read more in the :ref:`User Guide <FA>`.
.. versionadded:: 0.13
Parameters
----------
n_components : int, default=None
Dimensionality of latent space, the number of components
of ``X`` that are obtained after ``transform``.
If None, n_components is set to the number of features.
    tol : float, default=1e-2
Stopping tolerance for log-likelihood increase.
copy : bool, default=True
Whether to make a copy of X. If ``False``, the input X gets overwritten
during fitting.
max_iter : int, default=1000
Maximum number of iterations.
noise_variance_init : ndarray of shape (n_features,), default=None
The initial guess of the noise variance for each feature.
If None, it defaults to np.ones(n_features).
svd_method : {'lapack', 'randomized'}, default='randomized'
Which SVD method to use. If 'lapack' use standard SVD from
scipy.linalg, if 'randomized' use fast ``randomized_svd`` function.
Defaults to 'randomized'. For most applications 'randomized' will
be sufficiently precise while providing significant speed gains.
Accuracy can also be improved by setting higher values for
`iterated_power`. If this is not sufficient, for maximum precision
you should choose 'lapack'.
iterated_power : int, default=3
Number of iterations for the power method. 3 by default. Only used
if ``svd_method`` equals 'randomized'.
rotation : {'varimax', 'quartimax'}, default=None
If not None, apply the indicated rotation. Currently, varimax and
quartimax are implemented. See
`"The varimax criterion for analytic rotation in factor analysis"
<https://link.springer.com/article/10.1007%2FBF02289233>`_
H. F. Kaiser, 1958.
.. versionadded:: 0.24
random_state : int or RandomState instance, default=0
Only used when ``svd_method`` equals 'randomized'. Pass an int for
reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
Components with maximum variance.
loglike_ : list of shape (n_iterations,)
The log likelihood at each iteration.
noise_variance_ : ndarray of shape (n_features,)
The estimated noise variance for each feature.
n_iter_ : int
Number of iterations run.
mean_ : ndarray of shape (n_features,)
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.decomposition import FactorAnalysis
>>> X, _ = load_digits(return_X_y=True)
>>> transformer = FactorAnalysis(n_components=7, random_state=0)
>>> X_transformed = transformer.fit_transform(X)
>>> X_transformed.shape
(1797, 7)
References
----------
- David Barber, Bayesian Reasoning and Machine Learning,
Algorithm 21.1.
- Christopher M. Bishop: Pattern Recognition and Machine Learning,
Chapter 12.2.4.
See Also
--------
PCA: Principal component analysis is also a latent linear variable model
which however assumes equal noise variance for each feature.
This extra assumption makes probabilistic PCA faster as it can be
computed in closed form.
FastICA: Independent component analysis, a latent variable model with
non-Gaussian latent variables.
"""
@_deprecate_positional_args
def __init__(self, n_components=None, *, tol=1e-2, copy=True,
max_iter=1000,
noise_variance_init=None, svd_method='randomized',
iterated_power=3, rotation=None, random_state=0):
self.n_components = n_components
self.copy = copy
self.tol = tol
self.max_iter = max_iter
if svd_method not in ['lapack', 'randomized']:
raise ValueError('SVD method %s is not supported. Please consider'
' the documentation' % svd_method)
self.svd_method = svd_method
self.noise_variance_init = noise_variance_init
self.iterated_power = iterated_power
self.random_state = random_state
self.rotation = rotation
def fit(self, X, y=None):
"""Fit the FactorAnalysis model to X using SVD based approach
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : Ignored
Returns
-------
self
"""
X = self._validate_data(X, copy=self.copy, dtype=np.float64)
n_samples, n_features = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
# some constant terms
nsqrt = sqrt(n_samples)
llconst = n_features * log(2. * np.pi) + n_components
var = np.var(X, axis=0)
if self.noise_variance_init is None:
psi = np.ones(n_features, dtype=X.dtype)
else:
if len(self.noise_variance_init) != n_features:
raise ValueError("noise_variance_init dimension does not "
"with number of features : %d != %d" %
(len(self.noise_variance_init), n_features))
psi = np.array(self.noise_variance_init)
loglike = []
old_ll = -np.inf
SMALL = 1e-12
# we'll modify svd outputs to return unexplained variance
# to allow for unified computation of loglikelihood
if self.svd_method == 'lapack':
def my_svd(X):
_, s, Vt = linalg.svd(X,
full_matrices=False,
check_finite=False)
return (s[:n_components], Vt[:n_components],
squared_norm(s[n_components:]))
elif self.svd_method == 'randomized':
random_state = check_random_state(self.random_state)
def my_svd(X):
_, s, Vt = randomized_svd(X, n_components,
random_state=random_state,
n_iter=self.iterated_power)
return s, Vt, squared_norm(X) - squared_norm(s)
else:
raise ValueError('SVD method %s is not supported. Please consider'
' the documentation' % self.svd_method)
for i in range(self.max_iter):
# SMALL helps numerics
sqrt_psi = np.sqrt(psi) + SMALL
s, Vt, unexp_var = my_svd(X / (sqrt_psi * nsqrt))
s **= 2
# Use 'maximum' here to avoid sqrt problems.
W = np.sqrt(np.maximum(s - 1., 0.))[:, np.newaxis] * Vt
del Vt
W *= sqrt_psi
# loglikelihood
ll = llconst + np.sum(np.log(s))
ll += unexp_var + np.sum(np.log(psi))
ll *= -n_samples / 2.
loglike.append(ll)
if (ll - old_ll) < self.tol:
break
old_ll = ll
psi = np.maximum(var - np.sum(W ** 2, axis=0), SMALL)
else:
warnings.warn('FactorAnalysis did not converge.' +
' You might want' +
' to increase the number of iterations.',
ConvergenceWarning)
self.components_ = W
if self.rotation is not None:
self.components_ = self._rotate(W)
self.noise_variance_ = psi
self.loglike_ = loglike
self.n_iter_ = i + 1
return self
def transform(self, X):
"""Apply dimensionality reduction to X using the model.
Compute the expected mean of the latent variables.
See Barber, 21.2.33 (or Bishop, 12.66).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
The latent variables of X.
"""
check_is_fitted(self)
X = self._validate_data(X, reset=False)
Ih = np.eye(len(self.components_))
X_transformed = X - self.mean_
Wpsi = self.components_ / self.noise_variance_
cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T))
tmp = np.dot(X_transformed, Wpsi.T)
X_transformed = np.dot(tmp, cov_z)
return X_transformed
def get_covariance(self):
"""Compute data covariance with the FactorAnalysis model.
``cov = components_.T * components_ + diag(noise_variance)``
Returns
-------
cov : ndarray of shape (n_features, n_features)
Estimated covariance of data.
"""
check_is_fitted(self)
cov = np.dot(self.components_.T, self.components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the FactorAnalysis model.
Returns
-------
precision : ndarray of shape (n_features, n_features)
Estimated precision of data.
"""
check_is_fitted(self)
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components == 0:
return np.diag(1. / self.noise_variance_)
if self.n_components == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
precision = np.dot(components_ / self.noise_variance_, components_.T)
precision.flat[::len(precision) + 1] += 1.
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= self.noise_variance_[:, np.newaxis]
precision /= -self.noise_variance_[np.newaxis, :]
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def score_samples(self, X):
"""Compute the log-likelihood of each sample
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The data
Returns
-------
ll : ndarray of shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self)
X = self._validate_data(X, reset=False)
Xr = X - self.mean_
precision = self.get_precision()
n_features = X.shape[1]
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Compute the average log-likelihood of the samples
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The data
y : Ignored
Returns
-------
ll : float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
def _rotate(self, components, n_components=None, tol=1e-6):
"Rotate the factor analysis solution."
# note that tol is not exposed
implemented = ("varimax", "quartimax")
method = self.rotation
if method in implemented:
return _ortho_rotation(components.T, method=method,
tol=tol)[:self.n_components]
else:
raise ValueError("'method' must be in %s, not %s"
% (implemented, method))
def _ortho_rotation(components, method='varimax', tol=1e-6, max_iter=100):
"""Return rotated components."""
nrow, ncol = components.shape
rotation_matrix = np.eye(ncol)
var = 0
for _ in range(max_iter):
comp_rot = np.dot(components, rotation_matrix)
if method == "varimax":
tmp = comp_rot * np.transpose((comp_rot ** 2).sum(axis=0) / nrow)
elif method == "quartimax":
tmp = 0
u, s, v = np.linalg.svd(
np.dot(components.T, comp_rot ** 3 - tmp))
rotation_matrix = np.dot(u, v)
var_new = np.sum(s)
if var != 0 and var_new < var * (1 + tol):
break
var = var_new
return np.dot(components, rotation_matrix).T
|
domino-main
|
domino/_slice/factor.py
|
from dataclasses import dataclass
import meerkat as mk
import numpy as np
import torch
import torch.nn as nn
import umap
from sklearn.decomposition import PCA
from stratification.cluster.models.cluster import AutoKMixtureModel  # from the GEORGE codebase; required below
from torch.nn.functional import cross_entropy
from umap import UMAP
from domino.utils import VariableColumn, requires_columns
from .abstract import SliceDiscoveryMethod
class GeorgeSDM(SliceDiscoveryMethod):
@dataclass
class Config(SliceDiscoveryMethod.Config):
n_components: int = 2
n_clusters: int = 25
n_classes: int = 2
reduction_method: str = "umap"
cluster_method: str = "gmm"
n_init: int = 3
concat_loss_component: bool = False
RESOURCES_REQUIRED = {"cpu": 1, "gpu": 0}
def __init__(self, config: dict = None, **kwargs):
super().__init__(config, **kwargs)
self.class_to_reducer = {
klass: self._get_reducer() for klass in range(self.config.n_classes)
}
self.class_to_clusterer = {
klass: AutoKMixtureModel(
cluster_method=self.config.cluster_method,
max_k=self.config.n_clusters,
n_init=self.config.n_init,
search=False,
)
for klass in range(self.config.n_classes)
}
def _get_reducer(self):
if self.config.reduction_method == "umap":
return UMAP(n_components=self.config.n_components)
elif self.config.reduction_method == "pca":
return PCA(n_components=self.config.n_components)
else:
raise ValueError(
f"Reduction method {self.config.reduction_method} not supported."
)
def _compute_losses(self, data_dp: mk.DataPanel):
probs = (
data_dp["probs"].data
if isinstance(data_dp["probs"], mk.TensorColumn)
else torch.tensor(data_dp["probs"].data)
)
return cross_entropy(
probs.to(torch.float32),
torch.tensor(data_dp["target"]).to(torch.long),
reduction="none",
)
@requires_columns(
dp_arg="data_dp", columns=["probs", "target", VariableColumn("self.config.emb")]
)
def fit(
self,
data_dp: mk.DataPanel,
model: nn.Module = None,
):
data_dp["loss"] = self._compute_losses(data_dp).data.numpy()
self.slice_cluster_indices = {}
for klass in range(self.config.n_classes):
# filter `data_dp` to only include rows in the class
curr_dp = data_dp.lz[data_dp["target"] == klass]
# (1) reduction phase
embs = curr_dp[self.config.emb].data
reduced_embs = self.class_to_reducer[klass].fit_transform(embs)
if self.config.concat_loss_component:
reduced_embs = np.concatenate(
[
reduced_embs,
np.expand_dims(curr_dp["loss"].data, axis=1),
],
axis=1,
)
# (2) clustering phase
self.class_to_clusterer[klass].fit(reduced_embs)
clusters = self.class_to_clusterer[klass].predict_proba(reduced_embs)
cluster_losses = np.dot(curr_dp["loss"].data.T, clusters)
            # Split the slice budget evenly across classes; any remainder goes to class 0.
n_slices = self.config.n_slices // self.config.n_classes + (
self.config.n_slices % self.config.n_classes
) * int(klass == 0)
self.slice_cluster_indices[klass] = (-cluster_losses).argsort()[:n_slices]
return self
@requires_columns(
dp_arg="data_dp", columns=["target", VariableColumn("self.config.emb")]
)
def transform(
self,
data_dp: mk.DataPanel,
):
slices = np.zeros((len(data_dp), self.config.n_slices))
start = 0
for klass in range(self.config.n_classes):
# filter `data_dp` to only include rows in the class
curr_dp = data_dp.lz[data_dp["target"] == klass]
# (1) reduction phase
acts = curr_dp[self.config.emb].data
reduced_embs = self.class_to_reducer[klass].transform(acts)
if self.config.concat_loss_component:
losses = self._compute_losses(curr_dp).data.numpy()
reduced_embs = np.concatenate(
[reduced_embs, np.expand_dims(losses, axis=1)], axis=1
)
# (2) cluster phase
if self.config.cluster_method == "kmeans":
raise NotImplementedError
else:
                # select the probability columns for the clusters chosen as slices in `fit`
class_clusters = self.class_to_clusterer[klass].predict_proba(
reduced_embs
)
class_slices = class_clusters[:, self.slice_cluster_indices[klass]]
slices[
data_dp["target"] == klass, start : start + class_slices.shape[-1]
] = class_slices
start = start + class_slices.shape[-1]
data_dp["pred_slices"] = slices
# if self.config.cluster_method == "kmeans":
# # since slices in other methods are not necessarily mutually exclusive, it's
# # important to return as a matrix of binary columns, one for each slice
# dp["pred_slices"] = np.stack(
# [
# (dp["pred_slices"].data == slice_idx).astype(int)
# for slice_idx in range(self.config.n_slices)
# ],
# axis=-1,
# )
return data_dp
|
domino-main
|
domino/_slice/george.py
|
from typing import Dict, List, Union
import torch
from .encoder import Encoder
def transformers(
variant: str = "bert-large-cased", device: Union[int, str] = "cpu"
) -> Dict[str, Encoder]:
"""Contrastive Language-Image Pre-training (CLIP) encoders [radford_2021]_. Includes
encoders for the following modalities:
- "text"
Encoders will map these different modalities to the same embedding space.
Args:
variant (str, optional): A model name listed by `clip.available_models()`, or
the path to a model checkpoint containing the state_dict. Defaults to
"ViT-B/32".
device (Union[int, str], optional): The device on which the encoders will be
loaded. Defaults to "cpu".
@misc{gpt-j,
author = {Wang, Ben and Komatsuzaki, Aran},
title = {{GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model}},
howpublished = {\url{https://github.com/kingoflolz/mesh-transformer-jax}},
year = 2021,
month = May
}
@misc{mesh-transformer-jax,
author = {Wang, Ben},
title = {{Mesh-Transformer-JAX: Model-Parallel Implementation of Transformer Language Model with JAX}},
howpublished = {\url{https://github.com/kingoflolz/mesh-transformer-jax}},
year = 2021,
month = May
}
"""
    try:
        from transformers import AutoTokenizer, AutoModel
    except ImportError:
        raise ImportError("To embed with transformers run `pip install transformers`")
    tokenizer = AutoTokenizer.from_pretrained(variant)
    model = AutoModel.from_pretrained(variant)
    model.to(device)
    def _encode(x: List[str]) -> torch.Tensor:
        # coerce to list in case a pandas Series or ndarray is passed in
        x = list(x)
        return model(
            **tokenizer(x, return_tensors="pt", padding=True, truncation=True).to(device=device)
        ).last_hidden_state[:, 0]
return {
"text": Encoder(
            # text is passed through unchanged; tokenization happens inside `_encode`
encode=_encode, preprocess=lambda x: x
),
}
|
domino-main
|
domino/_embed/gpt_j.py
|
from typing import Union, List, Dict
import torch
from .encoder import Encoder
def transformers(
variant: str = "bert-large-cased", device: Union[int, str] = "cpu"
) -> Dict[str, Encoder]:
"""Transformer encoders
- "text"
Encoders will map these different modalities to the same embedding space.
Args:
variant (str, optional): A model name listed by `clip.available_models()`, or
the path to a model checkpoint containing the state_dict. Defaults to
"ViT-B/32".
device (Union[int, str], optional): The device on which the encoders will be
loaded. Defaults to "cpu".
"""
try:
from transformers import AutoTokenizer, AutoModel
except ImportError:
raise ImportError("To embed with transformers run `pip install transformers")
tokenizer = AutoTokenizer.from_pretrained(variant)
model = AutoModel.from_pretrained(variant)
model.to(device)
def _encode(x: List[str]) -> torch.Tensor:
# need to coerce to list in case someone passes in a pandas series or ndarray
x = list(x)
return model(
**tokenizer(x, return_tensors="pt", padding=True, truncation=True).to(device=device)
).last_hidden_state[:, 0]
return {
"text": Encoder(
            # text is passed through unchanged; tokenization happens inside `_encode`
encode=_encode,
preprocess=lambda x: x,
),
}
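# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal example of calling the text encoder returned above. The model name
# "bert-base-uncased" is just an example; any HuggingFace encoder checkpoint with a
# `last_hidden_state` output should behave the same way.
if __name__ == "__main__":
    encoders = transformers(variant="bert-base-uncased", device="cpu")
    text_encoder = encoders["text"]
    with torch.no_grad():
        emb = text_encoder.encode(["a photo of a dog", "a photo of a cat"])
    print(emb.shape)  # (2, hidden_size), e.g. (2, 768) for bert-base-uncased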
|
domino-main
|
domino/_embed/transformers.py
|
import os
from typing import Callable, Union
import meerkat as mk
import torch
from domino._embed.encoder import Encoder
from ..registry import Registry
from .bit import bit
from .clip import clip
from .robust import robust
from .transformers import transformers
__all__ = ["clip", "bit"]
encoders = Registry(name="encoders")
encoders.register(clip, aliases=[])
encoders.register(bit, aliases=[])
encoders.register(robust, aliases=[])
encoders.register(transformers, aliases=[])
def infer_modality(col: mk.AbstractColumn):
if isinstance(col, mk.ImageColumn):
return "image"
elif isinstance(col, mk.PandasSeriesColumn):
return "text"
else:
raise ValueError(f"Cannot infer modality from colummn of type {type(col)}.")
def embed(
data: mk.DataPanel,
input_col: str,
encoder: Union[str, Encoder] = "clip",
modality: str = None,
out_col: str = None,
device: Union[int, str] = "cpu",
mmap_dir: str = None,
num_workers: int = 4,
batch_size: int = 128,
**kwargs,
) -> mk.DataPanel:
"""Embed a column of data with an encoder from the encoder registry.
Examples
--------
Suppose you have an Image dataset (e.g. Imagenette, CIFAR-10) loaded into a
`Meerkat DataPanel <https://github.com/robustness-gym/meerkat>`_. You can embed the
images in the dataset with CLIP using a code snippet like:
.. code-block:: python
import meerkat as mk
from domino import embed
dp = mk.datasets.get("imagenette")
dp = embed(
data=dp,
input_col="img",
encoder="clip"
)
Args:
data (mk.DataPanel): A DataPanel containing the data to embed.
input_col (str): The name of the column to embed.
encoder (Union[str, Encoder], optional): Name of the encoder to use. List
supported encoders with ``domino.encoders``. Defaults to "clip".
Alternatively, pass an :class:`~domino._embed.encoder.Encoder` object
containing a custom encoder.
modality (str, optional): The modality of the data to be embedded. Defaults to
None, in which case the modality is inferred from the type of the input
column.
out_col (str, optional): The name of the column where the embeddings are stored.
Defaults to None, in which case it is ``"{encoder}({input_col})"``.
        device (Union[int, str], optional): The device on which the encoder runs. Defaults to "cpu".
mmap_dir (str, optional): The path to directory where a memory-mapped file
containing the embeddings will be written. Defaults to None, in which case
the embeddings are not memmapped.
num_workers (int, optional): Number of worker processes used to load the data
from disk. Defaults to 4.
        batch_size (int, optional): Size of the batches used when mapping over the data. Defaults to 128.
**kwargs: Additional keyword arguments are passed to the encoder. To see
supported arguments for each encoder, see the encoder documentation (e.g.
:func:`~domino._embed.clip`).
Returns:
mk.DataPanel: A view of ``data`` with a new column containing the embeddings.
This column will be named according to the ``out_col`` parameter.
"""
if modality is None:
modality = infer_modality(col=data[input_col])
if out_col is None:
out_col = f"{encoder}({input_col})"
encoder = encoders.get(encoder, device=device, **kwargs)
if modality not in encoder:
raise ValueError(f'Encoder "{encoder}" does not support modality "{modality}".')
encoder = encoder[modality]
return _embed(
data=data,
input_col=input_col,
out_col=out_col,
encode=encoder.encode,
preprocess=encoder.preprocess,
collate=encoder.collate,
device=device,
mmap_dir=mmap_dir,
num_workers=num_workers,
batch_size=batch_size,
)
def _embed(
data: mk.DataPanel,
input_col: str,
out_col: str,
encode: Callable,
preprocess: Callable,
collate: Callable,
device: int = None,
mmap_dir: str = None,
num_workers: int = 4,
batch_size: int = 128,
):
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
if preprocess is not None:
embed_input = data[input_col].to_lambda(preprocess)
else:
embed_input = data[input_col]
if collate is not None:
embed_input.collate_fn = collate
def _prepare_input(x):
if isinstance(x, mk.AbstractColumn):
x = x.data
if torch.is_tensor(x):
x = x.to(device)
return x
with torch.no_grad():
data[out_col] = embed_input.map(
lambda x: encode(_prepare_input(x)).cpu().detach().numpy(),
pbar=True,
is_batched_fn=True,
batch_size=batch_size,
num_workers=num_workers,
mmap=mmap_dir is not None,
mmap_path=None
if mmap_dir is None
else os.path.join(mmap_dir, "emb_mmap.npy"),
flush_size=128,
)
return data
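# --- Usage sketch (added for illustration; not part of the original module) ---
# Register a toy encoder and embed a small text column with `embed`, mirroring the
# pattern used in the test suite. The encoder name "toy" and its internals are
# illustrative only and are not part of the library.
if __name__ == "__main__":
    def toy(variant: str = "none", device: Union[int, str] = "cpu"):
        return {
            "text": Encoder(
                # average the preprocessed values and tile to a fixed embedding size
                encode=lambda x: torch.ones(x.shape[0], 4)
                * x.to(torch.float32).mean(dim=-1, keepdim=True),
                # map each string to a single-element tensor so the default collate
                # can stack a batch
                preprocess=lambda s: torch.tensor([float(len(s))]),
            )
        }

    encoders.register(toy, aliases=[])
    dp = mk.DataPanel({"text": mk.PandasSeriesColumn(["a", "bb", "ccc"])})
    dp = embed(data=dp, input_col="text", encoder="toy", batch_size=2, num_workers=0)
    print(dp["toy(text)"].shape)  # (3, 4)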
|
domino-main
|
domino/_embed/__init__.py
|
from dataclasses import dataclass
@dataclass
class Encoder:
encode: callable
preprocess: callable = None
collate: callable = None
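# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, self-contained example of how an Encoder is meant to be filled in:
# `preprocess` maps one raw example to model input and `encode` maps a batch of
# inputs to embeddings. The toy functions below are stand-ins, not real models.
if __name__ == "__main__":
    import torch

    def _toy_encode(batch: torch.Tensor) -> torch.Tensor:
        # reduce each input to its mean and tile it into an 8-dim "embedding"
        return batch.to(torch.float32).mean(dim=-1, keepdim=True).repeat(1, 8)

    def _toy_preprocess(text: str) -> torch.Tensor:
        # encode a string as its length; a real encoder would tokenize here
        return torch.tensor([float(len(text))])

    enc = Encoder(encode=_toy_encode, preprocess=_toy_preprocess)
    batch = torch.stack([enc.preprocess("hello"), enc.preprocess("world")])
    print(enc.encode(batch).shape)  # (2, 8)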
|
domino-main
|
domino/_embed/encoder.py
|
from functools import partial
import torch
def _get_reduction_fn(reduction_name):
if reduction_name == "max":
reduction_fn = partial(torch.mean, dim=[-1, -2])
elif reduction_name == "mean":
reduction_fn = partial(torch.mean, dim=[-1, -2])
else:
raise ValueError(f"reduction_fn {reduction_name} not supported.")
reduction_fn.__name__ = reduction_name
return reduction_fn
class ActivationExtractor:
"""Class for extracting activations a targetted intermediate layer"""
def __init__(self, reduction_fn: callable = None):
self.activation = None
self.reduction_fn = reduction_fn
def add_hook(self, module, input, output):
if self.reduction_fn is not None:
output = self.reduction_fn(output)
self.activation = output
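# --- Usage sketch (added for illustration; not part of the original module) ---
# Attach the extractor to an intermediate layer with a forward hook, mirroring how
# `bit.py` uses it. torchvision's resnet18 is used here only as a convenient example
# (for older torchvision, pass `pretrained=False` instead of `weights=None`).
if __name__ == "__main__":
    import torchvision

    model = torchvision.models.resnet18(weights=None).eval()
    extractor = ActivationExtractor(reduction_fn=_get_reduction_fn("mean"))
    model.layer4.register_forward_hook(extractor.add_hook)
    with torch.no_grad():
        model(torch.randn(2, 3, 224, 224))
    print(extractor.activation.shape)  # (2, 512): spatial dims reduced by the mean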
|
domino-main
|
domino/_embed/utils.py
|
import io
from collections import OrderedDict
from typing import Dict, Union
import numpy as np
import PIL
import requests
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..utils import nested_getattr
from .encoder import Encoder
from .utils import ActivationExtractor, _get_reduction_fn
# this implementation is primarily an adaptation of this colab
# https://colab.research.google.com/github/google-research/big_transfer/blob/master/colabs/big_transfer_pytorch.ipynb
def bit(
variant: str = "BiT-M-R50x1",
device: Union[int, str] = "cpu",
reduction: str = "mean",
layer: str = "body",
) -> Dict[str, Encoder]:
"""Big Transfer (BiT) encoders [kolesnivok_2019]_. Includes encoders for the
following modalities:
- "image"
Args:
variant (str): The variant of the model to use. Variants include
"BiT-M-R50x1", "BiT-M-R101x3", "Bit-M-R152x4". Defaults to "BiT-M-R50x1".
device (Union[int, str], optional): The device on which the encoders will be
loaded. Defaults to "cpu".
reduction (str, optional): The reduction function used to reduce image
embeddings of shape (batch x height x width x dimensions) to (batch x
dimensions). Defaults to "mean". Other options include "max".
        layer (str, optional): The layer of the model from which the embeddings will
            be extracted. Defaults to "body".
.. [kolesnivok_2019]
Kolesnikov, A. et al. Big Transfer (BiT): General Visual Representation
Learning. arXiv [cs.CV] (2019)
"""
try:
import torchvision as tv
except ImportError:
raise ImportError(
"To embed with bit install domino with the `bit` submodule. For example, "
"pip install domino[bit]."
)
model = _get_model(variant=variant)
layer = nested_getattr(model, layer)
extractor = ActivationExtractor(reduction_fn=_get_reduction_fn(reduction))
layer.register_forward_hook(extractor.add_hook)
model.to(device)
@torch.no_grad()
def _embed(batch: torch.tensor):
model(batch) # run forward pass, but don't collect output
return extractor.activation
return {"image": Encoder(encode=_embed, preprocess=transform)}
def transform(img: PIL.Image.Image):
import torchvision as tv
transform = tv.transforms.Compose(
[
tv.transforms.Resize(
(128, 128), interpolation=tv.transforms.InterpolationMode.BILINEAR
),
tv.transforms.ToTensor(),
tv.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
return transform(img)
def _get_weights(variant: str):
response = requests.get(f"https://storage.googleapis.com/bit_models/{variant}.npz")
response.raise_for_status()
return np.load(io.BytesIO(response.content))
def _get_model(variant: str):
weights = _get_weights(variant=variant)
# BLOCK_UNITS expects model names like "r50"
model_str = variant.split("-")[-1].split("x")[0].lower()
model = ResNetV2(ResNetV2.BLOCK_UNITS[model_str], width_factor=1)
model.load_from(weights)
return model
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-10)
return F.conv2d(
x, w, self.bias, self.stride, self.padding, self.dilation, self.groups
)
def conv3x3(cin, cout, stride=1, groups=1, bias=False):
return StdConv2d(
cin, cout, kernel_size=3, stride=stride, padding=1, bias=bias, groups=groups
)
def conv1x1(cin, cout, stride=1, bias=False):
return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0, bias=bias)
def tf2th(conv_weights):
"""Possibly convert HWIO to OIHW"""
if conv_weights.ndim == 4:
conv_weights = np.transpose(conv_weights, [3, 2, 0, 1])
return torch.from_numpy(conv_weights)
class PreActBottleneck(nn.Module):
"""
Follows the implementation of "Identity Mappings in Deep Residual Networks" here:
https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
Except it puts the stride on 3x3 conv when available.
"""
def __init__(self, cin, cout=None, cmid=None, stride=1):
super().__init__()
cout = cout or cin
cmid = cmid or cout // 4
self.gn1 = nn.GroupNorm(32, cin)
self.conv1 = conv1x1(cin, cmid)
self.gn2 = nn.GroupNorm(32, cmid)
self.conv2 = conv3x3(cmid, cmid, stride) # Original ResNetv2 has it on conv1!!
self.gn3 = nn.GroupNorm(32, cmid)
self.conv3 = conv1x1(cmid, cout)
self.relu = nn.ReLU(inplace=True)
if stride != 1 or cin != cout:
# Projection also with pre-activation according to paper.
self.downsample = conv1x1(cin, cout, stride)
def forward(self, x):
# Conv'ed branch
out = self.relu(self.gn1(x))
# Residual branch
residual = x
if hasattr(self, "downsample"):
residual = self.downsample(out)
# The first block has already applied pre-act before splitting, see Appendix.
out = self.conv1(out)
out = self.conv2(self.relu(self.gn2(out)))
out = self.conv3(self.relu(self.gn3(out)))
return out + residual
def load_from(self, weights, prefix=""):
with torch.no_grad():
self.conv1.weight.copy_(
tf2th(weights[prefix + "a/standardized_conv2d/kernel"])
)
self.conv2.weight.copy_(
tf2th(weights[prefix + "b/standardized_conv2d/kernel"])
)
self.conv3.weight.copy_(
tf2th(weights[prefix + "c/standardized_conv2d/kernel"])
)
self.gn1.weight.copy_(tf2th(weights[prefix + "a/group_norm/gamma"]))
self.gn2.weight.copy_(tf2th(weights[prefix + "b/group_norm/gamma"]))
self.gn3.weight.copy_(tf2th(weights[prefix + "c/group_norm/gamma"]))
self.gn1.bias.copy_(tf2th(weights[prefix + "a/group_norm/beta"]))
self.gn2.bias.copy_(tf2th(weights[prefix + "b/group_norm/beta"]))
self.gn3.bias.copy_(tf2th(weights[prefix + "c/group_norm/beta"]))
if hasattr(self, "downsample"):
self.downsample.weight.copy_(
tf2th(weights[prefix + "a/proj/standardized_conv2d/kernel"])
)
return self
class ResNetV2(nn.Module):
BLOCK_UNITS = {
"r50": [3, 4, 6, 3],
"r101": [3, 4, 23, 3],
"r152": [3, 8, 36, 3],
}
def __init__(self, block_units, width_factor, head_size=21843, zero_head=False):
super().__init__()
wf = width_factor # shortcut 'cause we'll use it a lot.
self.root = nn.Sequential(
OrderedDict(
[
(
"conv",
StdConv2d(
3, 64 * wf, kernel_size=7, stride=2, padding=3, bias=False
),
),
("padp", nn.ConstantPad2d(1, 0)),
("pool", nn.MaxPool2d(kernel_size=3, stride=2, padding=0)),
# The following is subtly not the same!
# ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]
)
)
self.body = nn.Sequential(
OrderedDict(
[
(
"block1",
nn.Sequential(
OrderedDict(
[
(
"unit01",
PreActBottleneck(
cin=64 * wf, cout=256 * wf, cmid=64 * wf
),
)
]
+ [
(
f"unit{i:02d}",
PreActBottleneck(
cin=256 * wf, cout=256 * wf, cmid=64 * wf
),
)
for i in range(2, block_units[0] + 1)
],
)
),
),
(
"block2",
nn.Sequential(
OrderedDict(
[
(
"unit01",
PreActBottleneck(
cin=256 * wf,
cout=512 * wf,
cmid=128 * wf,
stride=2,
),
)
]
+ [
(
f"unit{i:02d}",
PreActBottleneck(
cin=512 * wf, cout=512 * wf, cmid=128 * wf
),
)
for i in range(2, block_units[1] + 1)
],
)
),
),
(
"block3",
nn.Sequential(
OrderedDict(
[
(
"unit01",
PreActBottleneck(
cin=512 * wf,
cout=1024 * wf,
cmid=256 * wf,
stride=2,
),
)
]
+ [
(
f"unit{i:02d}",
PreActBottleneck(
cin=1024 * wf, cout=1024 * wf, cmid=256 * wf
),
)
for i in range(2, block_units[2] + 1)
],
)
),
),
(
"block4",
nn.Sequential(
OrderedDict(
[
(
"unit01",
PreActBottleneck(
cin=1024 * wf,
cout=2048 * wf,
cmid=512 * wf,
stride=2,
),
)
]
+ [
(
f"unit{i:02d}",
PreActBottleneck(
cin=2048 * wf, cout=2048 * wf, cmid=512 * wf
),
)
for i in range(2, block_units[3] + 1)
],
)
),
),
]
)
)
self.zero_head = zero_head
self.head = nn.Sequential(
OrderedDict(
[
("gn", nn.GroupNorm(32, 2048 * wf)),
("relu", nn.ReLU(inplace=True)),
("avg", nn.AdaptiveAvgPool2d(output_size=1)),
("conv", nn.Conv2d(2048 * wf, head_size, kernel_size=1, bias=True)),
]
)
)
def forward(self, x):
x = self.head(self.body(self.root(x)))
assert x.shape[-2:] == (1, 1) # We should have no spatial shape left.
return x[..., 0, 0]
def load_from(self, weights, prefix="resnet/"):
with torch.no_grad():
self.root.conv.weight.copy_(
tf2th(weights[f"{prefix}root_block/standardized_conv2d/kernel"])
)
self.head.gn.weight.copy_(tf2th(weights[f"{prefix}group_norm/gamma"]))
self.head.gn.bias.copy_(tf2th(weights[f"{prefix}group_norm/beta"]))
if self.zero_head:
nn.init.zeros_(self.head.conv.weight)
nn.init.zeros_(self.head.conv.bias)
else:
self.head.conv.weight.copy_(
tf2th(weights[f"{prefix}head/conv2d/kernel"])
)
self.head.conv.bias.copy_(tf2th(weights[f"{prefix}head/conv2d/bias"]))
for bname, block in self.body.named_children():
for uname, unit in block.named_children():
unit.load_from(weights, prefix=f"{prefix}{bname}/{uname}/")
return self
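# --- Usage sketch (added for illustration; not part of the original module) ---
# Embed a single image with the BiT encoder defined above. Note that this downloads
# the BiT-M-R50x1 weights from Google Cloud Storage on first use.
if __name__ == "__main__":
    img = PIL.Image.new("RGB", (256, 256), color=(128, 64, 32))
    encoder = bit(variant="BiT-M-R50x1", device="cpu")["image"]
    batch = encoder.preprocess(img).unsqueeze(0)  # (1, 3, 128, 128)
    print(encoder.encode(batch).shape)  # (1, 2048) after the mean reduction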
|
domino-main
|
domino/_embed/bit.py
|
import subprocess
from typing import Dict, Union
import os
from .encoder import Encoder
VARIANTS = {
"imagenet_l2_3_0": "https://www.dropbox.com/s/knf4uimlqsi1yz8/imagenet_l2_3_0.pt?dl=0",
"cifar_l2_1_0": "https://www.dropbox.com/s/s2x7thisiqxz095/cifar_l2_1_0.pt?dl=0",
"imagenet_linf_8": "https://www.dropbox.com/s/yxn15a9zklz3s8q/imagenet_linf_8.pt?dl=0"
}
def robust(
variant: str = "imagenet_l2_3_0", device: Union[int, str] = "cpu", model_path: str = None
) -> Dict[str, Encoder]:
""" Image classifier trained with adversarial robustness loss [engstrom_2019]_.
Args:
variant (str, optional): One of ["imagenet_l2_3_0", "cifar_l2_1_0",
"imagenet_linf_8"].Defaults to "imagenet_l2_3_0".
device (Union[int, str], optional): The device on which the encoders will be
loaded. Defaults to "cpu".
.. [engstrom_2019]
@misc{robustness,
title={Robustness (Python Library)},
author={Logan Engstrom and Andrew Ilyas and Hadi Salman and Shibani
Santurkar and Dimitris Tsipras},
year={2019},
url={https://github.com/MadryLab/robustness}
}
"""
model_path = (
os.path.expanduser("~/.cache/domino/robust/robust_resnet50.pth")
if model_path is None
else model_path
)
model = _load_robust_model(model_path=model_path, variant=variant).to(device)
return {
"image": Encoder(
encode=lambda x: model(x, with_latent=True)[0][1],
preprocess=_transform_image,
),
}
def _load_robust_model(model_path: str, variant: str):
try:
from robustness import model_utils
from robustness import datasets as dataset_utils
except ImportError:
raise ImportError("To embed with robust run `pip install robustness`")
# ensure model_path directories exist
os.makedirs(os.path.dirname(model_path), exist_ok=True)
    if not os.path.exists(model_path):
        subprocess.run(
            [
                "wget",
                "-O",
                model_path,
                VARIANTS[variant],
            ]
        )
dataset_function = getattr(dataset_utils, "ImageNet")
dataset = dataset_function("")
model_kwargs = {
"arch": variant,
"dataset": dataset,
"resume_path": model_path,
"parallel": False,
}
model, _ = model_utils.make_and_restore_model(**model_kwargs)
model.eval()
return model
def _transform_image(img):
from torchvision import transforms
return transforms.Compose(
[transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()]
)(img)
|
domino-main
|
domino/_embed/robust.py
|
from typing import Dict, Union
from .encoder import Encoder
def clip(
variant: str = "ViT-B/32", device: Union[int, str] = "cpu"
) -> Dict[str, Encoder]:
"""Contrastive Language-Image Pre-training (CLIP) encoders [radford_2021]_. Includes
encoders for the following modalities:
- "text"
- "image"
Encoders will map these different modalities to the same embedding space.
Args:
variant (str, optional): A model name listed by `clip.available_models()`, or
the path to a model checkpoint containing the state_dict. Defaults to
"ViT-B/32".
device (Union[int, str], optional): The device on which the encoders will be
loaded. Defaults to "cpu".
.. [radford_2021]
Radford, A. et al. Learning Transferable Visual Models From Natural Language
Supervision. arXiv [cs.CV] (2021)
"""
try:
from clip import load, tokenize
except ImportError:
raise ImportError(
"To embed with CLIP run pip install git+https://github.com/openai/CLIP.git"
"and install domino with the `clip` submodule. For example, "
"`pip install domino[clip]`"
)
model, preprocess = load(variant, device=device)
return {
"image": Encoder(encode=model.encode_image, preprocess=preprocess),
"text": Encoder(
# need to squeeze out the batch dimension for compatibility with collate
encode=model.encode_text,
preprocess=lambda x: tokenize(x, truncate=True).squeeze(0),
),
}
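# --- Usage sketch (added for illustration; not part of the original module) ---
# Encode a short list of captions with the CLIP text encoder. Assumes the `clip`
# package is installed (`pip install git+https://github.com/openai/CLIP.git`).
if __name__ == "__main__":
    import torch

    encoders = clip(variant="ViT-B/32", device="cpu")
    text_encoder = encoders["text"]
    tokens = torch.stack([text_encoder.preprocess(t) for t in ["a dog", "a cat"]])
    with torch.no_grad():
        emb = text_encoder.encode(tokens)
    print(emb.shape)  # (2, 512) for ViT-B/32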
|
domino-main
|
domino/_embed/clip.py
|
from typing import List, Tuple
from dcbench import SliceDiscoveryProblem, SliceDiscoverySolution
import meerkat as mk
import numpy as np
import sklearn.metrics as skmetrics
from domino.utils import unpack_args
from scipy.stats import rankdata
import pandas as pd
from tqdm import tqdm
def compute_metrics(
solutions: List[SliceDiscoverySolution], run_id: int = None
) -> Tuple[mk.DataPanel]:
global_metrics = []
slice_metrics = []
for solution in tqdm(solutions):
g, s = compute_solution_metrics(solution)
global_metrics.append(g)
slice_metrics.extend(s)
return mk.DataPanel(global_metrics), mk.DataPanel(slice_metrics)
def compute_solution_metrics(
solution: SliceDiscoverySolution,
):
metrics = _compute_metrics(
data=solution.merge(),
slice_target_column="slices",
slice_pred_column="slice_preds",
slice_prob_column="slice_probs",
slice_names=solution.problem.slice_names,
)
for row in metrics:
row["solution_id"] = solution.id
row["problem_id"] = solution.problem_id
return metrics
def _compute_metrics(
data: mk.DataPanel,
slice_target_column: str,
slice_pred_column: str,
slice_prob_column: str,
slice_names: List[str],
):
slice_targets, slice_preds, slice_probs = unpack_args(
data, slice_target_column, slice_pred_column, slice_prob_column
)
# consider complements of slices
slice_preds = np.concatenate([slice_preds, 1 - slice_preds], axis=1)
slice_probs = np.concatenate([slice_probs, 1 - slice_probs], axis=1)
def precision_at_k(slc: np.ndarray, pred_slice: np.ndarray, k: int = 25):
# don't need to check for zero division because we're taking the top_k
return skmetrics.precision_score(
slc, rankdata(-pred_slice, method="ordinal") <= k
)
# compute mean response conditional on the slice and predicted slice_targets
def zero_fill_nan_and_infs(x: np.ndarray):
return np.nan_to_num(x, nan=0, posinf=0, neginf=0, copy=False)
metrics = []
for slice_idx in range(slice_targets.shape[1]):
slc = slice_targets[:, slice_idx]
slice_name = slice_names[slice_idx]
for pred_slice_idx in range(slice_preds.shape[1]):
slice_pred = slice_preds[:, pred_slice_idx]
slice_prob = slice_probs[:, pred_slice_idx]
metrics.append(
{
"target_slice_idx": slice_idx,
"target_slice_name": slice_name,
"pred_slice_idx": pred_slice_idx,
"average_precision": skmetrics.average_precision_score(
y_true=slc, y_score=slice_prob
),
"precision-at-10": precision_at_k(slc, slice_prob, k=10),
"precision-at-25": precision_at_k(slc, slice_prob, k=25),
**dict(
zip(
["precision", "recall", "f1_score", "support"],
skmetrics.precision_recall_fscore_support(
y_true=slc,
y_pred=slice_pred,
average="binary",
# note: if slc is empty, recall will be 0 and if pred
# is empty precision will be 0
zero_division=0,
),
)
),
}
)
df = pd.DataFrame(metrics)
primary_metric = "average_precision"
slice_metrics = df.iloc[
df.groupby("target_slice_name")[primary_metric].idxmax().astype(int)
]
return slice_metrics.to_dict("records")
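# --- Worked example (added for illustration; not part of the original module) ---
# Illustrates what the nested `precision_at_k` helper above computes: the precision
# of a detector that flags the k examples with the highest predicted slice probability.
if __name__ == "__main__":
    slc = np.array([1, 1, 0, 0, 0, 1])
    slice_prob = np.array([0.9, 0.2, 0.8, 0.1, 0.05, 0.7])
    k = 3
    in_top_k = rankdata(-slice_prob, method="ordinal") <= k
    # two of the top-3 scored examples are truly in the slice -> precision 2/3
    print(skmetrics.precision_score(slc, in_top_k))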
|
domino-main
|
domino/eval/metrics.py
|
from __future__ import annotations
from contextlib import redirect_stdout
import dataclasses
import io
import itertools
from random import choice, sample
from typing import Collection, Dict, Iterable, List, Tuple, Union
from dataclasses import dataclass
from sklearn.linear_model import LinearRegression
import sklearn.metrics as skmetrics
import pandas as pd
from scipy.stats import rankdata
import terra
import numpy as np
from domino.eval.metrics import compute_solution_metrics
import meerkat as mk
from tqdm.auto import tqdm
import os
from domino import embed, generate_candidate_descriptions
from domino.utils import unpack_args
from dcbench import SliceDiscoveryProblem, SliceDiscoverySolution
def _run_sdms(problems: List[SliceDiscoveryProblem], **kwargs):
result = []
for problem in problems:
# f = io.StringIO()
# with redirect_stdout(f):
result.append(run_sdm(problem, **kwargs))
return result
def run_sdms(
problems: List[SliceDiscoveryProblem],
slicer_class: type,
slicer_config: dict,
emb_dp: mk.DataPanel,
embedding_col: str = "emb",
batch_size: int = 1,
num_workers: int = 0,
):
if num_workers > 0:
import ray
ray.init()
run_fn = ray.remote(_run_sdms).remote
else:
run_fn = _run_sdms
total_batches = len(problems)
results = []
t = tqdm(total=total_batches)
for start_idx in range(0, len(problems), batch_size):
batch = problems[start_idx : start_idx + batch_size]
result = run_fn(
problems=batch,
emb_dp=emb_dp,
embedding_col=embedding_col,
# candidate_descriptions=candidate_descriptions,
slicer_class=slicer_class,
slicer_config=slicer_config,
)
if num_workers == 0:
t.update(n=len(result))
results.extend(result)
else:
# in the parallel case, this is a single object reference
# moreover, the remote returns immediately so we don't update tqdm
results.append(result)
if num_workers > 0:
# if we're working in parallel, we need to wait for the results to come back
# and update the tqdm accordingly
result_refs = results
results = []
while result_refs:
done, result_refs = ray.wait(result_refs)
for result in done:
result = ray.get(result)
results.extend(result)
t.update(n=len(result))
ray.shutdown()
solutions, metrics = zip(*results)
# flatten the list of lists
metrics = [row for slices in metrics for row in slices]
return solutions, pd.DataFrame(metrics)
def run_sdm(
problem: SliceDiscoveryProblem,
# candidate_descriptions: Descriptions,
slicer_class: type,
slicer_config: dict,
emb_dp: mk.DataPanel,
embedding_col: str = "emb",
) -> SliceDiscoverySolution:
val_dp = problem.merge(split="val")
val_dp = val_dp.merge(emb_dp["id", embedding_col], on="id", how="left")
slicer = slicer_class(pbar=False, **slicer_config)
slicer.fit(val_dp, embeddings=embedding_col, targets="target", pred_probs="probs")
test_dp = problem.merge(split="test")
test_dp = test_dp.merge(emb_dp["id", embedding_col], on="id", how="left")
result = mk.DataPanel({"id": test_dp["id"]})
result["slice_preds"] = slicer.predict(
test_dp, embeddings=embedding_col, targets="target", pred_probs="probs"
)
result["slice_probs"] = slicer.predict_proba(
test_dp, embeddings=embedding_col, targets="target", pred_probs="probs"
)
# descriptions = slicer.describe(
# text_data=candidate_descriptions.dp,
# text_embeddings=candidate_descriptions.embedding_column,
# text_descriptions=candidate_descriptions.description_column,
# num_descriptions=5,
# )
solution = SliceDiscoverySolution(
artifacts={
"pred_slices": result,
},
attributes={
"problem_id": problem.id,
"slicer_class": slicer_class,
"slicer_config": slicer_config,
"embedding_column": embedding_col,
},
)
metrics = compute_solution_metrics(
solution,
)
return solution, metrics
|
domino-main
|
domino/eval/run.py
|
domino-main
|
domino/eval/__init__.py
|
|
from typing import Union
import meerkat as mk
import numpy as np
import pandas as pd
class CorrelationImpossibleError(ValueError):
def __init__(
self,
corr: float,
n: int,
attr_a: str,
attr_b: str,
mu_a: float,
mu_b: float,
msg: str,
):
super().__init__(
f"Cannot achieve correlation of {corr} while creating sample with {int(n)} "
f"examples and means of {mu_a:0.3f} and {mu_b:0.3f} for attributes "
f"{attr_a} and {attr_b} respectively. " + msg
)
def induce_correlation(
dp: Union[pd.DataFrame, mk.DataPanel],
corr: float,
n: int,
attr_a: str,
attr_b: str,
mu_a: float = None,
mu_b: float = None,
match_mu: bool = False,
replace: bool = False,
):
"""
Induce a correlation `corr` between two boolean columns `attr_a` and `attr_b` by
subsampling `df`, while maintaining mean and variance. If `match_mu` is `True` then
take the minimum mean among the two attributes and use it for both.
"""
if mu_a is None:
mu_a = dp[attr_a].mean()
if mu_b is None:
mu_b = dp[attr_b].mean()
if match_mu:
mu = min(mu_a, mu_b)
mu_a, mu_b = mu, mu
var_a = (mu_a) * (1 - mu_a)
var_b = (mu_b) * (1 - mu_b)
n_a1 = mu_a * n
n_b1 = mu_b * n
n_1 = (n_a1 * n_b1 / n) + corr * np.sqrt(var_a * var_b) * (n - 1)
n_0 = n - (n_a1 + n_b1 - n_1)
n_a1_b0 = n_a1 - n_1
n_a0_b1 = n_b1 - n_1
both_1 = (dp[attr_a] == 1) & (dp[attr_b] == 1)
both_0 = (dp[attr_a] == 0) & (dp[attr_b] == 0)
# check if requested correlation is possible
msg = None
if int(n_a1) > dp[attr_a].sum():
msg = "Not enough samples where a=1. Try a lower mu_a."
elif int(n_b1) > dp[attr_b].sum():
msg = "Not enough samples where b=1. Try a lower mu_b."
elif int(n_1) > both_1.sum():
msg = "Not enough samples where a=1 and b=1. Try a lower corr or smaller n."
elif int(n_0) > both_0.sum():
msg = "Not enough samples where a=0 and b=0. Try a lower corr or smaller n."
elif int(n_a1_b0) > (dp[attr_a] & (1 - both_1)).sum():
msg = "Not enough samples where a=1 and b=0. Try a higher corr or smaller n."
elif int(n_a0_b1) > (dp[attr_b] & (1 - both_1)).sum():
msg = "Not enough samples where a=0 and b=1. Try a higher corr or smaller n."
    elif n_1 < 0:
        msg = "Insufficient variance for desired corr. Try mu_a or mu_b closer to 0.5."
    elif n_0 < 0:
        msg = "Too few samples where a=0 and b=0 are implied. Try a lower corr or a smaller n."
    elif (n_1 > n_a1) or (n_1 > n_b1) or n_1 < 0 or n_0 < 0:
        msg = "Requested correlation is infeasible with the given means and n."
if msg is not None:
raise CorrelationImpossibleError(corr, n, attr_a, attr_b, mu_a, mu_b, msg)
indices = []
indices.extend(
np.random.choice(np.where(both_1)[0], size=int(n_1), replace=replace)
)
indices.extend(
np.random.choice(
np.where(dp[attr_a] & (1 - both_1))[0], size=int(n_a1_b0), replace=replace
)
)
indices.extend(
np.random.choice(
np.where(dp[attr_b] & (1 - both_1))[0], size=int(n_a0_b1), replace=replace
)
)
indices.extend(
np.random.choice(
np.where(both_0)[0],
size=int(n_0),
replace=replace,
)
)
np.random.shuffle(indices)
return indices
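# --- Usage sketch (added for illustration; not part of the original module) ---
# Draw a subsample with an approximate correlation of 0.5 between two balanced
# boolean attributes and check the achieved correlation empirically.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    dp = pd.DataFrame(
        {
            "a": rng.binomial(1, 0.5, size=20_000),
            "b": rng.binomial(1, 0.5, size=20_000),
        }
    )
    indices = induce_correlation(dp, corr=0.5, n=2_000, attr_a="a", attr_b="b")
    sample = dp.iloc[indices]
    print(np.corrcoef(sample["a"], sample["b"])[0, 1])  # approximately 0.5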
|
domino-main
|
domino/eval/utils.py
|
from __future__ import annotations
import meerkat as mk
import torch
import PIL
from torchvision.models import ResNet as _ResNet
from torchvision.models.resnet import BasicBlock, Bottleneck
from torchvision.models.resnet import model_urls as resnet_model_urls
from torch.hub import load_state_dict_from_url
from torchvision import transforms
from torch.utils.data import DataLoader
import torchmetrics
from torch import nn
import pytorch_lightning as pl
from typing import Union, Sequence, Iterable, List
def train(
dp: mk.DataPanel,
input_column: str,
target_column: Union[Sequence[str], str],
id_column: str,
model: Classifier = None,
config: dict = None,
num_classes: int = 2,
max_epochs: int = 50,
gpus: Union[int, Iterable] = 1,
num_workers: int = 6,
batch_size: int = 16,
train_split: str = "train",
valid_split: str = "valid",
pbar: bool = True,
seed: int = 123,
run_dir: str = None,
**kwargs,
):
# see here for preprocessing https://github.com/pytorch/vision/issues/39#issuecomment-403701432
# Note from https://pytorch-lightning.readthedocs.io/en/0.8.3/multi_gpu.html: Make sure to set the random seed so that each model initializes with the same weights.
pl.utilities.seed.seed_everything(seed)
if (model is not None) and (config is not None):
raise ValueError("Cannot pass both `model` and `config`.")
if model is None:
config = {} if config is None else config
config["num_classes"] = num_classes
model = Classifier(config)
model.train()
trainer = pl.Trainer(
gpus=gpus,
max_epochs=max_epochs,
log_every_n_steps=1,
default_root_dir=run_dir,
accelerator=None,
auto_select_gpus=True,
progress_bar_refresh_rate=None if pbar else 0,
**kwargs,
)
dp = mk.DataPanel.from_batch(
{
"input": dp[input_column],
"target": dp[target_column].astype(int),
"id": dp[id_column],
"split": dp["split"],
}
)
train_dp = dp.lz[dp["split"] == train_split]
if model.config.get("train_transform", None) is not None:
train_dp["input"] = train_dp["input"].to_lambda(model.config["train_transform"])
train_dl = DataLoader(
train_dp,
batch_size=batch_size,
num_workers=num_workers,
)
valid_dp = dp.lz[dp["split"] == valid_split]
if model.config.get("transform", None) is not None:
valid_dp["input"] = valid_dp["input"].to_lambda(model.config["transform"])
valid_dl = DataLoader(
valid_dp, batch_size=batch_size, num_workers=num_workers, shuffle=True
)
trainer.fit(model, train_dl, valid_dl)
return model
def default_transform(img: PIL.Image.Image):
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)(img)
def default_train_transform(img: PIL.Image.Image):
return transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)(img)
class ResNet(_ResNet):
ACTIVATION_DIMS = [64, 128, 256, 512]
ACTIVATION_WIDTH_HEIGHT = [64, 32, 16, 8]
RESNET_TO_ARCH = {"resnet18": [2, 2, 2, 2], "resnet50": [3, 4, 6, 3]}
def __init__(
self,
num_classes: int,
arch: str = "resnet18",
dropout: float = 0.0,
pretrained: bool = True,
):
if arch not in self.RESNET_TO_ARCH:
raise ValueError(
f"config['classifier'] must be one of: {self.RESNET_TO_ARCH.keys()}"
)
block = BasicBlock if arch == "resnet18" else Bottleneck
super().__init__(block, self.RESNET_TO_ARCH[arch])
if pretrained:
state_dict = load_state_dict_from_url(
resnet_model_urls[arch], progress=True
)
self.load_state_dict(state_dict)
# self.fc = nn.Linear(512 * block.expansion, num_classes)
self.fc = nn.Sequential(
nn.Dropout(dropout), nn.Linear(512 * block.expansion, num_classes)
)
class Classifier(pl.LightningModule):
DEFAULT_CONFIG = {
"lr": 1e-4,
"model_name": "resnet",
"arch": "resnet18",
"pretrained": True,
"num_classes": 2,
"transform": default_transform,
"train_transform": default_train_transform,
}
def __init__(self, config: dict = None):
super().__init__()
self.config = self.DEFAULT_CONFIG.copy()
if config is not None:
self.config.update(config)
self._set_model()
metrics = self.config.get("metrics", ["auroc", "accuracy"])
self.metrics = self._get_metrics(
metrics, num_classes=self.config["num_classes"]
)
def _get_metrics(self, metrics: List[str], num_classes: int = None):
num_classes = self.config["num_classes"] if num_classes is None else num_classes
_metrics = {
"accuracy": torchmetrics.Accuracy(compute_on_step=False),
"auroc": torchmetrics.AUROC(compute_on_step=False, num_classes=num_classes),
# TODO (Sabri): Use sklearn metrics here, torchmetrics doesn't handle case
# there are only a subset of classes in a test set
"macro_f1": torchmetrics.F1(num_classes=num_classes, average="macro"),
"macro_recall": torchmetrics.Recall(
num_classes=num_classes, average="macro"
),
}
return nn.ModuleDict(
{name: metric for name, metric in _metrics.items() if name in metrics}
) # metrics need to be child module of the model, https://pytorch-lightning.readthedocs.io/en/stable/metrics.html#metrics-and-devices
def _set_model(self):
if self.config["model_name"] == "resnet":
self.model = ResNet(
num_classes=self.config["num_classes"],
arch=self.config["arch"],
pretrained=self.config["pretrained"],
)
else:
raise ValueError(f"Model name {self.config['model_name']} not supported.")
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
inputs, targets, _ = batch["input"], batch["target"], batch["id"]
outs = self.forward(inputs)
loss = nn.functional.cross_entropy(outs, targets)
self.log("train_loss", loss, on_step=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
inputs, targets, sample_id = batch["input"], batch["target"], batch["id"]
outs = self.forward(inputs)
loss = nn.functional.cross_entropy(outs, targets)
self.log("valid_loss", loss)
for metric in self.metrics.values():
metric(torch.softmax(outs, dim=-1), targets)
def validation_epoch_end(self, outputs) -> None:
for metric_name, metric in self.metrics.items():
self.log(f"valid_{metric_name}", metric.compute())
metric.reset()
def test_epoch_end(self, outputs) -> None:
return self.validation_epoch_end(outputs)
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.config["lr"])
return optimizer
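# --- Usage sketch (added for illustration; not part of the original module) ---
# Instantiate the default classifier and run a forward pass on a random batch.
# Assumes the older pytorch-lightning / torchmetrics versions this module targets
# (e.g. `torchmetrics.F1` and `compute_on_step` still exist); with `pretrained=False`
# no ImageNet weights are downloaded.
if __name__ == "__main__":
    model = Classifier(config={"pretrained": False, "num_classes": 2})
    out = model(torch.randn(2, 3, 224, 224))
    print(out.shape)  # torch.Size([2, 2])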
|
domino-main
|
domino/eval/train.py
|
domino-main
|
tests/__init__.py
|
|
import os
import meerkat as mk
import numpy as np
from PIL import Image
from sklearn.datasets import make_blobs
import torch
class ImageColumnTestBed:
def __init__(
self,
tmpdir: str,
length: int = 16,
):
self.image_paths = []
self.image_arrays = []
self.ims = []
self.data = []
for i in range(0, length):
self.image_arrays.append((i * np.ones((4, 4, 3))).astype(np.uint8))
im = Image.fromarray(self.image_arrays[-1])
self.ims.append(im)
self.data.append(im)
filename = "{}.png".format(i)
im.save(os.path.join(tmpdir, filename))
self.image_paths.append(os.path.join(tmpdir, filename))
self.col = mk.ImageColumn.from_filepaths(
self.image_paths,
loader=Image.open,
)
class TextColumnTestBed:
def __init__(self, length: int = 16):
self.data = ["Row " * idx for idx in range(length)]
self.col = mk.PandasSeriesColumn(self.data)
class SliceTestBed:
def __init__(self, length: int = 16, type: str = "numpy"):
if type == "numpy":
conversion = lambda x: x
elif type == "torch":
conversion = torch.tensor
else:
raise ValueError("Unknown type: {}".format(type))
gaussian_means = np.array(
[
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
[5.0, 2.0, 0.0, 1.0, 10.0],
[1.0, 10.0, 2.0, 2.0, 0.0],
]
)
emb, clusters = make_blobs(
n_samples=length, centers=gaussian_means, cluster_std=1.0, random_state=42
)
targets = clusters == 1
preds = (clusters == 1) | (clusters == 2)
self.clusters = clusters
self.dp = mk.DataPanel(
{
"embedding": conversion(emb),
"target": conversion(targets),
"pred_probs": conversion(
np.stack([1 - preds, preds], axis=1).astype(float)
),
"losses": conversion(np.abs(targets ^ preds).astype(float)),
}
)
|
domino-main
|
tests/testbeds.py
|
domino-main
|
tests/_describe/__init__.py
|
|
from sklearn import metrics
import pytest
from itertools import product
from domino import DominoSlicer
from ..testbeds import SliceTestBed
@pytest.mark.parametrize(
"init_params,type", product(["random", "confusion"], ["numpy", "torch"])
)
def test_domino_results(init_params: str, type: str):
testbed = SliceTestBed(length=9, type=type)
domino = DominoSlicer(
n_slices=5,
n_mixture_components=5,
n_pca_components=None,
init_params=init_params,
random_state=42,
)
domino.fit(data=testbed.dp)
pred_slices = domino.predict(data=testbed.dp)
assert metrics.rand_score(testbed.clusters, pred_slices.argmax(axis=-1)) == 1.0
prob_slices = domino.predict_proba(data=testbed.dp)
assert (pred_slices.argmax(axis=-1) == prob_slices.argmax(axis=-1)).all()
|
domino-main
|
tests/_slice/test_domino.py
|
domino-main
|
tests/_slice/__init__.py
|
|
from sklearn import metrics
import pytest
import numpy as np
from domino import SpotlightSlicer
from ..testbeds import SliceTestBed
@pytest.mark.parametrize("pass_losses", [True, False])
def test_domino_results(pass_losses):
testbed = SliceTestBed(length=9)
method = SpotlightSlicer(n_slices=2, n_steps=3)
if pass_losses:
kwargs = {"losses": "losses"}
else:
kwargs = {"targets": "target", "pred_probs": "pred_probs"}
method.fit(data=testbed.dp, **kwargs)
pred_slices = method.predict(data=testbed.dp, **kwargs)
# assert output is a numpy array
assert isinstance(pred_slices, np.ndarray)
# assert that the shape of the array is (n_samples, n_slices)
assert pred_slices.shape == (len(testbed.dp), 2)
prob_slices = method.predict_proba(data=testbed.dp, **kwargs)
# assert output is a numpy array
assert isinstance(prob_slices, np.ndarray)
# assert that the shape of the array is (n_samples, n_slices)
assert prob_slices.shape == (len(testbed.dp), 2)
|
domino-main
|
tests/_slice/test_spotlight.py
|
import meerkat as mk
import PIL
import pytest
import torch
import hashlib
import numpy as np
import domino
from domino import embed, encoders
from domino._embed.encoder import Encoder
from domino.registry import Registry
from ..testbeds import ImageColumnTestBed, TextColumnTestBed
EMB_SIZE = 4
def simple_encode(batch: torch.Tensor):
value = batch.to(torch.float32).mean(dim=-1, keepdim=True)
return torch.ones(batch.shape[0], EMB_SIZE) * value
def simple_image_transform(image: PIL.Image):
return torch.tensor(np.asarray(image)).to(torch.float32)
def simple_text_transform(text: str):
return torch.tensor(
[
int.from_bytes(hashlib.sha256(token.encode("utf-8")).digest(), "big") % 100
for token in text.split(" ")
]
)[:1]
def _simple_encoder(variant: str = "ViT-B/32", device: str = "cpu"):
return {
"image": Encoder(encode=simple_encode, preprocess=simple_image_transform),
"text": Encoder(encode=simple_encode, preprocess=simple_text_transform),
}
@pytest.fixture()
def simple_encoder(monkeypatch):
if "_simple_encoder" not in encoders.names:
encoders.register(_simple_encoder)
return _simple_encoder
def test_embed_images(tmpdir: str, simple_encoder):
image_testbed = ImageColumnTestBed(tmpdir=tmpdir)
dp = mk.DataPanel({"image": image_testbed.col})
dp = embed(
data=dp,
input_col="image",
encoder="_simple_encoder",
batch_size=4,
num_workers=0,
)
assert isinstance(dp, mk.DataPanel)
assert "_simple_encoder(image)" in dp
assert (
simple_image_transform(dp["image"][0]).mean()
== dp["_simple_encoder(image)"][0].mean()
)
def test_embed_text(simple_encoder):
testbed = TextColumnTestBed()
dp = mk.DataPanel({"text": testbed.col})
dp = embed(
data=dp,
input_col="text",
encoder="_simple_encoder",
batch_size=4,
num_workers=0,
)
assert isinstance(dp, mk.DataPanel)
assert "_simple_encoder(text)" in dp
assert (
simple_text_transform(dp["text"][0]).to(torch.float32).mean()
== dp["_simple_encoder(text)"][0].mean()
)
def test_encoders_repr():
assert isinstance(domino.encoders, Registry)
assert isinstance(domino.encoders.__repr__(), str)
|
domino-main
|
tests/_embed/test__init__.py
|
domino-main
|
tests/_embed/__init__.py
|
|
domino-main
|
tests/_embed/test_clip.py
|
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from pathlib import Path
version_path = Path(__file__).parent.parent.parent / "domino" / "version.py"
metadata = {}
with open(str(version_path)) as ver_file:
exec(ver_file.read(), metadata)
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath(".."))
sys.path.insert(0, os.path.abspath("../.."))
sys.setrecursionlimit(1500)
# -- Project information -----------------------------------------------------
project = "domino"
copyright = "2021"
author = "Sabri Eyuboglu, Maya Varma, Khaled Saab, and others"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinx.ext.autodoc.typehints",
"sphinx.ext.autosummary",
"IPython.sphinxext.ipython_directive",
"IPython.sphinxext.ipython_console_highlighting",
"sphinx_panels",
"jupyter_sphinx",
"sphinx_rtd_theme",
"nbsphinx",
"recommonmark",
]
autodoc_typehints = "description"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pydata_sphinx_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
html_logo = "../assets/logo.png"
html_theme_options = {}
# Don't show module names in front of class names.
add_module_names = False
|
domino-main
|
docs/source/conf.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import subprocess
from subprocess import check_output
import os
import embeddings
class VecMap:
"""
wrapper for vecmap https://github.com/artetxem/vecmap
assumes vecmap is in the directory ./vecmap
"""
def __init__(self, srcvec, tgtvec, dictpath, outdir, config):
self.srcvec = srcvec
self.tgtvec = tgtvec
self.outdir = outdir
self.dictpath = dictpath
self.flags = ''
self.config = config
def add_flag(v):
self.flags += ' ' + v
add_flag('--verbose')
add_flag('--orthogonal')
# default is 50, but I want faster
add_flag('--stochastic_interval 3')
add_flag('--csls 10')
if dictpath is not None:
add_flag(f'--validation {dictpath}')
logdir = os.path.join(self.outdir, 'vecmap.log')
add_flag(f'--log {logdir}')
if config.supervision == 'identical':
add_flag('--identical')
elif config.supervision == 'init_identical':
add_flag('--init_identical')
elif config.supervision == 'numeral':
add_flag('--init_numeral')
elif config.supervision == 'unsupervised':
add_flag('--init_unsupervised')
add_flag('--unsupervised')
else:
raise Exception('invalid type of supervision: ' + config.supervision)
# if config.init_dict:
# add_flag(f'--dictionary {config.init_dict}')
def run(self):
srcvec = self.srcvec
tgtvec = self.tgtvec
srcvec_out = os.path.join(self.outdir, 'src.out.vec')
tgtvec_out = os.path.join(self.outdir, 'tgt.out.vec')
cmd = f'python vecmap/map_embeddings.py {srcvec} {tgtvec} {srcvec_out} {tgtvec_out} {self.flags} --verbose'
print(cmd)
process = subprocess.Popen(cmd.split())
output, error = process.communicate()
def vecs(self):
srcvec_out = os.path.join(self.outdir, 'src.out.vec')
tgtvec_out = os.path.join(self.outdir, 'tgt.out.vec')
srcfile = open(srcvec_out, encoding='utf-8', errors='surrogateescape')
tgtfile = open(tgtvec_out, encoding='utf-8', errors='surrogateescape')
src_words, srcvec = embeddings.read(srcfile)
tgt_words, tgtvec = embeddings.read(tgtfile)
return srcvec, tgtvec
def eval(self, dictpath):
srcvec_out = os.path.join(self.outdir, 'src.out.vec')
tgtvec_out = os.path.join(self.outdir, 'tgt.out.vec')
cmd = f'python vecmap/eval_translation.py {srcvec_out} {tgtvec_out} -d {dictpath} --retrieval csls -k 10'
print(cmd)
out = check_output(cmd.split())
def cov_acc(sout):
toks = sout.decode('utf8').replace(': ', ' ').replace(':', ' ').split()
print(sout)
cov = float(toks[1].strip('%'))
acc = float(toks[3].strip('%'))
return ({'accuracy': acc, 'coverage': cov})
return cov_acc(out)
|
coocmap-main
|
baselines.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
from typing import Optional
import collections
import numpy as np
import pandas as pd
from tokenizers import Tokenizer
# faithfully recreate the protocol of vecmap with minimal code modifications
def vecmap_evaluate(sim: np.ndarray, tokenizer1: Tokenizer, tokenizer2: Tokenizer, refpath: str):
# https://github.com/artetxem/vecmap/blob/master/map_embeddings.py#L225
# precision only, count oovs
with open(refpath, encoding='utf-8', errors='surrogateescape') as f:
validation = collections.defaultdict(set)
oov = set()
vocab = set()
for line in f:
try:
src, trg = line.split()
except ValueError:
continue
try:
src_ind = tokenizer1.token_to_id(src)
trg_ind = tokenizer2.token_to_id(trg)
if src_ind is None or trg_ind is None:
raise KeyError
if src_ind >= sim.shape[0] or trg_ind >= sim.shape[1]:
raise KeyError
validation[src_ind].add(trg_ind)
vocab.add(src)
except KeyError:
oov.add(src)
oov -= vocab # If one of the translation options is in the vocabulary, then the entry is not an oov
validation_coverage = len(validation) / (len(validation) + len(oov))
# https://github.com/artetxem/vecmap/blob/master/map_embeddings.py#L383
src = list(validation.keys())
# xw[src].dot(zw.T, out=simval)
srct = [s for s in src if s < sim.shape[0]]
simval = sim[srct]
nn = np.nanargmax(simval, axis=1)
accuracy = np.mean([1 if nn[i] in validation[src[i]] else 0 for i in range(len(src))])
similarity = np.mean([max([simval[i, j].tolist() for j in validation[src[i]]]) for i in range(len(src))])
return {'accuracy': accuracy, 'similarity': similarity, 'coverage': validation_coverage}
def get_refdict(refpath):
with open(refpath, encoding='utf-8', errors='surrogateescape') as f:
val = collections.defaultdict(set)
for line in f:
try:
src, trg = line.split()
except ValueError:
continue
val[src].add(trg)
return val
def report_sim(sim: np.ndarray, tokenizer1: Tokenizer, tokenizer2: Tokenizer, refpath: Optional[str]):
# ind_src = np.arange(sim.shape[0])
kth = range(3)
ind_tgt = np.argpartition(-sim, kth, axis=1)
res = []
maxes = []
stats = {}
if refpath is not None:
refdict = get_refdict(refpath)
# keys: accuracy, coverage, similarity
vecmapres = vecmap_evaluate(sim, tokenizer1, tokenizer2, refpath)
stats = vecmapres
else:
refdict = collections.defaultdict(set)
for i in range(sim.shape[0]):
char = tokenizer1.id_to_token(i)
pred = tokenizer2.id_to_token(ind_tgt[i][0])
preds = ' '.join(tokenizer2.id_to_token(j) for j in ind_tgt[i][kth])
gap = sim[i][ind_tgt[i][0]] - sim[i][ind_tgt[i][1]]
maxes.append(sim[i][ind_tgt[i][0]])
res.append({
'char': char,
# 'id': i,
'pred': pred,
'preds': preds,
'eq': char == pred,
# 'gap': gap,
# 'max': maxes[i],
'correct': pred in refdict[char],
'refs': ' '.join(refdict[char])
})
# print(res)
df = pd.DataFrame.from_records(res)
neq = len(df[df['char'] == df['pred']])
ncorrect = len(df[df['correct']==True])
stats['nidentical'] = neq
stats['mean_max'] = np.mean(maxes)
    stats['ncorrect'] = ncorrect
# print(stats)
return df, stats
def _dict_to_inds(refpath, tok1, tok2, full=False):
refdict = get_refdict(refpath)
for src, trgs in refdict.items():
src_ind = tok1.token_to_id(src)
if src_ind is None:
continue
trg_inds = [tok2.token_to_id(trg) for trg in trgs]
trg_inds = [trg_ind for trg_ind in trg_inds if trg_ind is not None]
if full:
for trg_ind in trg_inds:
yield src_ind, trg_ind
elif len(trg_inds) > 0:
trg_ind = trg_inds[0]
yield src_ind, trg_ind
def dict_to_inds(refpath, tok1, tok2, full=False):
return list(zip(*_dict_to_inds(refpath, tok1, tok2, full=full)))
def label_preds(preds, refpath: Optional[str]):
    # ind_src = np.arange(sim.shape[0])
    if refpath is not None:
        refdict = get_refdict(refpath)
        print('size of dictionary', len(refdict.keys()))
    else:
        refdict = collections.defaultdict(set)
    res = []
    ws = []
for w, v in preds:
res.append(
{
'src': w,
'trg': v,
'correct': v in refdict[w],
'wrong': w in refdict and v not in refdict[w],
'identical': w == v,
'refs': ' '.join(refdict[w]),
}
)
ws.append(w)
if len(ws) != len(set(ws)):
print('WARNING: duplicate words exist in the predictions')
# print(res)
df = pd.DataFrame.from_records(res)
def boolcount(prop):
return len(df[df[prop]==True])
nidentical = boolcount('identical')
ncorrect = boolcount('correct')
    nwrong = boolcount('wrong')
    accuracy = ncorrect / (ncorrect + nwrong) if (ncorrect + nwrong) else 0.0
    coverage = (ncorrect + nwrong) / len(refdict) if len(refdict) else 0.0
noov = len(refdict) - (ncorrect + nwrong)
stats = {'nidentical': nidentical, 'ncorrect': ncorrect, 'noov': noov, 'accuracy': accuracy, 'coverage': coverage}
return df, stats
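# --- Usage sketch (added for illustration; not part of the original module) ---
# Score a toy list of predicted translation pairs against a two-entry reference
# dictionary written to a temporary file. The word pairs are made up for illustration.
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".dict", delete=False, encoding="utf-8") as f:
        f.write("dog kutya\ncat macska\n")
        refpath = f.name
    preds = [("dog", "kutya"), ("cat", "kutya")]
    df, stats = label_preds(preds, refpath)
    # one correct pair and one wrong pair, covering both dictionary entries
    print(stats)  # {'nidentical': 0, 'ncorrect': 1, 'noov': 0, 'accuracy': 0.5, 'coverage': 1.0}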
|
coocmap-main
|
evaluation.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Copyright (C) 2016-2018 Mikel Artetxe <artetxem@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import numpy as np
def get_array_module(x):
return np
def read(file, threshold=0, vocabulary=None, dtype='float'):
header = file.readline().split(' ')
count = int(header[0]) if threshold <= 0 else min(threshold, int(header[0]))
dim = int(header[1])
words = []
matrix = np.empty((count, dim), dtype=dtype) if vocabulary is None else []
for i in range(count):
word, vec = file.readline().split(' ', 1)
if word.strip() == '':
word2 = str(word.encode("utf-8"))
print(f'Warning: only space chars in word ({word2})', file=sys.stderr)
if vocabulary is None:
words.append(word)
matrix[i] = np.fromstring(vec, sep=' ', dtype=dtype)
elif word in vocabulary:
words.append(word)
matrix.append(np.fromstring(vec, sep=' ', dtype=dtype))
return (words, matrix) if vocabulary is None else (words, np.array(matrix, dtype=dtype))
def write(words, matrix, file):
m = matrix
print('%d %d' % m.shape, file=file)
for i in range(len(words)):
print(words[i] + ' ' + ' '.join(['%.6g' % x for x in m[i]]), file=file)
def length_normalize(matrix):
xp = get_array_module(matrix)
norms = xp.sqrt(xp.sum(matrix**2, axis=1))
norms[norms == 0] = 1
matrix /= norms[:, xp.newaxis]
def mean_center(matrix):
xp = get_array_module(matrix)
avg = xp.mean(matrix, axis=0)
matrix -= avg
def normalize(matrix, actions):
for action in actions:
if action == 'unit':
length_normalize(matrix)
elif action == 'center':
mean_center(matrix)
#################### End of original code ##################################
#################### Start of new code ##################################
else:
all = globals()
from inspect import isfunction
if action in all and isfunction(all[action]):
all[action](matrix)
else:
raise Exception('Unknown action: ' + action)
def sqrt(matrix):
xp = get_array_module(matrix)
matrix[:] = xp.sqrt(matrix)
def median_center(matrix):
xp = get_array_module(matrix)
# m = xp.median(matrix, axis=0)
m = np.percentile(matrix, q=50, axis=0)
matrix -= m
def pmi(X):
eps = 1e-8
rs = X.sum(axis=0, keepdims=True)
cs = X.sum(axis=1, keepdims=True)
X /= rs + eps
X /= cs + eps
def levy2014k(X, k=1):
eps = 1e-8
sum1 = np.sum(np.abs(X), axis=1, keepdims=True) + eps
sum0 = np.sum(np.abs(X), axis=0, keepdims=True) + eps
N = np.sum(X)
X[:] = np.maximum(0, np.log(X) + np.log(N) - np.log(sum1) - np.log(sum0) - np.log(k))
def levy2014_k5(X):
levy2014k(X, k=5)
def levy2014(X):
levy2014k(X, k=1)
def log(X):
X[:] = np.maximum(0, np.log(X))
def log1p(X):
X[:] = np.log(1 + X)
def glove(X):
# (8) of the glove paper: https://aclanthology.org/D14-1162.pdf
Y = np.log(1+X)
for _ in range(5):
bi = np.mean(Y, axis=1, keepdims=True)
Y -= bi
bj = np.mean(Y, axis=0, keepdims=True)
Y -= bj
print('bi ', np.mean(np.abs(bi)))
if np.mean(np.abs(bi)) > 1e-6:
print('bi failed', np.mean(np.abs(bi)))
if np.mean(np.abs(bj)) > 1e-6:
print('bj failed', np.mean(np.abs(bj)))
X[:] = Y
def unitL1(X):
norm1 = np.sum(np.abs(X), axis=1, keepdims=True)
norm1[norm1 == 0] = 1
X /= norm1
def fung1997(X):
from scipy.special import xlogy
sum1 = np.sum(np.abs(X), axis=1, keepdims=True)
sum0 = np.sum(np.abs(X), axis=0, keepdims=True)
N = np.sum(X)
X[:] = xlogy(X / N, X * N / (sum1 * sum0))
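# Usage sketch (illustrative only, not part of the original module): the count transforms
# above all operate in place on a dense co-occurrence matrix; for instance `levy2014k`
# computes a shifted positive PMI with shift log(k), in the spirit of Levy & Goldberg (2014).
# The random counts below are made up for the example.
def _sppmi_demo():
    import numpy as np
    Co = np.random.randint(1, 10, size=(20, 20)).astype(float)
    levy2014k(Co, k=5)
    assert (Co >= 0).all()  # maximum(0, .) keeps only positive associations
    return Co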
def length_normalize_axis0(matrix):
xp = get_array_module(matrix)
norms = xp.sqrt(xp.sum(matrix**2, axis=0))
norms[norms == 0] = 1
matrix /= norms
def mean_center_axis1(matrix):
xp = get_array_module(matrix)
avg = xp.mean(matrix, axis=1)
matrix -= avg[:, xp.newaxis]
# import faiss
# def faiss_knn(Q, X, k, dist='IP'):
# d = X.shape[1]
# if dist == 'IP':
# index = faiss.IndexFlatIP(d)
# elif dist == 'L2':
# index = faiss.IndexFlatL2(d)
# index.add(X)
# dists, inds = index.search(Q, k)
# return dists, inds
# def faiss_csls(Q, X, k, dist='IP', csls=10):
# # this k is neighborhood
# sim_bwd, _ = faiss_knn(X, Q, k=csls)
# knn_sim_bwd = sim_bwd.mean(axis=1)
# topvals, topinds = faiss_knn(Q, X, k=2*csls)
# for i in range(topvals.shape[0]):
# topvals[i] = 2 * topvals[i] - knn_sim_bwd[topinds[i]]
# ind = (-topvals).argsort(axis=1)
# topvals = np.take_along_axis(topvals, ind, axis=1)
# topinds = np.take_along_axis(topinds, ind, axis=1)
# return topvals, topinds
# def noise(X):
# xp = get_array_module(X)
# noise = np.random.randn(1, X.shape[1])
# noise /= xp.sqrt(xp.sum(noise**2))
# # size = np.random.randint(1, 3)
# size = 1
# randinds = np.random.randint(X.shape[1], size=size)
# X -= np.mean(X[randinds, :], axis=0)
# normalize(X, ['unit', 'center', 'unit'])
# def joint_noise(X, Y):
# xp = get_array_module(X)
# noise = np.random.randn(1, X.shape[1])
# noise /= xp.sqrt(xp.sum(noise**2))
# randinds = np.random.randint(X.shape[1], size=1)
# randcenter = np.mean(X[randinds, :], axis=0)
# X -= randcenter
# Y -= randcenter
# normalize(X, ['unit', 'center', 'unit'])
# normalize(Y, ['unit', 'center', 'unit'])
|
coocmap-main
|
embeddings.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import os
from dataclasses import dataclass
import wandb
import shutil
import pandas as pd
import numpy as np
import data
import match
import evaluation
import embeddings
# experimental parameters
defaults = dict(
lan1='./europarl-v7.hu-en.en',
lan2='./europarl-v7.hu-en.hu',
eval='en-hu',
size1=20,
width=5,
symmetric=1,
vectorize='trunc', # fasttext sim_svd trunc word2vec
dim=300,
tokentype='WordLevel', # tokenizer WordLevel, BPE
vocab_size=5000,
limit_alphabet=100,
min_frequency=5,
supervision='unsupervised',
label='none',
)
os.environ["OMP_NUM_THREADS"] = "4" # export OMP_NUM_THREADS=4
os.environ["OPENBLAS_NUM_THREADS"] = "4" # export OPENBLAS_NUM_THREADS=4
os.environ["MKL_NUM_THREADS"] = "6" # export MKL_NUM_THREADS=6
os.environ["VECLIB_MAXIMUM_THREADS"] = "4" # export VECLIB_MAXIMUM_THREADS=4
os.environ["NUMEXPR_NUM_THREADS"] = "6" # export NUMEXPR_NUM_THREADS=6
os.environ['WANDB_IGNORE_GLOBS'] = 'lan1/*,lan2/*'
os.environ["WANDB_MODE"] = "offline" # switch to "online" to use wandb cloud sync
run = wandb.init(config=defaults, project='data efficiency')
base1 = os.path.join(wandb.run.dir, 'lan1')
base2 = os.path.join(wandb.run.dir, 'lan2')
os.makedirs(base1)
os.makedirs(base2)
cfg = wandb.config
def make_sized(lan, sizemb, pout, skipmb=0):
corpus = data.get_data(lan)
text = corpus.headmb(lan, skipmb+sizemb)
with open(pout, 'wt', encoding='utf-8') as fout:
fout.write(text[int(skipmb*1e6):])
p1 = os.path.join(base1, 'c.txt')
p2 = os.path.join(base2, 'c.txt')
make_sized(cfg.lan1, cfg.size1, p1)
size2 = cfg.size1 if cfg.symmetric == 1 else cfg.size2
skip2 = cfg.size1 if cfg.lan1 == cfg.lan2 else 0
make_sized(cfg.lan2, size2, p2, skipmb=skip2)
d1 = data.Corpus(p1, base1,
tokentype=cfg.tokentype, vocab_size=cfg.vocab_size, limit_alphabet=cfg.limit_alphabet, min_frequency=cfg.min_frequency,
vectorize=cfg.vectorize, width=cfg.width, dim=cfg.dim)
d2 = data.Corpus(p2, base2,
tokentype=cfg.tokentype, vocab_size=cfg.vocab_size, limit_alphabet=cfg.limit_alphabet, min_frequency=cfg.min_frequency,
vectorize=cfg.vectorize, width=cfg.width, dim=cfg.dim)
def get_evaldict():
lan1s, lan2s = cfg.eval.split('-')
eval = data.MUSEEval()
dictpath = os.path.join(wandb.run.dir, 'eval_id.dict')
with open(dictpath, 'wt', encoding='utf-8', errors='surrogateescape') as f:
v1 = d1.tokenizer.get_vocab()
v2 = d2.tokenizer.get_vocab()
intersection = set(v1.keys()).intersection(v2.keys())
print('vocab has overlap of length', len(intersection))
for w in intersection:
f.write(f'{w}\t{w}\n')
# dictid = dictpath
if lan1s != lan2s:
dictpath = os.path.join(wandb.run.dir, 'eval_dict.dict')
lanpath = eval.eval_path(f'{lan1s}-{lan2s}', type='full')
shutil.copyfile(lanpath, dictpath)
return dictpath
dictpath = get_evaldict()
@dataclass
class SearchArgs:
stochastic_interval = 5
stochastic_initial = 1
stochastic_multiplier = 2
threshold = 1e-4
maxiter = 100
eta = 1
method = 'orthogonal' # or orthogonal or lstsq
match = 'vecmap'
csls = True
args = SearchArgs()
dumpdir = os.path.join(wandb.run.dir, 'dump')
os.makedirs(dumpdir, exist_ok=True)
def evalf(sim):
# simf = match.most_diff_match(sim, k=10)
f, stats = evaluation.report_sim(sim, d1.tokenizer, d2.tokenizer, dictpath)
print(stats)
def dict_init_binary(tiebreak=1e-3):
inds = evaluation.dict_to_inds(dictpath, d1.tokenizer, d2.tokenizer, full=False)
sim = tiebreak * np.random.rand(d1.Co.shape[0], d2.Co.shape[0])
for i in range(len(inds[0])):
sim[inds[0][i], inds[1][i]] = 1
sim[0, 0] = 1
return sim
rows = []
def experiment(drop=20, dim=300, r1=99, r2=99):
def record(type, sim):
print(type)
simd = match.most_diff_match(sim, 10)
df, stats = evaluation.report_sim(simd, d1.tokenizer, d2.tokenizer, dictpath)
info = stats
info.update({'id': run.id, 'drop': drop, 'dim_p': dim, 'method_type': type})
for k, v in cfg.items():
            if k in info: print(f'Warning: {k} already exists')
info[k] = v
rows.append(info)
wandb.log({'table': wandb.Table(dataframe=pd.DataFrame.from_records(rows))})
wandb.log({'basicinfo': info})
print(info)
df.to_csv(os.path.join(dumpdir, f'{type}-{drop}-{dim}.csv'))
normproc = ['unit', 'center', 'unit']
normproc1 = ['unit']
def f(Co):
X = np.sqrt(Co)
embeddings.normalize(X, normproc)
return X
X, Z = f(d1.Co), f(d2.Co)
_, _, simscoocmap = match.coocmapt(X, Z, args, normproc=normproc1, sim_init=None, evalf=evalf)
record('coocmap', simscoocmap)
def clip_drop():
def f(Co):
X = np.sqrt(Co)
embeddings.normalize(X, normproc)
match.clip(X, r1=r1, r2=r2)
return X
X, Z = f(d1.Co), f(d2.Co)
_, _, simscoocmap = match.coocmapt(X, Z, args, normproc=normproc1, sim_init=None, evalf=evalf)
record('coocmap-clip', simscoocmap)
dropinit = simscoocmap
def f(Co):
X = np.sqrt(Co)
embeddings.normalize(X, normproc)
X = match.svd_power(X, beta=1, drop=drop, dim=None)
embeddings.normalize(X, normproc)
match.clip(X, r1=r1, r2=r2)
return X
X, Z = f(d1.Co), f(d2.Co)
_, _, simscoocmap = match.coocmapt(X, Z, args, normproc=normproc1, sim_init=dropinit, evalf=evalf)
record('coocmap-drop', simscoocmap)
# clip_drop() # run clip and drop as well
experiment(drop=20, dim=300)
|
coocmap-main
|
test_coocmap.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import itertools
import os
import sys
import subprocess
import time
# import lzma # needed for BUCC20Corpus
import numpy as np
from tokenizers import Token, Tokenizer
from tokenizers.models import BPE, WordLevel
from tokenizers.trainers import BpeTrainer, WordLevelTrainer
from tokenizers.pre_tokenizers import Metaspace, Whitespace, WhitespaceSplit
from tokenizers.normalizers import Lowercase
from fast import cooc_count
os.environ["TOKENIZERS_PARALLELISM"] = "true"
cachebase = os.path.expanduser('~/.cache/cooc/')
def full_path(lan):
if lan.startswith('~/') or lan.startswith('./') or lan.startswith('/'):
path = os.path.expanduser(lan)
print('trying path', path)
else:
# relative path from cachebase
path = os.path.expanduser(os.path.join(cachebase, lan))
print('trying cache path', path)
return path
def get_data(lan):
if lan in EuroParl.lans:
return EuroParl()
if lan in WikiDump.lans:
return WikiDump()
if lan in NewsCrawl.lans:
return NewsCrawl()
# else just get raw file from cache base
"""
wikidumps/zhwikishuf.jieba.txt: tokenized and to lower case
news-crawl/news.2018.en.shuffled.deduped: en-news for a change
"""
path = full_path(lan)
if os.path.isfile(path):
return HeadableData()
else:
raise Exception(f'No data for {lan} at {path}')
class HeadableData(object):
cachedir = os.path.expanduser(os.path.join(cachebase))
# works if you specify the path relative to the cachedir
def headmb(self, lan, sizemb):
size = int(sizemb * 1000000)
lantxt = full_path(lan)
f = open(lantxt, 'rt', encoding="utf-8")
sizedtxt = f.read(size)
return sizedtxt
class NewsCrawl(HeadableData):
"""
Data from https://data.statmt.org/news-crawl/en/
https://data.statmt.org/news-crawl/es/
processing of this data was very simple, so just notes here
wget https://data.statmt.org/news-crawl/en/news.2018.en.shuffled.deduped.gz
wget https://data.statmt.org/news-crawl/es/news.2019.es.shuffled.deduped.gz
wget https://data.statmt.org/news-crawl/hu/news.2019.hu.shuffled.deduped.gz
wget https://data.statmt.org/news-crawl/hu/news.2020.hu.shuffled.deduped.gz
wget https://data.statmt.org/news-crawl/hu/news.2021.hu.shuffled.deduped.gz
cat news.*.hu.* > news.2019-2021.hu.shuffled.deduped
This one removes around half the data
cat news.2019-2021.hu.shuffled.deduped | grep -v http | grep -v trackingCode > news.2019-2021.hu.shuffled.deduped.filtered
gzip -d *
"""
cachedir = os.path.expanduser(os.path.join(cachebase, 'news-crawl/'))
lans = ['news.2018.en.shuffled.deduped', 'news.2019.es.shuffled.deduped', 'news.2019-2021.hu.shuffled.deduped', 'news.2019-2021.hu.shuffled.deduped.filtered', 'news.2018-2019-2020-2022.hu.shuffled']
def headmb(self, lan, sizemb):
assert lan in self.lans, 'lan must be one of: ' + ', '.join(self.lans)
size = int(sizemb * 1000000)
lantxt = os.path.join(self.cachedir, f'{lan}')
f = open(lantxt, 'rt', encoding="utf-8")
sizedtxt = f.read(size)
return sizedtxt
class EuroParl(HeadableData):
cachedir = os.path.expanduser(os.path.join(cachebase, 'europarl/'))
urls = {
# 'fr-en': 'https://www.statmt.org/europarl/v7/fr-en.tgz',
# 'es-en': 'https://www.statmt.org/europarl/v7/es-en.tgz',
# 'de-en': 'https://www.statmt.org/europarl/v7/de-en.tgz',
'fi-en': 'https://www.statmt.org/europarl/v7/fi-en.tgz',
'hu-en': 'https://www.statmt.org/europarl/v7/hu-en.tgz',
}
lans_raw = [f'europarl-v7.{suf}' for suf in ['fr-en.fr', 'fr-en.en', 'es-en.es', 'es-en.en', 'de-en.de', 'hu-en.en', 'hu-en.hu', 'fi-en.fi', 'fi-en.en']]
lansshuf = [f'{pref}.shuf' for pref in lans_raw]
lans = lans_raw + lansshuf
def __init__(self):
cachedir = self.cachedir
if not os.path.isdir(cachedir):
print(f'Making dir {cachedir}', file=sys.stderr)
os.makedirs(cachedir, exist_ok=True)
def check_and_dl_all(self):
for lan in self.urls:
self.check_and_dl(lan)
for l in self.lans_raw:
if not os.path.isfile(os.path.join(self.cachedir, l)):
tgzname = l.split('.')[1] + '.tgz'
print(f'Extracting for {l}', file=sys.stderr)
proc = subprocess.run(f'tar xzf {tgzname}', shell=True, cwd=self.cachedir)
else:
print(f'Already extracted for {l}', file=sys.stderr)
for flan, fshuf in zip(self.lans_raw, self.lansshuf):
if not os.path.isfile(os.path.join(self.cachedir, fshuf)):
subprocess.run(f'cat {flan} | shuf > {fshuf}', shell=True, cwd=self.cachedir)
def check_and_dl(self, lan):
url = self.urls[lan]
fname = url.split('/')[-1]
outfile = os.path.join(self.cachedir, fname)
if not os.path.isfile(outfile):
print(f'Downloading {outfile}', file=sys.stderr)
proc = subprocess.run(f'wget -nv {url} -O {outfile}', shell=True, cwd=self.cachedir)
else:
print(f'Already downloaded {outfile}', file=sys.stderr)
def headmb(self, lan, sizemb):
assert lan in self.lans, 'lan must be one of: ' + ', '.join(self.lans)
size = int(sizemb * 1000000)
lantxt = os.path.join(self.cachedir, f'{lan}')
f = open(lantxt, 'rt', encoding="utf-8")
sizedtxt = f.read(size)
return sizedtxt
# https://www.statmt.org/europarl/v7/es-en.tgz
# wc: 2007723 52653110 346919801 europarl-v7.fr-en.fr
# wc: 2007723 50330641 301523301 europarl-v7.fr-en.en
class WikiDump(HeadableData):
"""
"""
urls = {
'enwiki': [
'https://dumps.wikimedia.org/enwiki/20230401/enwiki-20230401-pages-meta-current1.xml-p1p41242.bz2',
'https://dumps.wikimedia.org/enwiki/20230401/enwiki-20230401-pages-meta-current2.xml-p41243p151573.bz2',
'https://dumps.wikimedia.org/enwiki/20230401/enwiki-20230401-pages-meta-current3.xml-p151574p311329.bz2'
],
'eswiki': [
'https://dumps.wikimedia.org/eswiki/20230401/eswiki-20230401-pages-meta-current1.xml-p1p159400.bz2',
'https://dumps.wikimedia.org/eswiki/20230401/eswiki-20230401-pages-meta-current2.xml-p159401p693323.bz2',
'https://dumps.wikimedia.org/eswiki/20230401/eswiki-20230401-pages-meta-current3.xml-p693324p1897740.bz2'
],
'zhwiki': [
'https://dumps.wikimedia.org/zhwiki/20230401/zhwiki-20230401-pages-meta-current1.xml-p1p187712.bz2',
'https://dumps.wikimedia.org/zhwiki/20230401/zhwiki-20230401-pages-meta-current2.xml-p187713p630160.bz2',
'https://dumps.wikimedia.org/zhwiki/20230401/zhwiki-20230401-pages-meta-current3.xml-p630161p1389648.bz2',
'https://dumps.wikimedia.org/zhwiki/20230401/zhwiki-20230401-pages-meta-current4.xml-p1389649p2889648.bz2',
'https://dumps.wikimedia.org/zhwiki/20230401/zhwiki-20230401-pages-meta-current4.xml-p2889649p3391029.bz2',
'https://dumps.wikimedia.org/zhwiki/20230401/zhwiki-20230401-pages-meta-current5.xml-p3391030p4891029.bz2'
],
'frwiki': [
'https://dumps.wikimedia.org/frwiki/20230401/frwiki-20230401-pages-meta-current1.xml-p1p306134.bz2',
'https://dumps.wikimedia.org/frwiki/20230401/frwiki-20230401-pages-meta-current2.xml-p306135p1050822.bz2',
'https://dumps.wikimedia.org/frwiki/20230401/frwiki-20230401-pages-meta-current3.xml-p1050823p2550822.bz2'
],
'dewiki': [
'https://dumps.wikimedia.org/dewiki/20230401/dewiki-20230401-pages-meta-current1.xml-p1p297012.bz2',
'https://dumps.wikimedia.org/dewiki/20230401/dewiki-20230401-pages-meta-current2.xml-p297013p1262093.bz2',
'https://dumps.wikimedia.org/dewiki/20230401/dewiki-20230401-pages-meta-current3.xml-p1262094p2762093.bz2'
]
}
lans = {'enwikishuf', 'eswikishuf', 'zhwikishuf', 'frwikishuf', 'dewikishuf'}
cachedir = os.path.expanduser(os.path.join(cachebase, 'wikidumps/'))
def __init__(self):
cachedir = self.cachedir
if not os.path.isdir(cachedir):
print(f'Making dir {cachedir}', file=sys.stderr)
os.makedirs(cachedir, exist_ok=True)
def check_and_dl_all(self):
for lan in self.urls:
self.check_and_dl(lan)
def check_and_dl(self, lan):
landir = os.path.join(self.cachedir, lan)
if not os.path.isdir(landir):
os.makedirs(landir, exist_ok=True)
urls = self.urls[lan]
for partn, url in enumerate(urls):
# get last part of the url
fname = url.split('/')[-1]
outfile = os.path.join(landir, fname)
if not os.path.isfile(outfile):
print(f'Downloading {outfile}', file=sys.stderr)
proc = subprocess.Popen(['wget', '-nv', url, '-O', outfile])
output, error = proc.communicate()
print(output, file=sys.stderr)
print(error, file=sys.stderr)
else:
print(f'Already downloaded {outfile}', file=sys.stderr)
outdir = os.path.join(landir, f'OUT_{fname}')
if not os.path.isdir(outdir):
proc = subprocess.Popen(f'python -m wikiextractor.WikiExtractor {outfile} -o {outdir} -b 100M --no-templates'.split())
output, error = proc.communicate()
print(output, file=sys.stderr)
print(error, file=sys.stderr)
# cat OUT_*/*/wiki* > ../zhwiki.txt
# cat enwiki.txt | grep -v '</doc>' | grep -v '<doc id=' | shuf > enwikishuf.txt
# lantxt = os.path.join(self.cachedir, f'{lan}.txt')
# if not os.path.isfile(lantxt):
# print(f'concatenating to {lantxt}')
# with open(lantxt, 'w') as f:
# proc = subprocess.Popen(f'cat {landir}/OUT_*/*/wiki*'.split(), stdout=f)
# output, error = proc.communicate()
# # print(output, file=sys.stderr)
# print(error, file=sys.stderr)
def headmb(self, lan, sizemb):
assert lan in self.lans
size = int(sizemb * 1000000)
lantxt = os.path.join(self.cachedir, f'{lan}.txt')
f = open(lantxt, 'rt', encoding="utf-8")
sizedtxt = f.read(size)
return sizedtxt
class BUCC20Corpus(object):
dataurls = {
'en-wiki': 'http://corpus.leeds.ac.uk/serge/bucc/en.ol.xz',
'es-wiki': 'http://corpus.leeds.ac.uk/serge/bucc/es.ol.xz',
'zh-wiki': 'http://corpus.leeds.ac.uk/serge/bucc/zh.ol.xz',
'en-wac': 'http://corpus.leeds.ac.uk/serge/bucc/ukwac.ol.xz',
'de-wac': 'http://corpus.leeds.ac.uk/serge/bucc/dewac.ol.xz',
'fr-wac': 'http://corpus.leeds.ac.uk/serge/bucc/frwac.ol.xz',
'ru-wac': 'http://corpus.leeds.ac.uk/serge/bucc/ruwac.ol.xz',
}
cachedir = os.path.expanduser('~/.cache/bucc20/corpus/')
sizeddir = os.path.expanduser('~/.cache/bucc20/corpus/sized/')
def __init__(self):
sizeddir = BUCC20Corpus.sizeddir
cachedir = BUCC20Corpus.cachedir
if not os.path.isdir(cachedir):
print(f'Making dir {cachedir}', file=sys.stderr)
os.makedirs(cachedir, exist_ok=True)
if not os.path.isdir(sizeddir):
print(f'Making dir {sizeddir}', file=sys.stderr)
os.makedirs(sizeddir, exist_ok=True)
def check_and_dl_all(self):
for lan in BUCC20Corpus.dataurls:
self.check_and_dl(lan)
def check_and_dl(self, lan):
cachedir = BUCC20Corpus.cachedir
url = BUCC20Corpus.dataurls[lan]
outfile = os.path.join(cachedir, lan + '.xz')
# print(f'Making cache dir {self.cachedir}', file=sys.stderr)
if not os.path.isdir(cachedir):
print(f'Making cache dir {cachedir}', file=sys.stderr)
os.makedirs(cachedir, exist_ok=True)
if not os.path.isfile(outfile):
print(f'Downloading {outfile}', file=sys.stderr)
proc = subprocess.Popen(['wget', url, '-O', outfile])
output, error = proc.communicate()
print(output, file=sys.stderr)
print(error, file=sys.stderr)
def headmb(self, lan, sizemb):
size = int(sizemb * 1000000)
xzfile = os.path.join(BUCC20Corpus.cachedir, lan + '.xz')
if not os.path.isfile(xzfile):
self.check_and_dl(lan)
f = lzma.open(xzfile, 'rt', encoding="utf-8")
sizedtxt = f.read(size)
return sizedtxt
class EvalData(object):
def eval_path(self, lanpair):
pass
class MUSEEval(EvalData):
_base = 'https://dl.fbaipublicfiles.com/arrival/dictionaries/'
pairs = ['en-de', 'en-es', 'en-fr', 'en-ru', 'en-zh', 'en-fi', 'en-hu']
cachedir = os.path.expanduser(os.path.join(cachebase, 'muse_dicts/'))
types = {
'full': '.txt',
'train': '.0-5000.txt',
'test': '.5000-6500.txt'
}
def __init__(self, ):
if not os.path.isdir(self.cachedir):
print(f'Making cache dir {self.cachedir}', file=sys.stderr)
os.makedirs(self.cachedir, exist_ok=True)
for p in self.pairs:
self.download(p)
def download(self, p):
for t in self.types:
suff = self.types[t]
url = self._base + f'{p}{suff}'
outfile = os.path.join(self.cachedir, f'{p}{suff}')
if not os.path.isfile(outfile):
print(f'Downloading {url}', file=sys.stderr)
proc = subprocess.Popen(['wget', url, '-O', outfile])
output, error = proc.communicate()
def eval_path(self, lanpair, type='full'):
if lanpair not in self.pairs:
print(f'lanpair {lanpair} not in {self.pairs}, try downloading')
self.download(lanpair)
return os.path.join(self.cachedir, lanpair + self.types[type])
class BUCC20Eval(EvalData):
dataurls = {
'de-en': 'https://comparable.limsi.fr/bucc2020/tr/de-en-1-5000-training.txt',
'es-en': 'https://comparable.limsi.fr/bucc2020/tr/es-en-1-5000-training.txt',
'fr-en': 'https://comparable.limsi.fr/bucc2020/tr/fr-en-1-5000-training.txt',
'ru-en': 'https://comparable.limsi.fr/bucc2020/tr/ru-en-1-5000-training.txt',
'zh-en': 'https://comparable.limsi.fr/bucc2020/tr/zh-en-1-4500-training.txt',
'en-de': 'https://comparable.limsi.fr/bucc2020/tr/en-de-1-5000-training.txt',
'en-es': 'https://comparable.limsi.fr/bucc2020/tr/en-es-1-5000-training.txt',
'en-fr': 'https://comparable.limsi.fr/bucc2020/tr/en-fr-1-5000-training.txt',
'en-ru': 'https://comparable.limsi.fr/bucc2020/tr/en-ru-1-5000-training.txt',
'en-zh': 'https://comparable.limsi.fr/bucc2020/tr/en-zh-1-5000-training.txt',
}
cachedir = os.path.expanduser('~/.cache/bucc20/train/')
def __init__(self, ):
cachedir = BUCC20Eval.cachedir
if not os.path.isdir(cachedir):
print(f'Making cache dir {cachedir}', file=sys.stderr)
os.makedirs(cachedir, exist_ok=True)
for lanpair in BUCC20Eval.dataurls:
url = BUCC20Eval.dataurls[lanpair]
outfile = os.path.join(cachedir, lanpair + '.txt')
if not os.path.isfile(outfile):
print(f'Downloading {url}', file=sys.stderr)
proc = subprocess.Popen(['wget', url, '-O', outfile])
output, error = proc.communicate()
def eval_path(self, lanpair):
return os.path.join(BUCC20Eval.cachedir, lanpair + '.txt')
def train_bpe_tokenizer(train_data_paths, model_path, vocab_size, limit_alphabet=100, min_frequency=10):
trainer = BpeTrainer(special_tokens=["[UNK]"], min_frequency=min_frequency,
vocab_size=vocab_size, limit_alphabet=limit_alphabet)
tokenizer = Tokenizer(BPE(unk_token="[UNK]", fuse_unk=True))
tokenizer.normalizer = Lowercase()
tokenizer.pre_tokenizer = Metaspace()
print(f'data: {train_data_paths}', file=sys.stderr)
tokenizer.train(train_data_paths, trainer)
tokenizer.save(model_path)
return tokenizer
def train_word_tokenizer(train_data_paths, model_path, vocab_size, limit_alphabet=100, min_frequency=10):
trainer = WordLevelTrainer(special_tokens=["[UNK]"], min_frequency=min_frequency,
vocab_size=vocab_size, limit_alphabet=limit_alphabet)
tokenizer = Tokenizer(WordLevel(unk_token="[UNK]"))
# tokenizer.pre_tokenizer = Metaspace()
tokenizer.pre_tokenizer = Whitespace()
tokenizer.normalizer = Lowercase()
print(f'data: {train_data_paths}', file=sys.stderr)
tokenizer.train(train_data_paths, trainer)
tokenizer.save(model_path)
return tokenizer
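# Usage sketch (illustrative only, not part of the original module): train a tiny
# word-level tokenizer on a throwaway corpus file; the corpus text, vocab size and
# paths below are made up for the example.
def _train_word_tokenizer_demo():
    import os
    import tempfile
    tmpdir = tempfile.mkdtemp()
    corpus = os.path.join(tmpdir, 'tiny.txt')
    with open(corpus, 'w', encoding='utf-8') as f:
        f.write('the quick brown fox jumps over the lazy dog\n' * 100)
    tok = train_word_tokenizer([corpus], os.path.join(tmpdir, 'model.json'),
                               vocab_size=50, limit_alphabet=100, min_frequency=1)
    return tok.encode('the quick fox').ids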
def tokenize(data_path: str, tokenizer: Tokenizer):
t1 = time.perf_counter()
with open(data_path, 'r') as f:
all = f.readlines()
t2 = time.perf_counter()
print(f'reading {data_path} took: {t2 - t1 :.3f}', file=sys.stderr)
alljoined = "".join(all)
print(f'number of chars: {len(alljoined)}', file=sys.stderr)
all = alljoined.split('\n')
print(f'number of lines: {len(all)}', file=sys.stderr)
t1 = time.perf_counter()
tokened = tokenizer.encode_batch(all)
t2 = time.perf_counter()
print(f'encode_batch took: {t2 - t1 :.3f}', file=sys.stderr)
return tokened
def text_to_cooc(tokened, tokenizer: Tokenizer, width=6):
ids = [t.ids for t in tokened]
joined_ids = list(itertools.chain(*ids))
print('num words: ', len(joined_ids))
t1 = time.perf_counter()
cooc = cooc_count.cooc(np.array(joined_ids, dtype=np.int64), width=width, vocab_size=tokenizer.get_vocab_size())
t2 = time.perf_counter()
print(f'constructing cooc took: {t2 - t1 :.3f}', file=sys.stderr)
return cooc
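# Reference sketch (illustrative only, not used by the pipeline): a plain-Python version
# of the windowed co-occurrence count that `fast.cooc_count.cooc` computes in compiled
# code. The symmetric, unweighted window convention here is an assumption made for
# illustration; the compiled kernel is the authoritative implementation.
def _cooc_reference(ids, width, vocab_size):
    import numpy as np
    Co = np.zeros((vocab_size, vocab_size), dtype=np.float64)
    for i, w in enumerate(ids):
        for j in range(max(0, i - width), min(len(ids), i + width + 1)):
            if j != i:
                Co[w, ids[j]] += 1
    return Co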
class Corpus(object):
def __init__(self, datapath, basepath,
tokentype, vocab_size, limit_alphabet, min_frequency,
vectorize: str, width: int, dim: int, adaptive=False, write_vecs=False):
self.adaptive = adaptive
self.write_vecs = write_vecs
self.dim = int(dim)
self.width = int(width)
self.vectorize = vectorize
self.datapath = datapath
os.makedirs(basepath, exist_ok=True)
self.model_path = os.path.join(basepath, 'model.json')
if tokentype == 'WordLevel':
self.tokenizer = train_word_tokenizer([self.datapath], self.model_path, vocab_size, limit_alphabet, min_frequency)
elif tokentype == 'BPE':
self.tokenizer = train_bpe_tokenizer([self.datapath], self.model_path, vocab_size, limit_alphabet, min_frequency)
else:
raise Exception(f'{tokentype} not recognized')
self.tokened = tokenize(self.datapath, self.tokenizer)
self.tokened_out = os.path.join(basepath, 'c.tok')
self.ids_out = os.path.join(basepath, 'c.ids')
t1 = time.perf_counter()
if vectorize == 'fasttext' or vectorize == 'word2vec':
with open(self.tokened_out, 'w') as f:
# basic tokenizer does not output UNK for unknown words, leading to all words being used
f.writelines([' '.join([self.tokenizer.id_to_token(id) for id in li.ids]) for li in self.tokened])
# with open(self.ids_out, 'w') as f:
# f.writelines([' '.join([str(id) for id in li.ids]) for li in self.tokened])
t2 = time.perf_counter()
print(f'writing tokened took: {t2 - t1 :.3f}', file=sys.stderr)
self.vecpath = os.path.join(basepath, 'c.vec')
self.vecs = {}
self.Co = text_to_cooc(self.tokened, self.tokenizer, width=self.width)
if vectorize == 'fasttext':
self._fasttext_vecs()
elif vectorize == 'word2vec':
self._word2vec_vecs()
elif vectorize == 'sim_svd':
self._sim_vecs()
elif vectorize == 'trunc':
self._count_vecs()
else:
raise Exception('vectorize type not recognized')
def _write_vectors(self, m):
self.vec = m
with open(self.vecpath, 'w') as f:
vs = self.tokenizer.get_vocab_size()
print('%d %d' % m.shape, file=f)
for i in range(vs):
print(self.tokenizer.id_to_token(i) + ' ' + ' '.join(['%.6g' % x for x in m[i]]), file=f)
def _sim_vecs(self, alpha=0.5, beta=1):
maxdim = min(self.Co.shape[1], 10000)
Cot = self.Co[:, :maxdim]
u, s, _ = np.linalg.svd(np.power(Cot, alpha), full_matrices=False)
u = u[:, :self.dim]*np.power(s[:self.dim], beta)
self.vecs['sim_svd'] = u
if self.write_vecs:
            self._write_vectors(u)  # write out the SVD vectors computed above
def _fasttext_vecs(self, epoch=5):
import fasttext
# https://fasttext.cc/docs/en/options.html
# two common corrections for fasttext
if self.adaptive:
lradapt = 0.1 / np.power(self.dim / 50, 0.5)
mbsize = os.stat(self.tokened_out).st_size / 1e6
epoch = 5 if mbsize > 300 else int(5 * np.sqrt(300 / mbsize))
config = dict(model='skipgram', lr=lradapt, dim=self.dim, ws=self.width, epoch=epoch)
else:
config = dict(model='skipgram', lr=0.05, dim=self.dim, ws=self.width, epoch=5)
print(config)
# config = dict(model='skipgram', lr=0.05, dim=self.dim, ws=self.width, epoch=epoch, minn=0, maxn=0)
model = fasttext.train_unsupervised(self.tokened_out, thread=8, **config)
mat = np.zeros((self.tokenizer.get_vocab_size(), model.dim), dtype=float)
for w in self.tokenizer.get_vocab():
v = model.get_word_vector(w)
i = self.tokenizer.token_to_id(w)
mat[i] = v
self.vecs['fasttext'] = mat
if self.write_vecs:
self._write_vectors(mat)
def _word2vec_vecs(self, epoch=5):
import fasttext
# https://fasttext.cc/docs/en/options.html
# just fasttext without subwords
config = dict(model='skipgram', lr=0.05, dim=self.dim, ws=self.width, epoch=epoch, minn=0, maxn=0)
model = fasttext.train_unsupervised(self.tokened_out, thread=8, **config)
mat = np.zeros((self.tokenizer.get_vocab_size(), model.dim), dtype=float)
for w in self.tokenizer.get_vocab():
v = model.get_word_vector(w)
i = self.tokenizer.token_to_id(w)
mat[i] = v
self.vecs['word2vec'] = mat
def _count_vecs(self, alpha=0.5):
mat = np.power(np.array(self.Co[:, :self.dim], dtype=float), alpha)
self.vecs['trunc'] = mat
if self.write_vecs:
self._write_vectors(mat)
# eval = BUCC20Eval()
# bucc20.get_sized('zh-wiki', 2)
# bucc20.get_sized('zh-wiki', 4)
|
coocmap-main
|
data.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
from collections import Counter
import numpy as np
import embeddings
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
MAX_SVD_DIM = 5000 # maximum SVD to avoid long compute time
### initialization methods ###
def vecmap_unsup(x, z, norm_proc=['unit', 'center', 'unit']):
print('maxdim', MAX_SVD_DIM)
sim_size = min(MAX_SVD_DIM, min(x.shape[0], z.shape[0]))
u, s, vt = np.linalg.svd(x, full_matrices=False)
xsim = (u*s).dot(u.T)
u, s, vt = np.linalg.svd(z, full_matrices=False)
zsim = (u*s).dot(u.T)
del u, s, vt
    xsim.sort(axis=1)
    zsim.sort(axis=1)
    embeddings.normalize(xsim, norm_proc)
    embeddings.normalize(zsim, norm_proc)
sim = xsim.dot(zsim.T)
return sim
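# Usage sketch (illustrative only, not part of the original module): the unsupervised
# initialisation compares sorted intra-language similarity profiles. As written it assumes
# both inputs have the same number of rows (the shared vocab_size used in this repo);
# the shapes below are made up for the example.
def _vecmap_unsup_demo():
    import numpy as np
    x = np.random.rand(40, 30)
    z = np.random.rand(40, 25)
    sim = vecmap_unsup(x, z)
    assert sim.shape == (40, 40)
    return sim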
def match_sim(xsim, zsim, sort=True, metric='cosine', norm_proc=['unit', 'center', 'unit']):
sim_size = min(xsim.shape[1], zsim.shape[1])
xsim = np.array(xsim[:, :sim_size])
zsim = np.array(zsim[:, :sim_size])
if sort:
xsim.sort(axis=1)
zsim.sort(axis=1)
embeddings.normalize(xsim, norm_proc)
embeddings.normalize(zsim, norm_proc)
sim = xsim @ zsim.T
return sim
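# Usage sketch (illustrative only, not part of the original module): `match_sim` truncates
# two association matrices to a common number of columns, sorts each row, renormalises,
# and compares the sorted profiles across languages; the row counts may differ.
def _match_sim_demo():
    import numpy as np
    Cp1 = np.random.rand(40, 60)
    Cp2 = np.random.rand(50, 45)
    sim = match_sim(Cp1, Cp2)
    assert sim.shape == (40, 50)
    return sim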
### main search loops ###
def vecmap(x: np.ndarray, z: np.ndarray, args, sim_init=None, evalf=None):
print('running vecmap', x.shape)
keep_prob = args.stochastic_initial
best_objective = float('-inf')
last_improvement = 0
end = False
inds1, inds2 = 0, 0
for it in range(args.maxiter):
if it - last_improvement > args.stochastic_interval:
# maxswaps = max(1, maxswaps - 1)
if keep_prob == 1:
end = True
keep_prob = min(1.0, args.stochastic_multiplier * keep_prob)
last_improvement = it
if it == 0:
if sim_init is not None:
sim = sim_init
else:
sim = vecmap_unsup(x, z, norm_proc=['unit', 'center', 'unit'])
else:
# rotation
if args.method == 'orthogonal':
u, s, vt = np.linalg.svd(x[inds1].T @ z[inds2])
w = u @ vt
elif args.method == 'lstsq':
w, r, r, s = np.linalg.lstsq(x[inds1], z[inds2], rcond=1e-5)
sim = x @ w @ z.T
#
if args.csls:
sim = most_diff_match(sim, 10)
inds1, inds2, evalsim = match(sim, args.match)
if evalf is not None:
evalf(evalsim)
objf = np.mean(sim.max(axis=1))
objb = np.mean(sim.max(axis=0))
objective = (objf + objb) / 2
print(f'{it} {keep_prob} \t{objf:.4f}\t{objective:.4f}\t{best_objective:.4f}')
if objective >= best_objective + args.threshold:
last_improvement = it
if it != 0:
best_objective = objective
if end:
break
return inds1, inds2, sim
def coocmapt(Cp1: np.ndarray, Cp2: np.ndarray, args, normproc=['unit'], sim_init=None, evalf=None):
"""
basic coocmap using just numpy but only works for cosine distance
"""
best_objective = float('-inf')
last_improvement = 0
end = False
inds1, inds2 = 0, 0
simd = 0
for it in range(args.maxiter):
if it - last_improvement > args.stochastic_interval:
end = True
if it == 0:
if sim_init is not None:
sim = sim_init
else:
sim = match_sim(Cp1, Cp2, sort=True, metric='cosine', norm_proc=['unit', 'center', 'unit'])
sim_init = sim
# sim = vecmap_unsup(Cp1, Cp2)
if args.csls:
sim = most_diff_match(sim, 10)
inds1, inds2, evalsim = match(sim, args.match)
if evalf is not None:
evalf(evalsim)
if end:
break
uniqf2 = uniqb1 = len(inds1)
Cp1f = Cp1[:, inds1]
Cp2f = Cp2[:, inds2]
embeddings.normalize(Cp1f, normproc)
embeddings.normalize(Cp2f, normproc)
# maybe these matches
sim = Cp1f @ Cp2f.T
# X = torch.from_numpy(Cp1f)
# Y = torch.from_numpy(Cp2f)
# sim = -torch.cdist(X, Y, p=2).numpy()
objf = np.mean(np.max(sim, axis=1))
objb = np.mean(np.max(sim, axis=0))
objective = 0.5 * (objf + objb)
if objective > best_objective:
last_improvement = it
            if it > 0: # the initial round uses a different matrix and should not be compared
best_objective = objective
print(f'objective {it} \t{objf:.5f} \t{objective:.5f} \t {best_objective:.5f} \t {uniqf2} \t {uniqb1}')
return inds1, inds2, sim
def coocmapl1(Cp1: np.ndarray, Cp2: np.ndarray, args, normproc=['unit'], sim_init=None, evalf=None):
"""
duplicated code using cdistance from torch, mainly to test l1 distance
"""
best_objective = float('-inf')
last_improvement = 0
end = False
inds1, inds2 = 0, 0
simd = 0
for it in range(args.maxiter):
if it - last_improvement > args.stochastic_interval:
end = True
if it == 0:
if sim_init is not None:
sim = sim_init
else:
sim = match_sim(Cp1, Cp2, sort=True, metric='cosine', norm_proc=['unit', 'center', 'unit'])
sim_init = sim
# sim = vecmap_unsup(Cp1, Cp2)
if args.csls:
sim = most_diff_match(sim, 10)
inds1, inds2, evalsim = match(sim, args.match)
if evalf is not None:
evalf(evalsim)
if end:
break
uniqf2 = uniqb1 = len(inds1)
Cp1f = Cp1[:, inds1]
Cp2f = Cp2[:, inds2]
embeddings.normalize(Cp1f, normproc)
embeddings.normalize(Cp2f, normproc)
# maybe these matches
# sim = Cp1f @ Cp2f.T
import torch
if torch.cuda.is_available():
X = torch.from_numpy(Cp1f).cuda()
Y = torch.from_numpy(Cp2f).cuda()
sim = -torch.cdist(X, Y, p=1).cpu().numpy()
else:
X = torch.from_numpy(Cp1f)
Y = torch.from_numpy(Cp2f)
sim = -torch.cdist(X, Y, p=1).numpy()
# this is only approximately a greedy method, as this objective is not guaranteed to increase
objf = np.mean(np.max(sim, axis=1))
objb = np.mean(np.max(sim, axis=0))
objective = 0.5 * (objf + objb)
if objective > best_objective:
last_improvement = it
            if it > 0: # the initial round uses a different matrix and should not be compared
best_objective = objective
print(f'objective {it} \t{objf:.5f} \t{objective:.5f} \t {best_objective:.5f} \t {uniqf2} \t {uniqb1}')
return inds1, inds2, sim
def svd_power(X, beta=1, drop=None, dim=None, symmetric=False):
u, s, vt = np.linalg.svd(X, full_matrices=False)
print('np.power(s)', np.power(s, 1).sum())
    if dim is not None:
        # s = np.sqrt(np.maximum(0, s**2 - s[dim]**2))
        # s = np.maximum(0, s - s[dim])
        s[dim:] = 0
        print('np.power(s_dim)', np.power(s, 1).sum())
    s = np.power(s, beta)  # spectral power applied to the retained singular values
if drop is not None:
if isinstance(drop, np.ndarray):
s[list(drop)] = 0
elif isinstance(drop, int):
s[:drop] = 0
print('np.power(s_drop)', np.power(s, 1).sum())
if symmetric:
res = (u * s) @ u.T
else:
res = (u * s) @ vt
norm = np.linalg.norm(res - X, ord='fro')
normX = np.linalg.norm(X, ord='fro')
print(f'diff {norm:.2e} / {normX:.2e}')
return res
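# Usage sketch (illustrative only, not part of the original module): `drop` zeroes out the
# largest singular values (removing the dominant directions), while `dim` truncates the
# spectrum to the leading dimensions; shapes are preserved either way. Sizes are made up.
def _svd_power_demo():
    import numpy as np
    X = np.sqrt(np.random.rand(50, 50))
    X_drop = svd_power(X, beta=1, drop=5, dim=None)    # remove the top-5 singular directions
    X_trunc = svd_power(X, beta=1, drop=None, dim=20)  # keep only the leading 20 dimensions
    assert X_drop.shape == X.shape and X_trunc.shape == X.shape
    return X_drop, X_trunc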
def sim_vecs(Co, dim, alpha=0.5, beta=1):
maxdim = min(Co.shape[1], 10000)
Co = Co[:, :maxdim]
u, s, _ = np.linalg.svd(np.power(Co, alpha), full_matrices=False)
u = u[:, :dim]*np.power(s[:dim], beta)
return u
### matching methods ###
def greedy_match(sim0, iters=10):
sim = sim0.copy()
for i in range(iters):
# if sim is n by m, am1 is size m, am0 is size n
am1 = np.nanargmax(sim, axis=0)
am0 = np.nanargmax(sim, axis=1)
bi0 = am0[am1] == np.arange(sim.shape[1])
bi1 = am1[am0] == np.arange(sim.shape[0])
assert bi0.sum() == bi1.sum()
bimatches = bi0.sum()
uniques = len(np.unique(am0)), len(np.unique(am1))
hubs = np.mean([c for _, c in Counter(am0).most_common(3)])
value = np.take_along_axis(sim0, am1[:, None], axis=1).mean()
stats = {'bimatches': bimatches, 'uniques': uniques, 'hubs': hubs, 'value': value}
print(stats)
if bimatches > 0.98 * min(*sim.shape):
break
for i in range(sim.shape[0]):
if bi1[i]:
sim[i] = float('nan')
sim[:, am0[i]] = float('nan')
sim[i, am0[i]] = float('inf')
return np.arange(sim.shape[1])[bi0], am0[bi0], sim
def most_diff_match(sim0, k):
sim = sim0.copy()
top0 = -np.partition(-sim, kth=k, axis=0)
top1 = -np.partition(-sim, kth=k, axis=1)
mean0 = top0[:k, :].mean(axis=0, keepdims=True)
mean1 = top1[:, :k].mean(axis=1, keepdims=True)
return sim - 0.5*(mean0 + mean1)
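# Usage sketch (illustrative only, not part of the original module): `most_diff_match` is a
# CSLS-style correction in the spirit of Conneau et al. (2018): each entry is penalised by
# the mean of its k strongest matches along both rows and columns, which suppresses hubs.
def _most_diff_match_demo():
    import numpy as np
    sim = np.random.rand(30, 40)
    adjusted = most_diff_match(sim, k=10)
    assert adjusted.shape == sim.shape
    return adjusted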
def forward_backward_match(sim):
indsf2 = np.argmax(sim, axis=1)
indsb1 = np.argmax(sim, axis=0)
indsb2 = np.arange(sim.shape[1])
indsf1 = np.arange(sim.shape[0])
inds1 = np.concatenate((indsf1, indsb1))
inds2 = np.concatenate((indsf2, indsb2))
hubsf = Counter(indsf2).most_common(3)
hubsb = Counter(indsb1).most_common(3)
print('hubs', hubsf, hubsb)
return inds1, inds2, sim
def match(sim, method):
if method == 'vecmap':
return forward_backward_match(sim)
elif method == 'coocmap':
return greedy_match(sim, iters=10)
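# Usage sketch (illustrative only, not part of the original module): `match` either stacks
# forward/backward argmax pairs ('vecmap') or runs the mutually exclusive greedy matcher
# ('coocmap'); for a square similarity matrix the 'vecmap' variant returns 2*n index pairs.
def _match_demo():
    import numpy as np
    sim = np.random.rand(20, 20)
    inds1, inds2, _ = match(sim, 'vecmap')
    assert len(inds1) == len(inds2) == 2 * sim.shape[0]
    return inds1, inds2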
### clipping ###
def clipthres(A, p1, p2):
R1 = np.percentile(A, p1, axis=1, keepdims=True)
r = np.percentile(R1, p2)
print('percent greater \t', np.sum((A > r) * 1) / A.size)
return r
def clipBoth(A, r1, r2):
ub = clipthres(A, r1, r2)
lb = clipthres(A, 100-r1, 100-r2)
print('clipped', lb, ub)
return lb, ub
def clip(A, r1=99, r2=99):
lb, ub = clipBoth(A, r1, r2)
A[A < lb] = lb
A[A > ub] = ub
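# Usage sketch (illustrative only, not part of the original module): `clip` winsorises a
# matrix in place; with r1=r2=99 roughly the most extreme ~1% of entries at either end are
# clamped to the percentile-of-percentile thresholds computed by `clipthres`.
def _clip_demo():
    import numpy as np
    A = np.random.randn(100, 100)
    hi_before = A.max()
    clip(A, r1=99, r2=99)
    assert A.max() <= hi_before
    return A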
|
coocmap-main
|
match.py
|
import os
import subprocess
from dataclasses import dataclass
import lzma
import wandb
import argparse
import shutil
import pandas as pd
import numpy as np
import data
import match
import evaluation
import embeddings
# from baselines import VecMap
os.environ['WANDB_IGNORE_GLOBS'] = 'lan1/*,lan2/*'
os.environ["OMP_NUM_THREADS"] = "4" # export OMP_NUM_THREADS=4
os.environ["OPENBLAS_NUM_THREADS"] = "4" # export OPENBLAS_NUM_THREADS=4
os.environ["MKL_NUM_THREADS"] = "6" # export MKL_NUM_THREADS=6
os.environ["VECLIB_MAXIMUM_THREADS"] = "4" # export VECLIB_MAXIMUM_THREADS=4
os.environ["NUMEXPR_NUM_THREADS"] = "6" # export NUMEXPR_NUM_THREADS=6
# os.environ["WANDB_MODE"] = "offline"
defaults = dict(
# data
lan1='enwikishuf',
lan2='dewikishuf',
eval='en-de',
size1=300,
# size2=20, skip2=10,
symmetric=1,
width=5,
    # vectorization: fasttext, sim_svd, trunc, word2vec
vectorize='trunc',
dim=300,
# tokenizer WordLevel, BPE
tokentype='WordLevel',
vocab_size=500,
limit_alphabet=100,
min_frequency=5,
# experiment
supervision='unsupervised',
label='glove',
)
run = wandb.init(config=defaults, project='data efficiency')
base1 = os.path.join(wandb.run.dir, 'lan1')
base2 = os.path.join(wandb.run.dir, 'lan2')
os.makedirs(base1)
os.makedirs(base2)
cfg = wandb.config
def make_sized(lan, sizemb, pout, skipmb=0):
corpus = data.get_data(lan)
text = corpus.headmb(lan, skipmb+sizemb)
with open(pout, 'wt', encoding='utf-8') as fout:
fout.write(text[int(skipmb*1e6):])
p1 = os.path.join(base1, 'c.txt')
p2 = os.path.join(base2, 'c.txt')
make_sized(cfg.lan1, cfg.size1, p1)
size2 = cfg.size1 if cfg.symmetric == 1 else cfg.size2
skip2 = cfg.size1 if cfg.lan1 == cfg.lan2 else 0
make_sized(cfg.lan2, size2, p2, skipmb=skip2)
d1 = data.Corpus(p1, base1,
tokentype=cfg.tokentype, vocab_size=cfg.vocab_size, limit_alphabet=cfg.limit_alphabet, min_frequency=cfg.min_frequency,
vectorize=cfg.vectorize, width=cfg.width, dim=cfg.dim)
d2 = data.Corpus(p2, base2,
tokentype=cfg.tokentype, vocab_size=cfg.vocab_size, limit_alphabet=cfg.limit_alphabet, min_frequency=cfg.min_frequency,
vectorize=cfg.vectorize, width=cfg.width, dim=cfg.dim)
def get_evaldict():
lan1s, lan2s = cfg.eval.split('-')
eval = data.MUSEEval()
dictpath = os.path.join(wandb.run.dir, 'eval_id.dict')
with open(dictpath, 'wt', encoding='utf-8', errors='surrogateescape') as f:
v1 = d1.tokenizer.get_vocab()
v2 = d2.tokenizer.get_vocab()
intersection = set(v1.keys()).intersection(v2.keys())
print('vocab has overlap of length', len(intersection))
for w in intersection:
f.write(f'{w}\t{w}\n')
# dictid = dictpath
if lan1s != lan2s:
dictpath = os.path.join(wandb.run.dir, 'eval_dict.dict')
lanpath = eval.eval_path(f'{lan1s}-{lan2s}', type='full')
shutil.copyfile(lanpath, dictpath)
return dictpath
dictpath = get_evaldict()
@dataclass
class SearchArgs:
stochastic_interval = 10
stochastic_add = 1e-1
stochastic_multiplier = 2
threshold = 1e-4
stochastic_initial = 1
maxswaps = 100
maxiter = 100
eta = 1
#
method = 'orthogonal' # or orthogonal or lstsq
match = 'vecmap'
csls = True
args = SearchArgs()
dumpdir = os.path.join(wandb.run.dir, 'dump')
os.makedirs(dumpdir, exist_ok=True)
def evalf(sim):
# simf = match.most_diff_match(sim, k=3)
f, stats = evaluation.report_sim(sim, d1.tokenizer, d2.tokenizer, dictpath)
print(stats)
def dict_init_binary(tiebreak=1e-3):
inds = evaluation.dict_to_inds(dictpath, d1.tokenizer, d2.tokenizer, full=False)
sim = tiebreak * np.random.rand(d1.Co.shape[0], d2.Co.shape[0])
for i in range(len(inds[0])):
sim[inds[0][i], inds[1][i]] = 1
sim[0, 0] = 1
return sim
rows = []
def experiment(drop=20, dim=300):
print('original dim', d1.Co.shape)
def record(type, sim):
print(type)
# plt.figure()
# plt.imshow(sims[type])
simd = match.most_diff_match(sim, 10)
# inds1, inds2, sim_greed = match.greedy_match(simd, iters=5)
df, stats = evaluation.report_sim(simd, d1.tokenizer, d2.tokenizer, dictpath)
info = stats
info.update({'id': run.id, 'drop': drop, 'dim_p': dim, 'method_type': type})
for k, v in cfg.items():
            if k in info: print(f'Warning: {k} already exists')
info[k] = v
rows.append(info)
wandb.log({'table': wandb.Table(dataframe=pd.DataFrame.from_records(rows))})
wandb.log({'basicinfo': info})
print(info)
df.to_csv(os.path.join(dumpdir, f'{type}-{drop}-{dim}.csv'))
normproc1 = ['unit']
normproc = ['unit', 'center', 'unit']
def standard_normalize(normproc, name):
A1 = np.array(1.0*d1.Co)
A2 = np.array(1.0*d2.Co)
embeddings.normalize(A1, normproc)
embeddings.normalize(A2, normproc)
_, _, simscoocmap = match.coocmapt(A1, A2, args, normproc=normproc1, sim_init=None, evalf=evalf)
record(name, simscoocmap)
# standard_normalize(['log'] + normproc, 'log')
standard_normalize(['log1p'] + normproc, 'log1p')
def levy2014():
standard_normalize(['levy2014', 'unit'], 'levy2014-l2')
standard_normalize(['levy2014'] + normproc, 'levy2014-normalize')
standard_normalize(['levy2014_k5', 'unit'], 'levy2014_k5-l2')
standard_normalize(['levy2014_k5'] + normproc, 'levy2014_k5-normalize')
A1 = np.array(1.0*d1.Co)
A2 = np.array(1.0*d2.Co)
embeddings.normalize(A1, ['levy2014', 'unitL1'])
embeddings.normalize(A2, ['levy2014', 'unitL1'])
_, _, simscoocmap = match.coocmapl1(A1, A2, args, normproc=['unitL1'], sim_init=None, evalf=evalf)
record('levy2014-l1', simscoocmap)
A1 = np.array(1.0*d1.Co)
A2 = np.array(1.0*d2.Co)
embeddings.normalize(A1, ['levy2014_k5', 'unitL1'])
embeddings.normalize(A2, ['levy2014_k5', 'unitL1'])
_, _, simscoocmap = match.coocmapl1(A1, A2, args, normproc=['unitL1'], sim_init=None, evalf=evalf)
record('levy2014_k5-l1', simscoocmap)
def glove():
standard_normalize(['glove', 'unit'], 'glove-l2')
standard_normalize(['glove'] + normproc, 'glove-normalize')
A1 = np.array(1.0*d1.Co)
A2 = np.array(1.0*d2.Co)
embeddings.normalize(A1, ['glove', 'unitL1'])
embeddings.normalize(A2, ['glove', 'unitL1'])
_, _, simscoocmap = match.coocmapl1(A1, A2, args, normproc=['unitL1'], sim_init=None, evalf=evalf)
record('glove-l1', simscoocmap)
glove()
# A1 = np.sqrt(d1.Co)
# A2 = np.sqrt(d2.Co)
# embeddings.normalize(A1, normproc)
# embeddings.normalize(A2, normproc)
# _, _, simscoocmap = match.coocmapt(A1, A2, args, normproc=normproc1, sim_init=None, evalf=evalf)
# record('coocmap', simscoocmap)
# A1c = np.array(A1)
# A2c = np.array(A2)
# match.clip(A1c, r1=99, r2=99)
# match.clip(A2c, r1=99, r2=99)
# _, _, simscoocmap = match.coocmapt(A1c, A2c, args, normproc=normproc1, sim_init=None, evalf=evalf)
# record('coocmap-clip', simscoocmap)
# A1f = match.svd_power(A1, beta=1, drop=drop, dim=None)
# A2f = match.svd_power(A2, beta=1, drop=drop, dim=None)
# normproc = ['unit', 'center', 'unit']
# embeddings.normalize(A1f, normproc)
# embeddings.normalize(A2f, normproc)
# match.clip(A1f, r1=99, r2=99)
# match.clip(A2f, r1=99, r2=99)
# # dropinit = match.most_diff_match(simscoocmap, 10)
# dropinit = simscoocmap
# _, _, sim = match.coocmapt(A1f, A2f, args, normproc=normproc1, sim_init=dropinit, evalf=evalf)
# record('coocmap-drop', sim)
# clipinit = sim
def rapp1995(name, init):
alpha = 1.0
A1f = np.power(d1.Co, alpha)
A2f = np.power(d2.Co, alpha)
norm = ['pmi', 'unitL1']
embeddings.normalize(A1f, norm)
embeddings.normalize(A2f, norm)
_, _, simscoocmap = match.coocmapl1(A1f, A2f, args, normproc=['unitL1'], sim_init=init, evalf=evalf)
record(name, simscoocmap)
# rapp1995('rapp1995', None)
# rapp1995('rapp1995-init', clipinit)
def fung1997(name, init):
alpha = 1.0
A1f = np.power(d1.Co, alpha)
A2f = np.power(d2.Co, alpha)
norm = ['fung1997', 'unitL1']
embeddings.normalize(A1f, norm)
embeddings.normalize(A2f, norm)
_, _, simscoocmap = match.coocmapl1(A1f, A2f, args, normproc=['unitL1'], sim_init=init, evalf=evalf)
record(name, simscoocmap)
# fung1997('fung1997-l1', None)
# fung1997('fung1997-l1-init', clipinit)
# normproc1 = ['unit']
# dictinit = dict_init_binary()
# _, _, simdictinit = match.coocmapt(A1f, A2f, args, normproc=normproc1, sim_init=dictinit, evalf=evalf)
# record('dict-init-drop', simdictinit)
# generate a simple grid enumeration
from itertools import product
drops = [20]
dims = [100]
grid_plan = product(drops, dims)
for drop, dim in grid_plan:
if drop >= dim: continue
experiment(drop, dim)
# method = VecMap(d1.vecpath, d2.vecpath, dictpath, wandb.run.dir, cfg)
# method.run()
# res = method.eval(dictpath)
# # write the predictions
# wandb.log({'accuracy': res['accuracy'], 'coverage': res['coverage']})
|
coocmap-main
|
experiments/test_accvsize_cooc.py
|
import os
import subprocess
from dataclasses import dataclass
import lzma
import wandb
import argparse
import shutil
import pandas as pd
import numpy as np
import data
import match
import evaluation
import embeddings
# from baselines import VecMap
os.environ['WANDB_IGNORE_GLOBS'] = 'lan1/*,lan2/*'
os.environ["OMP_NUM_THREADS"] = "4" # export OMP_NUM_THREADS=4
os.environ["OPENBLAS_NUM_THREADS"] = "4" # export OPENBLAS_NUM_THREADS=4
os.environ["MKL_NUM_THREADS"] = "6" # export MKL_NUM_THREADS=6
os.environ["VECLIB_MAXIMUM_THREADS"] = "4" # export VECLIB_MAXIMUM_THREADS=4
os.environ["NUMEXPR_NUM_THREADS"] = "6" # export NUMEXPR_NUM_THREADS=6
os.environ["WANDB_MODE"] = "offline"  # set to "online" to sync with the wandb cloud
defaults = dict(
lan1='enwikishuf',
lan2='eswikishuf',
eval='en-es',
size1=30,
symmetric=1,
width=5,
vectorize='sim_svd', # vectorization fasttext sim_svd trunc word2vec
dim=300,
tokentype='WordLevel', # tokenizer WordLevel, BPE
vocab_size=5000,
limit_alphabet=100,
min_frequency=5,
supervision='unsupervised',
label='none',
)
run = wandb.init(config=defaults, project='data efficiency')
base1 = os.path.join(wandb.run.dir, 'lan1')
base2 = os.path.join(wandb.run.dir, 'lan2')
os.makedirs(base1)
os.makedirs(base2)
cfg = wandb.config
def make_sized(lan, sizemb, pout, skipmb=0):
corpus = data.get_data(lan)
text = corpus.headmb(lan, skipmb+sizemb)
with open(pout, 'wt', encoding='utf-8') as fout:
fout.write(text[int(skipmb*1e6):])
p1 = os.path.join(base1, 'c.txt')
p2 = os.path.join(base2, 'c.txt')
make_sized(cfg.lan1, cfg.size1, p1)
size2 = cfg.size1 if cfg.symmetric == 1 else cfg.size2
skip2 = cfg.size1 if cfg.lan1 == cfg.lan2 else 0
make_sized(cfg.lan2, size2, p2, skipmb=skip2)
d1 = data.Corpus(p1, base1,
tokentype=cfg.tokentype, vocab_size=cfg.vocab_size, limit_alphabet=cfg.limit_alphabet, min_frequency=cfg.min_frequency,
vectorize=cfg.vectorize, width=cfg.width, dim=cfg.dim)
d2 = data.Corpus(p2, base2,
tokentype=cfg.tokentype, vocab_size=cfg.vocab_size, limit_alphabet=cfg.limit_alphabet, min_frequency=cfg.min_frequency,
vectorize=cfg.vectorize, width=cfg.width, dim=cfg.dim)
def get_evaldict():
lan1s, lan2s = cfg.eval.split('-')
eval = data.MUSEEval()
dictpath = os.path.join(wandb.run.dir, 'eval_id.dict')
with open(dictpath, 'wt', encoding='utf-8', errors='surrogateescape') as f:
v1 = d1.tokenizer.get_vocab()
v2 = d2.tokenizer.get_vocab()
intersection = set(v1.keys()).intersection(v2.keys())
print('vocab has overlap of length', len(intersection))
for w in intersection:
f.write(f'{w}\t{w}\n')
# dictid = dictpath
if lan1s != lan2s:
dictpath = os.path.join(wandb.run.dir, 'eval_dict.dict')
lanpath = eval.eval_path(f'{lan1s}-{lan2s}', type='full')
shutil.copyfile(lanpath, dictpath)
return dictpath
dictpath = get_evaldict()
@dataclass
class SearchArgs:
stochastic_interval = 10
stochastic_initial = 1
stochastic_multiplier = 2
threshold = 1e-4
maxiter = 100
eta = 1
method = 'orthogonal' # or orthogonal or lstsq
match = 'vecmap'
csls = True
args = SearchArgs()
dumpdir = os.path.join(wandb.run.dir, 'dump')
os.makedirs(dumpdir, exist_ok=True)
def evalf(sim):
# simf = match.most_diff_match(sim, k=3)
f, stats = evaluation.report_sim(sim, d1.tokenizer, d2.tokenizer, dictpath)
print(stats)
def dict_init_binary(tiebreak=1e-3):
inds = evaluation.dict_to_inds(dictpath, d1.tokenizer, d2.tokenizer, full=False)
sim = tiebreak * np.random.rand(d1.Co.shape[0], d2.Co.shape[0])
for i in range(len(inds[0])):
sim[inds[0][i], inds[1][i]] = 1
sim[0, 0] = 1
return sim
rows = []
def experiment(drop=20, dim=300, r1=99, r2=99):
print('original dim', d1.Co.shape)
def record(type, sim):
print(type)
simd = match.most_diff_match(sim, 10)
df, stats = evaluation.report_sim(simd, d1.tokenizer, d2.tokenizer, dictpath)
info = stats
info.update({'id': run.id, 'drop': drop, 'dim_p': dim, 'method_type': type})
for k, v in cfg.items():
            if k in info: print(f'Warning: {k} already exists')
info[k] = v
rows.append(info)
wandb.log({'table': wandb.Table(dataframe=pd.DataFrame.from_records(rows))})
wandb.log({'basicinfo': info})
print(info)
df.to_csv(os.path.join(dumpdir, f'{type}-{drop}-{dim}.csv'))
d1ft = d1.vecs[cfg.vectorize]
d2ft = d2.vecs[cfg.vectorize]
normproc = ['unit', 'center', 'unit']
normproc1 = ['unit']
embeddings.normalize(d1ft, normproc)
embeddings.normalize(d2ft, normproc)
_, _, sim = match.vecmap(d1ft, d2ft, args, evalf=evalf)
record(f'vecmap-{cfg.vectorize}', sim)
def f(Co):
A1 = np.sqrt(Co)
X = match.sim_vecs(A1, dim, alpha=1)
embeddings.normalize(X, normproc)
return X
X, Z = f(d1.Co), f(d2.Co)
_, _, sim = match.vecmap(X, Z, args, evalf=evalf)
record('vecmap-raw', sim)
###### coocmap ######
def f(Co):
X = np.sqrt(Co)
embeddings.normalize(X, normproc)
return X
X, Z = f(d1.Co), f(d2.Co)
_, _, simscoocmap = match.coocmapt(X, Z, args, normproc=normproc1, sim_init=None, evalf=evalf)
record('coocmap', simscoocmap)
def f(Co):
X = np.sqrt(Co)
embeddings.normalize(X, normproc)
match.clip(X, r1=r1, r2=r2)
return X
X, Z = f(d1.Co), f(d2.Co)
_, _, simscoocmap = match.coocmapt(X, Z, args, normproc=normproc1, sim_init=None, evalf=evalf)
record('coocmap-clip', simscoocmap)
dropinit = simscoocmap
def f(Co):
X = np.sqrt(Co)
embeddings.normalize(X, normproc)
X = match.svd_power(X, beta=1, drop=drop, dim=None)
embeddings.normalize(X, normproc)
match.clip(X, r1=r1, r2=r2)
return X
X, Z = f(d1.Co), f(d2.Co)
_, _, simscoocmap = match.coocmapt(X, Z, args, normproc=normproc1, sim_init=dropinit, evalf=evalf)
record('coocmap-drop', simscoocmap)
def dict_init():
dictinit = dict_init_binary()
def f(Co):
X = np.sqrt(Co)
embeddings.normalize(X, normproc)
return X
X, Z = f(d1.Co), f(d2.Co)
_, _, simdictinit = match.coocmapt(X, Z, args, normproc=normproc1, sim_init=dictinit, evalf=evalf)
record('dict-init', simdictinit)
experiment(drop=20, dim=300)
# use the official vecmap implementation instead, need to turn on data.Corpus.write_vecs to write out vectors
# method = VecMap(d1.vecpath, d2.vecpath, dictpath, wandb.run.dir, cfg)
# method.run()
# res = method.eval(dictpath)
# # write the predictions
# wandb.log({'accuracy': res['accuracy'], 'coverage': res['coverage']})
|
coocmap-main
|
experiments/test_accvsize.py
|
import os
import subprocess
from dataclasses import dataclass
import lzma
import wandb
import argparse
import shutil
import pandas as pd
import numpy as np
import data
import match
import evaluation
import embeddings
# from baselines import VecMap
os.environ['WANDB_IGNORE_GLOBS'] = 'lan1/*,lan2/*'
os.environ["OMP_NUM_THREADS"] = "4" # export OMP_NUM_THREADS=4
os.environ["OPENBLAS_NUM_THREADS"] = "4" # export OPENBLAS_NUM_THREADS=4
os.environ["MKL_NUM_THREADS"] = "6" # export MKL_NUM_THREADS=6
os.environ["VECLIB_MAXIMUM_THREADS"] = "4" # export VECLIB_MAXIMUM_THREADS=4
os.environ["NUMEXPR_NUM_THREADS"] = "6" # export NUMEXPR_NUM_THREADS=6
defaults = dict(
# data
lan1='enwikishuf',
lan2='eswikishuf',
eval='en-es',
size1=10,
# size2=20, skip2=10,
symmetric=1,
width=5,
    # vectorization: fasttext, sim_svd, trunc, word2vec
vectorize='trunc',
dim=300,
# tokenizer WordLevel, BPE
tokentype='WordLevel',
vocab_size=100,
limit_alphabet=100,
min_frequency=1,
# experiment
supervision='basic-init',
label='none',
)
run = wandb.init(config=defaults, project='data efficiency')
base1 = os.path.join(wandb.run.dir, 'lan1')
base2 = os.path.join(wandb.run.dir, 'lan2')
os.makedirs(base1)
os.makedirs(base2)
cfg = wandb.config
def make_sized(lan, sizemb, pout, skipmb=0):
corpus = data.get_data(lan)
text = corpus.headmb(lan, skipmb+sizemb)
with open(pout, 'wt', encoding='utf-8') as fout:
fout.write(text[int(skipmb*1e6):])
p1 = os.path.join(base1, 'c.txt')
p2 = os.path.join(base2, 'c.txt')
make_sized(cfg.lan1, cfg.size1, p1)
size2 = cfg.size1 if cfg.symmetric == 1 else cfg.size2
skip2 = cfg.size1 if cfg.lan1 == cfg.lan2 else 0
make_sized(cfg.lan2, size2, p2, skipmb=skip2)
##### often fails
d1 = data.Corpus(p1, base1,
tokentype=cfg.tokentype, vocab_size=cfg.vocab_size, limit_alphabet=cfg.limit_alphabet, min_frequency=cfg.min_frequency,
vectorize=cfg.vectorize, width=cfg.width, dim=cfg.dim)
d2 = data.Corpus(p2, base2,
tokentype=cfg.tokentype, vocab_size=cfg.vocab_size, limit_alphabet=cfg.limit_alphabet, min_frequency=cfg.min_frequency,
vectorize=cfg.vectorize, width=cfg.width, dim=cfg.dim)
def get_evaldict():
lan1s, lan2s = cfg.eval.split('-')
eval = data.MUSEEval()
dictpath = os.path.join(wandb.run.dir, 'eval_id.dict')
with open(dictpath, 'wt', encoding='utf-8', errors='surrogateescape') as f:
v1 = d1.tokenizer.get_vocab()
v2 = d2.tokenizer.get_vocab()
intersection = set(v1.keys()).intersection(v2.keys())
for w in intersection:
f.write(f'{w}\t{w}\n')
# dictid = dictpath
if lan1s != lan2s:
dictpath = os.path.join(wandb.run.dir, 'eval_dict.dict')
lanpath = eval.eval_path(f'{lan1s}-{lan2s}', type='full')
shutil.copyfile(lanpath, dictpath)
return dictpath
dictpath = get_evaldict()
@dataclass
class SearchArgs:
stochastic_interval = 20
stochastic_add = 1e-1
stochastic_multiplier = 2
threshold = 1e-4
stochastic_initial = 1
maxswaps = 100
maxiter = 100
eta = 1
#
method = 'orthogonal' # or orthogonal or lstsq
match = 'vecmap'
csls = True
args = SearchArgs()
dumpdir = os.path.join(wandb.run.dir, 'dump')
os.makedirs(dumpdir, exist_ok=True)
def evalf(sim):
# simf = match.most_diff_match(sim, k=3)
f, stats = evaluation.report_sim(sim, d1.tokenizer, d2.tokenizer, dictpath)
print(stats)
def dict_init_binary():
inds = evaluation.dict_to_inds(dictpath, d1.tokenizer, d2.tokenizer, full=False)
sim = np.zeros((d1.Co.shape[0], d2.Co.shape[0]))
for i in range(len(inds[0])):
sim[inds[0][i], inds[1][i]] = 1
return sim
rows = []
def experiment(drop=20, dim=300, r1=1, r2=1):
print('original dim', d1.Co.shape)
def record(type, sim):
print(type)
# plt.figure()
# plt.imshow(sims[type])
simd = match.most_diff_match(sim, 10)
df, stats = evaluation.report_sim(simd, d1.tokenizer, d2.tokenizer, dictpath)
info = stats
info.update({'id': run.id, 'drop': drop, 'dim_p': dim, 'method_type': type})
for k, v in cfg.items():
            if k in info: print(f'Warning: {k} already exists')
info[k] = v
rows.append(info)
wandb.log({'table': wandb.Table(dataframe=pd.DataFrame.from_records(rows))})
wandb.log({'basicinfo': info})
print(info)
df.to_csv(os.path.join(dumpdir, f'{type}-{drop}-{dim}.csv'))
normproc1 = ['unit']
normproc = ['unit', 'center', 'unit']
A1 = np.sqrt(d1.Co)
A2 = np.sqrt(d2.Co)
embeddings.normalize(A1, normproc)
embeddings.normalize(A2, normproc)
if cfg.supervision == 'common-init':
coocinit = match.match_sim(A1, A2, sort=True, metric='cosine', norm_proc=['unit', 'center', 'unit'])
elif cfg.supervision == 'clip-init':
A1c = np.array(A1)
A2c = np.array(A2)
match.clip(A1c, r1=99, r2=99)
match.clip(A2c, r1=99, r2=99)
coocinit = match.match_sim(A1c, A2c, sort=True, metric='cosine', norm_proc=['unit', 'center', 'unit'])
else:
coocinit = None
# d1ft = d1.vecs[cfg.vectorize]
# d2ft = d2.vecs[cfg.vectorize]
# embeddings.normalize(d1ft, normproc)
# embeddings.normalize(d2ft, normproc)
# _, _, sim = match.vecmap(d1ft, d2ft, args, evalf=evalf, sim_init=coocinit)
# record(f'vecmap-{cfg.vectorize}', sim)
def coocmapvec():
def sqrt_sim(x):
u, s, vt = np.linalg.svd(x, full_matrices=False)
return (u*s).dot(u.T)
A1f = sqrt_sim(d1ft)
A2f = sqrt_sim(d2ft)
embeddings.normalize(A1f, normproc)
embeddings.normalize(A2f, normproc)
_, _, simscoocmap = match.coocmapt(A1f, A2f, args, normproc=normproc1, sim_init=coocinit, evalf=evalf)
record(f'coocmap-{cfg.vectorize}', simscoocmap)
A1c = np.array(A1f)
A2c = np.array(A2f)
match.clip(A1c, r1=99, r2=99)
match.clip(A2c, r1=99, r2=99)
_, _, simscoocmap = match.coocmapt(A1c, A2c, args, normproc=normproc1, sim_init=coocinit, evalf=evalf)
record(f'coocmap-{cfg.vectorize}-clip', simscoocmap)
initdrop = simscoocmap
A1d = match.svd_power(A1f, beta=1, drop=drop, dim=dim)
A2d = match.svd_power(A2f, beta=1, drop=drop, dim=dim)
embeddings.normalize(A1d, normproc)
embeddings.normalize(A2d, normproc)
match.clip(A1d, r1=99, r2=99)
match.clip(A2d, r1=99, r2=99)
_, _, simscoocmap = match.coocmapt(A1d, A2d, args, normproc=normproc1, sim_init=initdrop, evalf=evalf)
record(f'coocmap-{cfg.vectorize}-drop', simscoocmap)
# coocmapvec()
# what if I get the correspondence analysis vectors here??
# sim0 = match.match_sim(A1, A2, sort=True, metric='cosine', norm_proc=['unit', 'center', 'unit'])
A1f = match.svd_power(A1, beta=1, drop=None, dim=dim)
A2f = match.svd_power(A2, beta=1, drop=None, dim=dim)
embeddings.normalize(A1f, normproc)
embeddings.normalize(A2f, normproc)
# _, _, simscoocmap = match.coocmapt(A1f, A2f, args, normproc=normproc1, sim_init=coocinit, evalf=evalf)
# record('coocmap', simscoocmap)
A1c = np.array(A1f)
A2c = np.array(A2f)
match.clip(A1c, r1=100-r1, r2=100-r2)
match.clip(A2c, r1=100-r1, r2=100-r2)
_, _, simscoocmap = match.coocmapt(A1c, A2c, args, normproc=normproc1, sim_init=coocinit, evalf=evalf)
record(f'coocmap-clip-{r1:.1f}-{r2:.1f}', simscoocmap)
dropinit = simscoocmap
A1f = match.svd_power(A1, beta=1, drop=drop, dim=dim)
A2f = match.svd_power(A2, beta=1, drop=drop, dim=dim)
embeddings.normalize(A1f, normproc)
embeddings.normalize(A2f, normproc)
match.clip(A1f, r1=100-r1, r2=100-r2)
match.clip(A2f, r1=100-r1, r2=100-r2)
# dropinit = match.most_diff_match(simscoocmap, 10)
_, _, sim = match.coocmapt(A1f, A2f, args, normproc=normproc1, sim_init=dropinit, evalf=evalf)
record(f'coocmap-drop-{r1:.1f}-{r2:.1f}', sim)
A1f = match.svd_power(A1c, beta=1, drop=drop, dim=dim)
A2f = match.svd_power(A2c, beta=1, drop=drop, dim=dim)
embeddings.normalize(A1f, normproc)
embeddings.normalize(A2f, normproc)
match.clip(A1f, r1=100-r1, r2=100-r2)
match.clip(A2f, r1=100-r1, r2=100-r2)
_, _, sim = match.coocmapt(A1f, A2f, args, normproc=normproc1, sim_init=dropinit, evalf=evalf)
record(f'coocmap-clip-drop-{r1:.1f}-{r2:.1f}', sim)
# generate a simple grid enumeration
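# The grid below sweeps r1/r2, which experiment() forwards to match.clip as
# 100 - r1 and 100 - r2 (values like 99 or 98.5 elsewhere in this file suggest
# percentile thresholds for capping extreme entries). drop goes to match.svd_power,
# which appears to remove `drop` directions from the SVD and cap the rank at `dim`;
# see the match module for the exact semantics.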
r1 = [0.5, 1, 2, 5]
r2 = [0.5, 1, 2, 5]
from itertools import product
grid_plan = list(product(r1, r2))
print(grid_plan)
np.random.shuffle(grid_plan)
for r1, r2 in grid_plan:
drop = np.ceil(min(20, int(cfg.dim) * 20/400)) # 400 -> 20
experiment(drop, int(cfg.dim), r1, r2)
# method = VecMap(d1.vecpath, d2.vecpath, dictpath, wandb.run.dir, cfg)
# method.run()
# res = method.eval(dictpath)
# # write the predictions
# wandb.log({'accuracy': res['accuracy'], 'coverage': res['coverage']})
|
coocmap-main
|
experiments/test_dropclip.py
|
import os
import subprocess
from dataclasses import dataclass
import lzma
import wandb
import argparse
import shutil
import pandas as pd
import numpy as np
import data
import match
import evaluation
import embeddings
# from baselines import VecMap
os.environ['WANDB_IGNORE_GLOBS'] = 'lan1/*,lan2/*'
os.environ["OMP_NUM_THREADS"] = "4" # export OMP_NUM_THREADS=4
os.environ["OPENBLAS_NUM_THREADS"] = "4" # export OPENBLAS_NUM_THREADS=4
os.environ["MKL_NUM_THREADS"] = "6" # export MKL_NUM_THREADS=6
os.environ["VECLIB_MAXIMUM_THREADS"] = "4" # export VECLIB_MAXIMUM_THREADS=4
os.environ["NUMEXPR_NUM_THREADS"] = "6" # export NUMEXPR_NUM_THREADS=6
defaults = dict(
# data
lan1='enwikishuf',
lan2='eswikishuf',
eval='en-es',
size1=10,
# size2=20, skip2=10,
symmetric=1,
width=3,
# vectorization fasttext sim_svd count
vectorize='trunc',
dim=300,
# tokenizer WordLevel, BPE
tokentype='WordLevel',
vocab_size=100,
limit_alphabet=100,
min_frequency=1,
# experiment
supervision='basic-init',
label='clipanddropmore',
)
run = wandb.init(config=defaults, project='data efficiency')
base1 = os.path.join(wandb.run.dir, 'lan1')
base2 = os.path.join(wandb.run.dir, 'lan2')
os.makedirs(base1)
os.makedirs(base2)
cfg = wandb.config
def make_sized(lan, sizemb, pout, skipmb=0):
corpus = data.get_data(lan)
text = corpus.headmb(lan, skipmb+sizemb)
with open(pout, 'wt', encoding='utf-8') as fout:
fout.write(text[int(skipmb*1e6):])
p1 = os.path.join(base1, 'c.txt')
p2 = os.path.join(base2, 'c.txt')
make_sized(cfg.lan1, cfg.size1, p1)
size2 = cfg.size1 if cfg.symmetric == 1 else cfg.size2
skip2 = cfg.size1 if cfg.lan1 == cfg.lan2 else 0
make_sized(cfg.lan2, size2, p2, skipmb=skip2)
##### often fails
d1 = data.Corpus(p1, base1,
tokentype=cfg.tokentype, vocab_size=cfg.vocab_size, limit_alphabet=cfg.limit_alphabet, min_frequency=cfg.min_frequency,
vectorize=cfg.vectorize, width=cfg.width, dim=cfg.dim)
d2 = data.Corpus(p2, base2,
tokentype=cfg.tokentype, vocab_size=cfg.vocab_size, limit_alphabet=cfg.limit_alphabet, min_frequency=cfg.min_frequency,
vectorize=cfg.vectorize, width=cfg.width, dim=cfg.dim)
def get_evaldict():
lan1s, lan2s = cfg.eval.split('-')
eval = data.MUSEEval()
dictpath = os.path.join(wandb.run.dir, 'eval_id.dict')
with open(dictpath, 'wt', encoding='utf-8', errors='surrogateescape') as f:
v1 = d1.tokenizer.get_vocab()
v2 = d2.tokenizer.get_vocab()
intersection = set(v1.keys()).intersection(v2.keys())
for w in intersection:
f.write(f'{w}\t{w}\n')
# dictid = dictpath
if lan1s != lan2s:
dictpath = os.path.join(wandb.run.dir, 'eval_dict.dict')
lanpath = eval.eval_path(f'{lan1s}-{lan2s}', type='full')
shutil.copyfile(lanpath, dictpath)
return dictpath
dictpath = get_evaldict()
@dataclass
class SearchArgs:
stochastic_interval = 10
stochastic_add = 1e-1
stochastic_multiplier = 2
threshold = 1e-4
stochastic_initial = 1
maxswaps = 100
maxiter = 100
eta = 1
#
method = 'orthogonal' # or orthogonal or lstsq
match = 'vecmap'
csls = True
args = SearchArgs()
dumpdir = os.path.join(wandb.run.dir, 'dump')
os.makedirs(dumpdir, exist_ok=True)
def evalf(sim):
# simf = match.most_diff_match(sim, k=3)
f, stats = evaluation.report_sim(sim, d1.tokenizer, d2.tokenizer, dictpath)
print(stats)
def dict_init_binary():
inds = evaluation.dict_to_inds(dictpath, d1.tokenizer, d2.tokenizer, full=False)
sim = np.zeros((d1.Co.shape[0], d2.Co.shape[0]))
for i in range(len(inds[0])):
sim[inds[0][i], inds[1][i]] = 1
return sim
rows = []
def experiment(drop=20, dim=300):
print('original dim', d1.Co.shape)
def record(type, sim):
print(type)
# plt.figure()
# plt.imshow(sims[type])
simd = match.most_diff_match(sim, 10)
df, stats = evaluation.report_sim(simd, d1.tokenizer, d2.tokenizer, dictpath)
info = stats
info.update({'id': run.id, 'drop': drop, 'dim_p': dim, 'method_type': type})
for k, v in cfg.items():
            if k in info: print(f'Warning: {k} already exists')
info[k] = v
rows.append(info)
wandb.log({'table': wandb.Table(dataframe=pd.DataFrame.from_records(rows))})
wandb.log({'basicinfo': info})
print(info)
df.to_csv(os.path.join(dumpdir, f'{type}-{drop}-{dim}.csv'))
normproc1 = ['unit']
normproc = ['unit', 'center', 'unit']
A1 = np.sqrt(d1.Co)
A2 = np.sqrt(d2.Co)
embeddings.normalize(A1, normproc)
embeddings.normalize(A2, normproc)
if cfg.supervision == 'common-init':
coocinit = match.match_sim(A1, A2, sort=True, metric='cosine', norm_proc=['unit', 'center', 'unit'])
elif cfg.supervision == 'clip-init':
A1c = np.array(A1)
A2c = np.array(A2)
match.clip(A1c, r1=99, r2=99)
match.clip(A2c, r1=99, r2=99)
coocinit = match.match_sim(A1c, A2c, sort=True, metric='cosine', norm_proc=['unit', 'center', 'unit'])
else:
coocinit = None
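    # coocinit is a seed similarity matrix for the 'common-init' / 'clip-init'
    # supervision modes (built by match.match_sim on optionally clipped association
    # matrices) and None in the unsupervised case; it is passed to the matching
    # calls below through their sim_init argument.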
d1ft = d1.vecs[cfg.vectorize]
d2ft = d2.vecs[cfg.vectorize]
embeddings.normalize(d1ft, normproc)
embeddings.normalize(d2ft, normproc)
_, _, sim = match.vecmap(d1ft, d2ft, args, evalf=evalf, sim_init=coocinit)
record(f'vecmap-{cfg.vectorize}', sim)
def coocmapvec():
def sqrt_sim(x):
u, s, vt = np.linalg.svd(x, full_matrices=False)
return (u*s).dot(u.T)
A1f = sqrt_sim(d1ft)
A2f = sqrt_sim(d2ft)
embeddings.normalize(A1f, normproc)
embeddings.normalize(A2f, normproc)
_, _, simscoocmap = match.coocmapt(A1f, A2f, args, normproc=normproc1, sim_init=coocinit, evalf=evalf)
record(f'coocmap-{cfg.vectorize}', simscoocmap)
A1c = np.array(A1f)
A2c = np.array(A2f)
match.clip(A1c, r1=99, r2=99)
match.clip(A2c, r1=99, r2=99)
_, _, simscoocmap = match.coocmapt(A1c, A2c, args, normproc=normproc1, sim_init=coocinit, evalf=evalf)
record(f'coocmap-{cfg.vectorize}-clip', simscoocmap)
initdrop = simscoocmap
A1d = match.svd_power(A1f, beta=1, drop=drop, dim=dim)
A2d = match.svd_power(A2f, beta=1, drop=drop, dim=dim)
embeddings.normalize(A1d, normproc)
embeddings.normalize(A2d, normproc)
match.clip(A1d, r1=99, r2=99)
match.clip(A2d, r1=99, r2=99)
_, _, simscoocmap = match.coocmapt(A1d, A2d, args, normproc=normproc1, sim_init=initdrop, evalf=evalf)
record(f'coocmap-{cfg.vectorize}-drop', simscoocmap)
coocmapvec()
# what if I get the correspondence analysis vectors here??
# sim0 = match.match_sim(A1, A2, sort=True, metric='cosine', norm_proc=['unit', 'center', 'unit'])
A1f = match.svd_power(A1, beta=1, drop=None, dim=dim)
A2f = match.svd_power(A2, beta=1, drop=None, dim=dim)
embeddings.normalize(A1f, normproc)
embeddings.normalize(A2f, normproc)
_, _, simscoocmap = match.coocmapt(A1f, A2f, args, normproc=normproc1, sim_init=coocinit, evalf=evalf)
record('coocmap', simscoocmap)
A1c = np.array(A1f)
A2c = np.array(A2f)
match.clip(A1c, r1=98.5, r2=98.5)
match.clip(A2c, r1=98.5, r2=98.5)
_, _, simscoocmap = match.coocmapt(A1c, A2c, args, normproc=normproc1, sim_init=coocinit, evalf=evalf)
record('coocmap-clip-1.5', simscoocmap)
dropinit_more = simscoocmap
A1c = np.array(A1f)
A2c = np.array(A2f)
match.clip(A1c, r1=99, r2=99)
match.clip(A2c, r1=99, r2=99)
_, _, simscoocmap = match.coocmapt(A1c, A2c, args, normproc=normproc1, sim_init=coocinit, evalf=evalf)
record('coocmap-clip', simscoocmap)
dropinit = simscoocmap
A1f = match.svd_power(A1, beta=1, drop=drop, dim=dim)
A2f = match.svd_power(A2, beta=1, drop=drop, dim=dim)
embeddings.normalize(A1f, normproc)
embeddings.normalize(A2f, normproc)
match.clip(A1f, r1=99, r2=99)
match.clip(A2f, r1=99, r2=99)
# dropinit = match.most_diff_match(simscoocmap, 10)
_, _, sim = match.coocmapt(A1f, A2f, args, normproc=normproc1, sim_init=dropinit, evalf=evalf)
record('coocmap-drop', sim)
A1f = match.svd_power(A1, beta=1, drop=drop, dim=dim)
A2f = match.svd_power(A2, beta=1, drop=drop, dim=dim)
embeddings.normalize(A1f, normproc)
embeddings.normalize(A2f, normproc)
match.clip(A1f, r1=98.5, r2=98.5)
match.clip(A2f, r1=98.5, r2=98.5)
_, _, sim = match.coocmapt(A1f, A2f, args, normproc=normproc1, sim_init=dropinit_more, evalf=evalf)
record('coocmap-drop-1.5', sim)
# normproc1 = ['unit']
# dictinit = dict_init_binary()
# _, _, simdictinit = match.coocmapt(A1, A2, args, normproc=normproc1, sim_init=dictinit, evalf=evalf)
# record('dict-init', simdictinit)
# generate a simple grid enumeration
drop = np.ceil(min(20, int(cfg.dim) * 20/400)) # 400 -> 20
experiment(int(drop), int(cfg.dim))
# method = VecMap(d1.vecpath, d2.vecpath, dictpath, wandb.run.dir, cfg)
# method.run()
# res = method.eval(dictpath)
# # write the predictions
# wandb.log({'accuracy': res['accuracy'], 'coverage': res['coverage']})
|
coocmap-main
|
experiments/test_accvdim.py
|
import os
from dataclasses import dataclass
import wandb
import shutil
import pandas as pd
import numpy as np
import data
import match
import evaluation
import embeddings
# experimental parameters
defaults = dict(
lan1='./europarl-v7.hu-en.en',
lan2='./europarl-v7.hu-en.hu',
eval='en-hu',
size1=20,
width=5,
symmetric=1,
vectorize='trunc', # fasttext sim_svd trunc word2vec
dim=300,
tokentype='WordLevel', # tokenizer WordLevel, BPE
vocab_size=5000,
limit_alphabet=100,
min_frequency=5,
supervision='unsupervised',
label='none',
)
os.environ["OMP_NUM_THREADS"] = "4" # export OMP_NUM_THREADS=4
os.environ["OPENBLAS_NUM_THREADS"] = "4" # export OPENBLAS_NUM_THREADS=4
os.environ["MKL_NUM_THREADS"] = "6" # export MKL_NUM_THREADS=6
os.environ["VECLIB_MAXIMUM_THREADS"] = "4" # export VECLIB_MAXIMUM_THREADS=4
os.environ["NUMEXPR_NUM_THREADS"] = "6" # export NUMEXPR_NUM_THREADS=6
os.environ['WANDB_IGNORE_GLOBS'] = 'lan1/*,lan2/*'
os.environ["WANDB_MODE"] = "offline" # switch to "online" to use wandb cloud sync
run = wandb.init(config=defaults, project='data efficiency')
base1 = os.path.join(wandb.run.dir, 'lan1')
base2 = os.path.join(wandb.run.dir, 'lan2')
os.makedirs(base1)
os.makedirs(base2)
cfg = wandb.config
def make_sized(lan, sizemb, pout, skipmb=0):
corpus = data.get_data(lan)
text = corpus.headmb(lan, skipmb+sizemb)
with open(pout, 'wt', encoding='utf-8') as fout:
fout.write(text[int(skipmb*1e6):])
p1 = os.path.join(base1, 'c.txt')
p2 = os.path.join(base2, 'c.txt')
make_sized(cfg.lan1, cfg.size1, p1)
size2 = cfg.size1 if cfg.symmetric == 1 else cfg.size2
skip2 = cfg.size1 if cfg.lan1 == cfg.lan2 else 0
make_sized(cfg.lan2, size2, p2, skipmb=skip2)
d1 = data.Corpus(p1, base1,
tokentype=cfg.tokentype, vocab_size=cfg.vocab_size, limit_alphabet=cfg.limit_alphabet, min_frequency=cfg.min_frequency,
vectorize=cfg.vectorize, width=cfg.width, dim=cfg.dim)
d2 = data.Corpus(p2, base2,
tokentype=cfg.tokentype, vocab_size=cfg.vocab_size, limit_alphabet=cfg.limit_alphabet, min_frequency=cfg.min_frequency,
vectorize=cfg.vectorize, width=cfg.width, dim=cfg.dim)
def get_evaldict():
lan1s, lan2s = cfg.eval.split('-')
eval = data.MUSEEval()
dictpath = os.path.join(wandb.run.dir, 'eval_id.dict')
with open(dictpath, 'wt', encoding='utf-8', errors='surrogateescape') as f:
v1 = d1.tokenizer.get_vocab()
v2 = d2.tokenizer.get_vocab()
intersection = set(v1.keys()).intersection(v2.keys())
print('vocab has overlap of length', len(intersection))
for w in intersection:
f.write(f'{w}\t{w}\n')
# dictid = dictpath
if lan1s != lan2s:
dictpath = os.path.join(wandb.run.dir, 'eval_dict.dict')
lanpath = eval.eval_path(f'{lan1s}-{lan2s}', type='full')
shutil.copyfile(lanpath, dictpath)
return dictpath
dictpath = get_evaldict()
@dataclass
class SearchArgs:
stochastic_interval = 5
stochastic_initial = 1
stochastic_multiplier = 2
threshold = 1e-4
maxiter = 100
eta = 1
method = 'orthogonal' # or orthogonal or lstsq
match = 'vecmap'
csls = True
args = SearchArgs()
dumpdir = os.path.join(wandb.run.dir, 'dump')
os.makedirs(dumpdir, exist_ok=True)
def evalf(sim):
# simf = match.most_diff_match(sim, k=10)
f, stats = evaluation.report_sim(sim, d1.tokenizer, d2.tokenizer, dictpath)
print(stats)
def dict_init_binary(tiebreak=1e-3):
inds = evaluation.dict_to_inds(dictpath, d1.tokenizer, d2.tokenizer, full=False)
sim = tiebreak * np.random.rand(d1.Co.shape[0], d2.Co.shape[0])
for i in range(len(inds[0])):
sim[inds[0][i], inds[1][i]] = 1
sim[0, 0] = 1
return sim
rows = []
def experiment(drop=20, dim=300, r1=99, r2=99):
def record(type, sim):
print(type)
simd = match.most_diff_match(sim, 10)
df, stats = evaluation.report_sim(simd, d1.tokenizer, d2.tokenizer, dictpath)
info = stats
info.update({'id': run.id, 'drop': drop, 'dim_p': dim, 'method_type': type})
for k, v in cfg.items():
            if k in info: print(f'Warning: {k} already exists')
info[k] = v
rows.append(info)
wandb.log({'table': wandb.Table(dataframe=pd.DataFrame.from_records(rows))})
wandb.log({'basicinfo': info})
print(info)
df.to_csv(os.path.join(dumpdir, f'{type}-{drop}-{dim}.csv'))
normproc = ['unit', 'center', 'unit']
normproc1 = ['unit']
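    # Base coocmap pipeline: take the element-wise square root of the raw
    # co-occurrence counts, apply unit/center/unit normalization, then run the
    # matching routine match.coocmapt with 'unit' normalization (normproc1)
    # inside the search.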
def f(Co):
X = np.sqrt(Co)
embeddings.normalize(X, normproc)
return X
X, Z = f(d1.Co), f(d2.Co)
_, _, simscoocmap = match.coocmapt(X, Z, args, normproc=normproc1, sim_init=None, evalf=evalf)
record('coocmap', simscoocmap)
def clip_drop():
def f(Co):
X = np.sqrt(Co)
embeddings.normalize(X, normproc)
match.clip(X, r1=r1, r2=r2)
return X
X, Z = f(d1.Co), f(d2.Co)
_, _, simscoocmap = match.coocmapt(X, Z, args, normproc=normproc1, sim_init=None, evalf=evalf)
record('coocmap-clip', simscoocmap)
dropinit = simscoocmap
def f(Co):
X = np.sqrt(Co)
embeddings.normalize(X, normproc)
X = match.svd_power(X, beta=1, drop=drop, dim=None)
embeddings.normalize(X, normproc)
match.clip(X, r1=r1, r2=r2)
return X
X, Z = f(d1.Co), f(d2.Co)
_, _, simscoocmap = match.coocmapt(X, Z, args, normproc=normproc1, sim_init=dropinit, evalf=evalf)
record('coocmap-drop', simscoocmap)
# clip_drop() # run clip and drop as well
experiment(drop=20, dim=300)
|
coocmap-main
|
experiments/test_coocmap.py
|
import os
import subprocess
from dataclasses import dataclass
import lzma
import wandb
import argparse
import shutil
import pandas as pd
import numpy as np
import data
import match
import evaluation
import embeddings
# from baselines import VecMap
os.environ['WANDB_IGNORE_GLOBS'] = 'lan1/*,lan2/*'
os.environ["OMP_NUM_THREADS"] = "4" # export OMP_NUM_THREADS=4
os.environ["OPENBLAS_NUM_THREADS"] = "4" # export OPENBLAS_NUM_THREADS=4
os.environ["MKL_NUM_THREADS"] = "6" # export MKL_NUM_THREADS=6
os.environ["VECLIB_MAXIMUM_THREADS"] = "4" # export VECLIB_MAXIMUM_THREADS=4
os.environ["NUMEXPR_NUM_THREADS"] = "6" # export NUMEXPR_NUM_THREADS=6
defaults = dict(
# data
lan1='enwikishuf',
lan2='eswikishuf',
eval='en-es',
size1=50,
# size2=20, skip2=10,
symmetric=1,
width=5,
# vectorization fasttext sim_svd count
vectorize='fasttext',
dim=300,
# tokenizer WordLevel, BPE
tokentype='WordLevel',
vocab_size=5000,
limit_alphabet=100,
min_frequency=5,
# experiment
supervision='unsupervised',
label='iter100',
)
run = wandb.init(config=defaults, project='data efficiency')
base1 = os.path.join(wandb.run.dir, 'lan1')
base2 = os.path.join(wandb.run.dir, 'lan2')
os.makedirs(base1)
os.makedirs(base2)
cfg = wandb.config
def make_sized(lan, sizemb, pout, skipmb=0):
corpus = data.get_data(lan)
text = corpus.headmb(lan, skipmb+sizemb)
with open(pout, 'wt', encoding='utf-8') as fout:
fout.write(text[int(skipmb*1e6):])
p1 = os.path.join(base1, 'c.txt')
p2 = os.path.join(base2, 'c.txt')
make_sized(cfg.lan1, cfg.size1, p1)
size2 = cfg.size1 if cfg.symmetric == 1 else cfg.size2
skip2 = cfg.size1 if cfg.lan1 == cfg.lan2 else 0
make_sized(cfg.lan2, size2, p2, skipmb=skip2)
d1 = data.Corpus(p1, base1,
tokentype=cfg.tokentype, vocab_size=cfg.vocab_size, limit_alphabet=cfg.limit_alphabet, min_frequency=cfg.min_frequency,
vectorize=cfg.vectorize, width=cfg.width, dim=cfg.dim)
d2 = data.Corpus(p2, base2,
tokentype=cfg.tokentype, vocab_size=cfg.vocab_size, limit_alphabet=cfg.limit_alphabet, min_frequency=cfg.min_frequency,
vectorize=cfg.vectorize, width=cfg.width, dim=cfg.dim)
def get_evaldict():
lan1s, lan2s = cfg.eval.split('-')
eval = data.MUSEEval()
dictpath = os.path.join(wandb.run.dir, 'eval_id.dict')
with open(dictpath, 'wt', encoding='utf-8', errors='surrogateescape') as f:
v1 = d1.tokenizer.get_vocab()
v2 = d2.tokenizer.get_vocab()
intersection = set(v1.keys()).intersection(v2.keys())
print('vocab has overlap of length', len(intersection))
for w in intersection:
f.write(f'{w}\t{w}\n')
# dictid = dictpath
if lan1s != lan2s:
dictpath = os.path.join(wandb.run.dir, 'eval_dict.dict')
lanpath = eval.eval_path(f'{lan1s}-{lan2s}', type='full')
shutil.copyfile(lanpath, dictpath)
return dictpath
dictpath = get_evaldict()
@dataclass
class SearchArgs:
stochastic_interval = 10
threshold = 1e-4
stochastic_initial = 1
stochastic_multiplier = 2
maxswaps = 100
maxiter = 50
eta = 1
#
method = 'orthogonal' # or orthogonal or lstsq
match = 'vecmap'
csls = True
args = SearchArgs()
dumpdir = os.path.join(wandb.run.dir, 'dump')
os.makedirs(dumpdir, exist_ok=True)
def evalf(sim):
# simf = match.most_diff_match(sim, k=3)
f, stats = evaluation.report_sim(sim, d1.tokenizer, d2.tokenizer, dictpath)
print(stats)
def dict_init_binary():
inds = evaluation.dict_to_inds(dictpath, d1.tokenizer, d2.tokenizer, full=False)
sim = np.zeros((d1.Co.shape[0], d2.Co.shape[0]))
for i in range(len(inds[0])):
sim[inds[0][i], inds[1][i]] = 1
return sim
rows = []
def record(type, sim):
print('recording', type)
simd = match.most_diff_match(sim, 10)
df, stats = evaluation.report_sim(simd, d1.tokenizer, d2.tokenizer, dictpath)
info = stats
info.update({'id': run.id, 'method_type': type})
for k, v in cfg.items():
        if k in info: print(f'Warning: {k} already exists')
info[k] = v
rows.append(info)
wandb.log({'table': wandb.Table(dataframe=pd.DataFrame.from_records(rows))})
wandb.log({'basicinfo': info})
print(info)
df.to_csv(os.path.join(dumpdir, f'{type}.csv'))
def experiment(dim):
namemap = {'vecmap': 'bidir', 'coocmap': 'greedy'}
name = namemap[args.match]
    label = f'-{name}-csls' if args.csls else f'-{name}'
print('original dim', d1.Co.shape)
d1ft = d1.vecs[cfg.vectorize]
d2ft = d2.vecs[cfg.vectorize]
normproc = ['unit', 'center', 'unit']
embeddings.normalize(d1ft, normproc)
embeddings.normalize(d2ft, normproc)
_, _, sim = match.vecmap(d1ft, d2ft, args, evalf=evalf)
record('vecmap-fasttext' + label, sim)
def sqrt_sim(x):
u, s, vt = np.linalg.svd(x, full_matrices=False)
return (u*s).dot(u.T)
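    # sqrt_sim maps embeddings X = U S V^T to U S U^T = (X X^T)^{1/2}, a
    # vocabulary-by-vocabulary matrix that depends on X only through inner
    # products between rows; this puts fasttext vectors on the same footing as
    # the count-based association matrices used by coocmap below.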
A1f = sqrt_sim(d1ft)
A2f = sqrt_sim(d2ft)
normproc = ['unit', 'center', 'unit']
embeddings.normalize(A1f, normproc)
embeddings.normalize(A2f, normproc)
normproc1 = ['unit']
_, _, simscoocmap = match.coocmapt(A1f, A2f, args, normproc=normproc1, sim_init=None, evalf=evalf)
record(f'coocmap-{cfg.vectorize}-sqrt' + label, simscoocmap)
A1 = np.sqrt(d1.Co)
A2 = np.sqrt(d2.Co)
dn1 = match.sim_vecs(A1, dim, alpha=1)
dn2 = match.sim_vecs(A2, dim, alpha=1)
# dn1 = np.array(d1.vec)
# dn2 = np.array(d2.vec)
embeddings.normalize(dn1, normproc)
embeddings.normalize(dn2, normproc)
_, _, sim = match.vecmap(dn1, dn2, args, evalf=evalf)
record('vecmap-raw' + label, sim)
###### coocmap ######
embeddings.normalize(A1, normproc)
embeddings.normalize(A2, normproc)
normproc1 = ['unit']
_, _, simscoocmap = match.coocmapt(A1, A2, args, normproc=normproc1, sim_init=None, evalf=evalf)
record('coocmap' + label, simscoocmap)
# what if I get the correspondence analysis vectors here??
# sim0 = match.match_sim(A1, A2, sort=True, metric='cosine', norm_proc=['unit', 'center', 'unit'])
normproc1 = ['unit']
dictinit = dict_init_binary()
_, _, simdictinit = match.coocmapt(A1, A2, args, normproc=normproc1, sim_init=dictinit, evalf=evalf)
record('dict-init' + label, simdictinit)
args.match = 'vecmap'
args.csls = True
experiment(cfg.dim)
args.csls = False
experiment(cfg.dim)
# args.match = 'coocmap'
# args.csls = True
# experiment(cfg.dim)
# args.csls = False
# experiment(cfg.dim)
|
coocmap-main
|
experiments/test_matching.py
|
from setuptools import setup
from Cython.Build import cythonize
import numpy
# python setup.py build_ext --inplace
setup(
ext_modules=cythonize(
['cooc_count.pyx'],
annotate=True),
include_dirs=[numpy.get_include()]
)
|
coocmap-main
|
fast/setup.py
|
#!/usr/bin/env python3
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import math
import torch
import random
import numpy as np
from tqdm import tqdm
import os
import matplotlib.pyplot as plt
import sys
sys.path.append("fisher_information_loss")
import models
import dataloading
def recons_attack(model, X, y, lam, link_func):
"""
Runs the Balle et al. GLM attack https://arxiv.org/abs/2201.04845.
"""
def compute_grad(model, X, y):
return ((X @ model.theta).sigmoid() - y)[:, None] * X
n = len(y)
grad = compute_grad(model, X, y)
B1 = (grad.sum(0)[None, :] - grad)[:, 0]
denom = B1 + n * lam * model.theta[0][None]
X_hat = (grad.sum(0)[None, :] - grad + n * lam * model.theta[None, :]) / denom[:, None]
y_hat = link_func(X_hat @ model.theta) + denom
return X_hat, y_hat
def compute_correct_ratio(etas, num_bins, predictions, target):
order = etas.argsort()
bin_size = len(target) // num_bins + 1
bin_accs = []
for prediction in predictions:
prediction = np.array(prediction)
correct = (prediction == target)
        bin_accs.append([correct[order[lower:lower + bin_size]].mean()
                         for lower in range(0, len(correct), bin_size)])
return np.array(bin_accs)
parser = argparse.ArgumentParser(description="Evaluate GLM reconstruction attack.")
parser.add_argument("--data_folder", default="data/", type=str,
help="folder in which to store data")
parser.add_argument("--num_trials", default=10000, type=int,
help="Number of trials")
parser.add_argument("--lam", default=0.01, type=float,
help="regularization parameter for logistic regression")
parser.add_argument("--sigma", default=1e-5, type=float,
help="Gaussian noise parameter for output perturbation")
args = parser.parse_args()
train_data = dataloading.load_dataset(
name="mnist", split="train", normalize=False,
num_classes=2, root=args.data_folder, regression=False)
test_data = dataloading.load_dataset(
name="mnist", split="test", normalize=False,
num_classes=2, root=args.data_folder, regression=False)
train_data['features'] = torch.cat([torch.ones(len(train_data['targets']), 1), train_data['features']], 1)
test_data['features'] = torch.cat([torch.ones(len(test_data['targets']), 1), test_data['features']], 1)
model = models.get_model("logistic")
model.train(train_data, l2=args.lam, weights=None)
true_theta = model.theta.clone()
predictions = model.predict(train_data["features"])
acc = ((predictions == train_data["targets"]).float()).mean()
print(f"Training accuracy of classifier {acc.item():.3f}")
predictions = model.predict(test_data["features"])
acc = ((predictions == test_data["targets"]).float()).mean()
print(f"Test accuracy of classifier {acc.item():.3f}")
J = model.influence_jacobian(train_data)[:, :, 1:-1] / args.sigma
etas = J.pow(2).sum(1).mean(1)
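# etas are the per-example (squared) Fisher information loss values computed from
# the noise-scaled influence Jacobian; their reciprocals are plotted later as the
# predicted lower bound on reconstruction MSE and compared to the attack's
# empirical MSE.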
X = train_data["features"]
y = train_data["targets"].float()
n, d = X.size(0), X.size(1) - 1
link_func = torch.sigmoid
X_means = torch.zeros(X.shape)
errors = torch.zeros(len(y))
with torch.no_grad():
print('Running reconstruction attack for %d trials:' % args.num_trials)
for i in tqdm(range(args.num_trials)):
model.theta = true_theta + args.sigma * torch.randn(true_theta.size())
X_hat, y_hat = recons_attack(model, X, y, args.lam, link_func)
X_means += X_hat / args.num_trials
errors += (X_hat[:, 1:] - X[:, 1:]).pow(2).sum(1) / (d * args.num_trials)
X_means = X_means[:, 1:]
# filter out examples that the attack failed on
mask = torch.logical_not(torch.isnan(errors))
etas = etas[mask]
errors = errors[mask]
_, order = etas.reciprocal().sort()
# plot MSE lower bound vs. true MSE
plt.figure(figsize=(8,5))
below_bound = etas.reciprocal() < errors
plt.scatter(etas[below_bound].reciprocal().detach(), errors[below_bound].detach(), s=10)
plt.scatter(etas[torch.logical_not(below_bound)].reciprocal().detach(), errors[torch.logical_not(below_bound)].detach(),
s=10, color='indianred')
plt.plot(np.power(10, np.arange(-5.5, 3, 0.1)), np.power(10, np.arange(-5.5, 3, 0.1)), 'k', label='Lower bound')
plt.axvline(x=1, color='k', linestyle=':')
plt.xticks(fontsize=20)
plt.xlim([1e-6, 1e4])
plt.xlabel('Predicted MSE', fontsize=20)
plt.xscale('log')
plt.yticks(fontsize=20)
plt.ylabel('Recons. attack MSE', fontsize=20)
plt.yscale('log')
plt.legend(loc='lower right', fontsize=20)
os.makedirs("figs", exist_ok=True)
plt.savefig("figs/recons_mse.pdf", bbox_inches="tight")
# plot reconstructed samples
plt.figure(figsize=(48, 6))
for i in range(8):
plt.subplot(1, 8, i+1)
plt.imshow(X[mask][order[i], 1:].clamp(0, 1).view(28, 28).detach())
plt.axis('off')
plt.savefig("figs/orig_highest8.pdf", bbox_inches="tight")
plt.figure(figsize=(48, 6))
for i in range(8):
plt.subplot(1, 8, i+1)
plt.imshow(X_means[mask][order[i]].clamp(0, 1).view(28, 28).detach())
plt.axis('off')
plt.savefig("figs/recons_highest8.pdf", bbox_inches="tight")
plt.figure(figsize=(48, 6))
for i in range(8):
plt.subplot(1, 8, i+1)
plt.imshow(X[mask][order[-i-1], 1:].clamp(0, 1).view(28, 28).detach())
plt.axis('off')
plt.savefig("figs/orig_lowest8.pdf", bbox_inches="tight")
plt.figure(figsize=(48, 6))
for i in range(8):
plt.subplot(1, 8, i+1)
plt.imshow(X_means[mask][order[-i-1]].clamp(0, 1).view(28, 28).detach())
plt.axis('off')
plt.savefig("figs/recons_lowest8.pdf", bbox_inches="tight")
|
bounding_data_reconstruction-main
|
mnist_logistic_reconstruction.py
|
#!/usr/bin/env python3
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import array
import gzip
import logging
import os
from os import path
import struct
import math
import urllib.request
from torchvision import datasets as torch_datasets
from torchvision import transforms
import numpy as np
import numpy.random as npr
from sklearn.decomposition import PCA
_DATA_FOLDER = "data/"
def _download(url, data_folder, filename):
"""
Download a URL to a file in the temporary data directory, if it does not
already exist.
"""
if not path.exists(data_folder):
os.makedirs(data_folder)
out_file = path.join(data_folder, filename)
if not path.isfile(out_file):
urllib.request.urlretrieve(url, out_file)
logging.info(f"Downloaded {url} to {data_folder}")
def _partial_flatten(x):
"""
Flatten all but the first dimension of an ndarray.
"""
return np.reshape(x, (x.shape[0], -1))
def _one_hot(x, k, dtype=np.float32):
"""
Create a one-hot encoding of x of size k.
"""
return np.array(x[:, None] == np.arange(k), dtype)
def mnist_raw(dataset):
"""
Download and parse the raw MNIST dataset.
"""
if dataset == "mnist":
# mirror of http://yann.lecun.com/exdb/mnist/:
base_url = "https://storage.googleapis.com/cvdf-datasets/mnist/"
elif dataset == "fmnist":
base_url = "http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/"
elif dataset == "kmnist":
base_url = "http://codh.rois.ac.jp/kmnist/dataset/kmnist/"
else:
raise RuntimeError("Unknown dataset: " + dataset)
data_folder = path.join(_DATA_FOLDER, dataset)
def parse_labels(filename):
"""
Parses labels in MNIST raw label file.
"""
with gzip.open(filename, "rb") as fh:
_ = struct.unpack(">II", fh.read(8))
return np.array(array.array("B", fh.read()), dtype=np.uint8)
def parse_images(filename):
"""
Parses images in MNIST raw label file.
"""
with gzip.open(filename, "rb") as fh:
            _, num_images, rows, cols = struct.unpack(">IIII", fh.read(16))
            return np.array(array.array("B", fh.read()), dtype=np.uint8).reshape(
                num_images, rows, cols
)
# download all MNIST files:
for filename in [
"train-images-idx3-ubyte.gz",
"train-labels-idx1-ubyte.gz",
"t10k-images-idx3-ubyte.gz",
"t10k-labels-idx1-ubyte.gz",
]:
_download(base_url + filename, data_folder, filename)
# parse all images and labels:
train_images = parse_images(path.join(data_folder, "train-images-idx3-ubyte.gz"))
train_labels = parse_labels(path.join(data_folder, "train-labels-idx1-ubyte.gz"))
test_images = parse_images(path.join(data_folder, "t10k-images-idx3-ubyte.gz"))
test_labels = parse_labels(path.join(data_folder, "t10k-labels-idx1-ubyte.gz"))
return train_images, train_labels, test_images, test_labels
def preprocess_data(train_images, train_labels, test_images, test_labels,
binary, permute_train, normalize, pca_dims):
if binary:
num_labels = 2
train_mask = np.logical_or(train_labels == 0, train_labels == 1)
test_mask = np.logical_or(test_labels == 0, test_labels == 1)
train_images, train_labels = train_images[train_mask], train_labels[train_mask]
test_images, test_labels = test_images[test_mask], test_labels[test_mask]
else:
num_labels = np.max(test_labels) + 1
train_labels = _one_hot(train_labels, num_labels)
test_labels = _one_hot(test_labels, num_labels)
if pca_dims > 0:
pca = PCA(n_components=pca_dims, svd_solver='full')
pca.fit(train_images)
train_images = pca.transform(train_images)
test_images = pca.transform(test_images)
if normalize:
train_images /= np.linalg.norm(train_images, 2, 1)[:, None]
test_images /= np.linalg.norm(test_images, 2, 1)[:, None]
# permute training data:
if permute_train:
perm = np.random.RandomState(0).permutation(train_images.shape[0])
train_images = train_images[perm]
train_labels = train_labels[perm]
return train_images, train_labels, test_images, test_labels
def mnist(dataset="mnist", binary=False, permute_train=False, normalize=False, pca_dims=0):
"""
Download, parse and process MNIST data to unit scale and one-hot labels.
"""
# obtain raw MNIST data:
train_images, train_labels, test_images, test_labels = mnist_raw(dataset)
# flatten and normalize images, create one-hot labels:
train_images = _partial_flatten(train_images) / np.float32(255.0)
test_images = _partial_flatten(test_images) / np.float32(255.0)
return preprocess_data(train_images, train_labels, test_images, test_labels,
binary, permute_train, normalize, pca_dims)
def cifar(dataset="cifar10", binary=False, permute_train=False, normalize=False, pca_dims=0):
data_folder = path.join(_DATA_FOLDER, dataset)
normalizer = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
train_transforms = transforms.Compose([transforms.ToTensor(), normalizer])
if dataset == "cifar10":
train_set = torch_datasets.CIFAR10(root=data_folder, train=True, transform=train_transforms, download=True)
test_set = torch_datasets.CIFAR10(root=data_folder, train=False, transform=train_transforms, download=True)
elif dataset == "cifar100":
train_set = torch_datasets.CIFAR100(root=data_folder, train=True, transform=train_transforms, download=True)
test_set = torch_datasets.CIFAR100(root=data_folder, train=False, transform=train_transforms, download=True)
train_images = []
train_labels = []
for (x, y) in train_set:
train_images.append(np.rollaxis(x.numpy(), 0, 3).flatten())
train_labels.append(y)
train_images = np.stack(train_images)
train_labels = np.array(train_labels)
test_images = []
test_labels = []
for (x, y) in test_set:
test_images.append(np.rollaxis(x.numpy(), 0, 3).flatten())
test_labels.append(y)
test_images = np.stack(test_images)
test_labels = np.array(test_labels)
return preprocess_data(train_images, train_labels, test_images, test_labels,
binary, permute_train, normalize, pca_dims)
def get_datastream(images, labels, batch_size, permutation=False, last_batch=True):
"""
Returns a data stream of `images` and corresponding `labels` in batches of
size `batch_size`. Also returns the number of batches per epoch, `num_batches`.
To loop through the whole dataset in permuted order, set `permutation` to `True`.
To not return the last batch, set `last_batch` to `False`.
"""
# compute number of batches to return:
num_images = images.shape[0]
def permutation_datastream():
"""
Data stream iterator that returns randomly permuted images until eternity.
"""
while True:
perm = npr.permutation(num_images)
for i in range(num_batches):
batch_idx = perm[i * batch_size : (i + 1) * batch_size]
yield images[batch_idx], labels[batch_idx], batch_idx
def random_sampler_datastream():
"""
Data stream iterator that returns a uniformly random batch of images until eternity.
"""
while True:
batch_idx = npr.permutation(num_images)[:batch_size]
yield images[batch_idx], labels[batch_idx], batch_idx
# return iterator factory:
if permutation:
num_batches = int((math.ceil if last_batch else math.floor)(float(num_images) / float(batch_size)))
return random_sampler_datastream, num_batches
else:
num_complete_batches, leftover = divmod(num_images, batch_size)
num_batches = num_complete_batches + (last_batch and bool(leftover))
return permutation_datastream, num_batches
|
bounding_data_reconstruction-main
|
datasets.py
|
#!/usr/bin/env python3
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import math
import torch
import numpy as np
import os
import matplotlib.pyplot as plt
import sys
sys.path.append("fisher_information_loss")
import models
import dataloading
parser = argparse.ArgumentParser(description="MNIST training with FIL.")
parser.add_argument("--data_folder", default="data/", type=str,
help="folder in which to store data")
parser.add_argument("--num_trials", default=10, type=int,
help="number of repeated trials")
parser.add_argument("--lam", default=0.01, type=float,
help="l2 regularization parameter")
parser.add_argument("--sigma", default=0.01, type=float,
help="Gaussian noise multiplier")
args = parser.parse_args()
train_data = dataloading.load_dataset(
name="mnist", split="train", normalize=False,
num_classes=2, root=args.data_folder, regression=False)
test_data = dataloading.load_dataset(
name="mnist", split="test", normalize=False,
num_classes=2, root=args.data_folder, regression=False)
n = len(train_data["targets"])
all_etas, all_epsilons, all_rdp_epsilons = [], [], []
for i in range(args.num_trials):
model = models.get_model("logistic")
model.train(train_data, l2=args.lam, weights=None)
# Renyi-DP accounting
rdp_eps = 4 / (n * args.lam * args.sigma) ** 2
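    # For output perturbation with Gaussian noise sigma: assuming a 1-Lipschitz
    # per-example loss, the l2 sensitivity of the regularized solution is at most
    # 2 / (n * lam), and the Gaussian mechanism at Renyi order alpha = 2 gives
    # eps = alpha * Delta^2 / (2 * sigma^2) = 4 / (n * lam * sigma)^2.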
# FIL accounting
J = model.influence_jacobian(train_data)[:, :, :-1] / args.sigma
etas = J.pow(2).sum(1).mean(1).sqrt()
print(f"Trial {i+1:d}: RDP epsilon = {rdp_eps:.4f}, Max FIL eta = {etas.max():.4f}")
model.theta = model.theta + args.sigma * torch.randn_like(model.theta)
all_etas.append(etas.detach().numpy())
all_rdp_epsilons.append(rdp_eps)
predictions = model.predict(train_data["features"])
acc = ((predictions == train_data["targets"]).float()).mean()
print(f"Training accuracy of classifier {acc.item():.3f}")
predictions = model.predict(test_data["features"])
acc = ((predictions == test_data["targets"]).float()).mean()
print(f"Test accuracy of classifier {acc.item():.3f}")
all_etas = np.stack(all_etas, 0)
all_rdp_epsilons = np.stack(all_rdp_epsilons, 0)
fil_bound = 1 / np.power(all_etas, 2).mean(0)
rdp_bound = 0.25 / (math.exp(all_rdp_epsilons.mean()) - 1)
plt.figure(figsize=(8,5))
_ = plt.hist(np.log10(fil_bound), bins=100, label='dFIL bound', color='silver', edgecolor='black', linewidth=0.3)
plt.axvline(x=np.log10(rdp_bound), color='k', linestyle='--', label='RDP bound')
plt.axvline(x=0, color='k', linestyle=':')
plt.xlabel('MSE lower bound', fontsize=20)
plt.ylabel('Count', fontsize=20)
plt.xticks(np.arange(-1, 11, 2), labels=['$10^{%d}$' % t for t in np.arange(-1, 11, 2)], fontsize=20)
plt.yticks(fontsize=20)
plt.legend(loc='upper left', fontsize=20)
os.makedirs("figs", exist_ok=True)
plt.savefig("figs/mnist_linear_hist.pdf", bbox_inches="tight")
|
bounding_data_reconstruction-main
|
mnist_logistic_regression.py
|
#!/usr/bin/env python3
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import jax
import jax.numpy as jnp
from jax.experimental import stax
DTYPE_MAPPING = {
"float32": "f32",
"float64": "f64",
"int32": "s32",
"int64": "s64",
"uint32": "u32",
"uint64": "u64",
}
def _l2_normalize(x, eps=1e-7):
return x * jax.lax.rsqrt((x ** 2).sum() + eps)
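# estimate_spectral_norm runs a few rounds of power iteration through jax.vjp,
# alternating v <- f(u) and u <- J^T v (both l2-normalized). For a linear map f
# the returned <v, f(u)> converges to the largest singular value; for the trained
# network it serves as a rough estimate of the Jacobian's spectral norm around
# the random probe point.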
def estimate_spectral_norm(f, input_shape, seed=0, n_steps=20):
input_shape = tuple([1] + [input_shape[i] for i in range(1, len(input_shape))])
rng = jax.random.PRNGKey(seed)
u0 = jax.random.normal(rng, input_shape)
v0 = jnp.zeros_like(f(u0))
def fun(carry, _):
u, v = carry
v, f_vjp = jax.vjp(f, u)
v = _l2_normalize(v)
u, = f_vjp(v)
u = _l2_normalize(u)
return (u, v), None
(u, v), _ = jax.lax.scan(fun, (u0, v0), xs=None, length=n_steps)
return jnp.vdot(v, f(u))
def accuracy(predictions, targets):
"""
Compute accuracy of `predictions` given the associated `targets`.
"""
target_class = jnp.argmax(targets, axis=-1)
predicted_class = jnp.argmax(predictions, axis=-1)
return jnp.mean(predicted_class == target_class)
def get_model(rng, model_name, input_shape, num_labels):
"""
Returns model specified by `model_name`. Model is initialized using the
specified random number generator `rng`.
Optionally, the input image `height` and `width` can be specified as well.
"""
# initialize convolutional network:
if model_name == "cnn":
init_random_params, predict = stax.serial(
stax.Conv(16, (8, 8), padding="SAME", strides=(2, 2)),
stax.Gelu,
stax.AvgPool((2, 2), (1, 1)),
stax.Conv(32, (4, 4), padding="VALID", strides=(2, 2)),
stax.Gelu,
stax.AvgPool((2, 2), (1, 1)),
stax.Flatten,
stax.Dense(32),
stax.Gelu,
stax.Dense(num_labels),
)
_, init_params = init_random_params(rng, input_shape)
elif model_name == "cnn_tanh":
init_random_params, predict = stax.serial(
stax.Conv(16, (8, 8), padding="SAME", strides=(2, 2)),
stax.Tanh,
stax.AvgPool((2, 2), (1, 1)),
stax.Conv(32, (4, 4), padding="VALID", strides=(2, 2)),
stax.Tanh,
stax.AvgPool((2, 2), (1, 1)),
stax.Flatten,
stax.Dense(32),
stax.Tanh,
stax.Dense(num_labels),
)
_, init_params = init_random_params(rng, input_shape)
elif model_name == "cnn_cifar":
init_random_params, predict = stax.serial(
stax.Conv(32, (3, 3), padding="SAME", strides=(1, 1)),
stax.Tanh,
stax.Conv(32, (3, 3), padding="SAME", strides=(1, 1)),
stax.Tanh,
stax.AvgPool((2, 2), (2, 2)),
stax.Conv(64, (3, 3), padding="SAME", strides=(1, 1)),
stax.Tanh,
stax.Conv(64, (3, 3), padding="SAME", strides=(1, 1)),
stax.Tanh,
stax.AvgPool((2, 2), (2, 2)),
stax.Conv(128, (3, 3), padding="SAME", strides=(1, 1)),
stax.Tanh,
stax.Conv(128, (3, 3), padding="SAME", strides=(1, 1)),
stax.Tanh,
stax.AvgPool((2, 2), (2, 2)),
stax.Flatten,
stax.Dense(128),
stax.Tanh,
stax.Dense(num_labels),
)
_, init_params = init_random_params(rng, input_shape)
# initialize multi-layer perceptron:
elif model_name == "mlp":
init_random_params, predict = stax.serial(
stax.Dense(256),
stax.Gelu,
stax.Dense(256),
stax.Gelu,
stax.Dense(num_labels),
)
_, init_params = init_random_params(rng, input_shape)
elif model_name == "mlp_tanh":
init_random_params, predict = stax.serial(
stax.Dense(256),
stax.Tanh,
stax.Dense(256),
stax.Tanh,
stax.Dense(num_labels),
)
_, init_params = init_random_params(rng, input_shape)
# initialize linear model:
elif model_name == "linear":
init_random_params, predict_raw = stax.Dense(num_labels)
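        # predict pads the raw logits with zeros so the linear model's output can
        # be fed to the same softmax cross-entropy loss as the other models; with
        # num_labels == 1 (the binary case) this yields the pair [w.x, 0].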
def predict(params, inputs):
logits = predict_raw(params, inputs)
return jnp.hstack([logits, jnp.zeros(logits.shape)])
_, init_params = init_random_params(rng, input_shape)
else:
raise ValueError(f"Unknown model: {model_name}")
# return initial model parameters and prediction function:
return init_params, predict
|
bounding_data_reconstruction-main
|
utils.py
|
#!/usr/bin/env python3
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import jax.numpy as jnp
import jax.random as jnr
from jax import jit, grad, vmap, nn
from jax.tree_util import tree_flatten, tree_unflatten
import math
def get_loss_func(predict):
"""
Returns the loss function for the specified `predict`ion function.
"""
@jit
def loss(params, inputs, targets):
"""
Multi-class loss entropy loss function for model with parameters `params`
and the specified `inputs` and one-hot `targets`.
"""
predictions = nn.log_softmax(predict(params, inputs))
if predictions.ndim == 1:
return -jnp.sum(predictions * targets)
return -jnp.mean(jnp.sum(predictions * targets, axis=-1))
return loss
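# get_grad_func wraps the loss gradient with optional per-example norm clipping:
# gradients whose norm exceeds norm_clip are scaled down, either by the hard
# max(||g|| / C, 1) divisor of standard DP-SGD or, when soft_clip is set, by a
# smooth GELU-based divisor.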
def get_grad_func(loss, norm_clip=0, soft_clip=False):
@jit
def clipped_grad(params, inputs, targets):
grads = grad(loss)(params, inputs, targets)
if norm_clip == 0:
return grads
else:
nonempty_grads, tree_def = tree_flatten(grads)
total_grad_norm = jnp.add(jnp.linalg.norm(
[jnp.linalg.norm(neg.ravel()) for neg in nonempty_grads]), 1e-7)
if soft_clip:
divisor = nn.gelu(total_grad_norm / norm_clip - 1) + 1
else:
divisor = jnp.maximum(total_grad_norm / norm_clip, 1.)
normalized_nonempty_grads = [g / divisor for g in nonempty_grads]
return tree_unflatten(tree_def, normalized_nonempty_grads)
return clipped_grad
def get_update_func(get_params, grad_func, opt_update, norm_clip=0, reshape=True):
"""
Returns the parameter update function for the specified `predict`ion function.
"""
@jit
def update(i, rng, opt_state, batch, sigma, weight_decay):
"""
Function that performs `i`-th model update using the specified `batch` on
optimizer state `opt_state`. Updates are privatized by noise addition
with variance `sigma`.
"""
# compute parameter gradient:
inputs, targets = batch
if reshape:
inputs = jnp.expand_dims(inputs, 1)
params = get_params(opt_state)
multiplier = 1 if norm_clip == 0 else norm_clip
# add noise to gradients:
grads = vmap(grad_func, in_axes=(None, 0, 0))(params, inputs, targets)
grads_flat, grads_treedef = tree_flatten(grads)
grads_flat = [g.sum(0) for g in grads_flat]
rngs = jnr.split(rng, len(grads_flat))
noisy_grads = [
(g + multiplier * sigma * jnr.normal(r, g.shape)) / len(targets)
for r, g in zip(rngs, grads_flat)
]
# weight decay
params_flat, _ = tree_flatten(params)
noisy_grads = [
g + weight_decay * param
for g, param in zip(noisy_grads, params_flat)
]
noisy_grads = tree_unflatten(grads_treedef, noisy_grads)
# perform parameter update:
return opt_update(i, noisy_grads, opt_state)
return update
|
bounding_data_reconstruction-main
|
trainer.py
|
#!/usr/bin/env python3
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import jax
import jax.numpy as jnp
import jax.random as jnr
import hydra
from jax import grad
from jax.experimental import optimizers
from jax.tree_util import tree_flatten, tree_unflatten
import math
import accountant
import datasets
import trainer
import utils
import time
def batch_predict(predict, params, images, batch_size):
num_images = images.shape[0]
num_batches = int(math.ceil(float(num_images) / float(batch_size)))
predictions = []
for i in range(num_batches):
lower = i * batch_size
upper = min((i+1) * batch_size, num_images)
predictions.append(predict(params, images[lower:upper]))
return jnp.concatenate(predictions)
@hydra.main(config_path="configs", config_name="mnist")
def main(cfg):
# set up random number generator:
logging.info(f"Running using JAX {jax.__version__}...")
rng = jnr.PRNGKey(int(time.time()))
# create dataloader for MNIST dataset:
if cfg.dataset.startswith("cifar"):
num_channels = 3
image_size = 32
train_images, train_labels, test_images, test_labels = datasets.cifar(
dataset=cfg.dataset, binary=cfg.binary, pca_dims=cfg.pca_dims)
else:
num_channels = 1
image_size = 28
train_images, train_labels, test_images, test_labels = datasets.mnist(
dataset=cfg.dataset, binary=cfg.binary, pca_dims=cfg.pca_dims)
logging.info(f"Training set max variance: %.4f" % train_images.var(0).max())
num_samples, d = train_images.shape
num_labels = train_labels.shape[1]
if num_labels == 2:
num_labels = 1
if cfg.model.startswith("cnn"):
assert cfg.pca_dims == 0, f"Cannot use PCA with {cfg.model} model."
image_shape = (-1, image_size, image_size, num_channels)
train_images = jnp.reshape(train_images, image_shape)
test_images = jnp.reshape(test_images, image_shape)
data_stream, num_batches = datasets.get_datastream(
train_images, train_labels, cfg.batch_size
)
batches = data_stream()
# set up model:
if cfg.model.startswith("cnn"):
input_shape = (-1, image_size, image_size, num_channels)
else:
input_shape = (-1, d)
init_params, predict = utils.get_model(rng, cfg.model, input_shape, num_labels)
num_params = sum(p.size for p in tree_flatten(init_params)[0])
# create optimizer:
if cfg.optimizer == "sgd":
opt_init, opt_update, get_params = optimizers.momentum(
cfg.step_size, cfg.momentum_mass
)
elif cfg.optimizer == "adam":
opt_init, opt_update, get_params = optimizers.adam(cfg.step_size)
else:
raise ValueError(f"Unknown optimizer: {cfg.optimizer}")
opt_state = opt_init(init_params)
# get loss function and update functions:
loss = trainer.get_loss_func(predict)
grad_func = trainer.get_grad_func(loss, norm_clip=cfg.norm_clip, soft_clip=True)
update = trainer.get_update_func(
get_params, grad_func, opt_update, norm_clip=cfg.norm_clip,
reshape=cfg.model.startswith("cnn")
)
# get function that computes the Jacobian norms for privacy accounting:
gelu_approx = 1.115
fil_accountant = accountant.get_grad_jacobian_trace_func(
grad_func, get_params, reshape=cfg.model.startswith("cnn"),
label_privacy=cfg.label_privacy
)
dp_accountant = accountant.get_dp_accounting_func(cfg.batch_size, cfg.sigma / gelu_approx)
# compute subsampling factor
if cfg.sigma > 0:
eps = math.sqrt(2 * math.log(1.25 / cfg.delta)) * 2 * gelu_approx / cfg.sigma
q = float(cfg.batch_size) / num_samples
subsampling_factor = q / (q + (1-q) * math.exp(-eps))
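        # This factor later discounts each example's squared per-step FIL in the
        # accounting loop (the etas_squared update), heuristically reflecting that
        # at sampling rate q an example only takes part in a fraction of the updates.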
else:
subsampling_factor = 0
logging.info(f"Subsampling factor is {subsampling_factor:.4f}")
# train the model:
logging.info(f"Training {cfg.model} model with {num_params} parameters using {cfg.optimizer}...")
etas_squared = jnp.zeros((cfg.num_epochs, train_images.shape[0]))
epsilons = jnp.zeros(cfg.num_epochs)
rdp_epsilons = jnp.zeros(cfg.num_epochs)
train_accs = jnp.zeros(cfg.num_epochs)
test_accs = jnp.zeros(cfg.num_epochs)
num_iters = 0
for epoch in range(cfg.num_epochs):
# perform full training sweep through the data:
itercount = itertools.count()
if epoch > 0:
etas_squared = etas_squared.at[epoch].set(etas_squared[epoch-1])
for batch_counter in range(num_batches):
# get next batch:
num_iters += 1
i = next(itercount)
rng = jnr.fold_in(rng, i)
images, labels, batch_idx = next(batches)
batch = (images, labels)
# update privacy loss:
if cfg.sigma > 0 and cfg.do_accounting:
etas_batch = fil_accountant(rng, opt_state, batch) / cfg.sigma / cfg.norm_clip
etas_squared = etas_squared.at[epoch, batch_idx].add(
subsampling_factor * jnp.power(etas_batch, 2), unique_indices=True
)
# perform private parameter update:
opt_state = update(i, rng, opt_state, batch, cfg.sigma, cfg.weight_decay)
# measure training and test accuracy, and average privacy loss:
params = get_params(opt_state)
spectral_norm = utils.estimate_spectral_norm(lambda x: predict(params, x), input_shape)
train_predictions = batch_predict(predict, params, train_images, cfg.batch_size)
test_predictions = batch_predict(predict, params, test_images, cfg.batch_size)
train_accuracy = utils.accuracy(train_predictions, train_labels)
test_accuracy = utils.accuracy(test_predictions, test_labels)
train_accs = train_accs.at[epoch].set(train_accuracy)
test_accs = test_accs.at[epoch].set(test_accuracy)
params, _ = tree_flatten(params)
params_norm = math.sqrt(sum([jnp.power(p, 2).sum() for p in params]))
if cfg.sigma > 0 and cfg.do_accounting:
median_eta = jnp.median(jnp.sqrt(etas_squared[epoch]))
max_eta = jnp.sqrt(etas_squared[epoch]).max()
delta = 1e-5
epsilon = dp_accountant(num_iters, len(train_labels), delta)
epsilons = epsilons.at[epoch].set(epsilon)
rdp_epsilon = dp_accountant(num_iters, len(train_labels), delta, alpha=2)
rdp_epsilons = rdp_epsilons.at[epoch].set(rdp_epsilon)
# print out progress:
logging.info(f"Epoch {epoch + 1}:")
logging.info(f" -> training accuracy = {train_accuracy:.4f}")
logging.info(f" -> test accuracy = {test_accuracy:.4f}")
logging.info(f" -> parameter norm = {params_norm:.4f}, spectral norm = {spectral_norm:.4f}")
if cfg.sigma > 0 and cfg.do_accounting:
logging.info(f" -> Median FIL privacy loss = {median_eta:.4f}")
logging.info(f" -> Max FIL privacy loss = {max_eta:.4f}")
logging.info(f" -> DP privacy loss = ({epsilon:.4f}, {delta:.2e})")
logging.info(f" -> 2-RDP privacy loss = {rdp_epsilon:.4f}")
etas = jnp.sqrt(etas_squared) if cfg.sigma > 0 and cfg.do_accounting else float("inf")
return etas, epsilons, rdp_epsilons, train_accs, test_accs
# run all the things:
if __name__ == "__main__":
main()
|
bounding_data_reconstruction-main
|
train_classifier.py
|
#!/usr/bin/env python3
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import jax.numpy as jnp
import jax.random as jnr
from jax import jit, jvp, vjp, jacrev, vmap, nn
from jax.tree_util import tree_flatten
import trainer
from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp, get_privacy_spent
def get_grad_jacobian_norm_func(grad_func, get_params, method="jvp", reshape=True, label_privacy=False):
"""
Returns a function that computes norm of the Jacobian of the parameter
gradients for the specified `loss` function for an optimizer in which the
`get_params` function returns the model parameters.
"""
# assertions:
assert method in ["jvp", "full"], f"Unknown method: {method}"
@jit
def compute_power_iteration_jvp(params, w, inputs, targets):
"""
Computes a single power iteration via the JVP method. Does not include
Jacobian w.r.t. targets.
"""
# compute JVP of per-example parameter gradient Jacobian with w:
if label_privacy:
perex_grad = lambda x: vmap(grad_func, in_axes=(None, 0, 0))(
params, inputs, x
)
_, w = jvp(perex_grad, (targets,), (w,))
else:
perex_grad = lambda x: vmap(grad_func, in_axes=(None, 0, 0))(
params, x, targets
)
_, w = jvp(perex_grad, (inputs,), (w,))
# compute norm of the JVP:
w_flattened, _ = tree_flatten(w)
norms = [
jnp.power(jnp.reshape(v, (v.shape[0], -1)), 2).sum(axis=1)
for v in w_flattened
]
norms = jnp.sqrt(sum(norms) + 1e-7)
# compute VJP of per-example parameter gradient Jacobian with w:
if label_privacy:
_, f_vjp = vjp(perex_grad, targets)
else:
_, f_vjp = vjp(perex_grad, inputs)
w_out = f_vjp(w)[0]
return norms, w_out
@jit
def compute_power_iteration_full(params, w, inputs, targets):
"""
Computes a single power iteration by computing the full Jacobian and
right-multiplying it. Does not include Jacobian w.r.t. targets.
"""
# compute per-example parameter gradient Jacobian:
J = jacrev(grad_func, 1)(params, inputs, targets)
J_flattened, _ = tree_flatten(J)
# compute JVP with w:
jvp_exact = [(v * w).sum(-1) for v in J_flattened]
# compute norm of the JVP:
norms = [
jnp.power(jnp.reshape(v, (-1, v.shape[-1])), 2).sum(axis=0)
for v in jvp_exact
]
norms = jnp.sqrt(sum(norms))
# compute VJP of per-example parameter gradient Jacobian with w:
vjp_exact = [
J_flattened[i] * jnp.expand_dims(jvp_exact[i], -1)
for i in jnp.arange(len(jvp_exact))
]
w_out = sum(
[jnp.reshape(v, (-1, v.shape[-2], v.shape[-1])).sum(0) for v in vjp_exact]
)
return norms, w_out
@jit
def grad_jacobian_norm(rng, opt_state, batch, num_iters=20):
"""
Computes norm of the Jacobian of the parameter gradients. The function
performs `num_iters` power iterations.
"""
# initialize power iterates:
inputs, targets = batch
if reshape:
inputs = jnp.expand_dims(inputs, 1)
w = jnr.normal(rng, shape=(targets.shape if label_privacy else inputs.shape))
w_norm = jnp.sqrt(jnp.power(w.reshape(w.shape[0], -1), 2).sum(axis=1) + 1e-7)
w = w / jnp.expand_dims(w_norm, tuple(range(1, len(w.shape))))
# perform power iterations:
params = get_params(opt_state)
for i in jnp.arange(num_iters):
if method == "jvp":
norms, w = compute_power_iteration_jvp(params, w, inputs, targets)
elif method == "full":
norms, w = compute_power_iteration_full(params, w, inputs, targets)
w_norm = jnp.sqrt(jnp.power(w.reshape(w.shape[0], -1), 2).sum(axis=1) + 1e-7)
w = w / jnp.expand_dims(w_norm, tuple(range(1, len(w.shape))))
# set nan values to 0 because gradient is 0
norms = jnp.nan_to_num(norms)
return norms
# return the function:
return grad_jacobian_norm
def get_grad_jacobian_trace_func(grad_func, get_params, reshape=True, label_privacy=False):
"""
Returns a function that computes the (square root of the) trace of the Jacobian
of the parameters.
"""
@jit
def grad_jacobian_trace(rng, opt_state, batch, num_iters=50):
params = get_params(opt_state)
inputs, targets = batch
if reshape:
inputs = jnp.expand_dims(inputs, 1)
if label_privacy:
flattened_shape = jnp.reshape(targets, (targets.shape[0], -1)).shape
perex_grad = lambda x: vmap(grad_func, in_axes=(None, 0, 0))(
params, inputs, x
)
else:
flattened_shape = jnp.reshape(inputs, (inputs.shape[0], -1)).shape
perex_grad = lambda x: vmap(grad_func, in_axes=(None, 0, 0))(
params, x, targets
)
num_iters = targets.shape[1] if label_privacy else num_iters
rngs = jnr.split(rng, num_iters)
trace = jnp.zeros(inputs.shape[0])
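        # Each iteration pushes a one-hot coordinate vector through the per-example
        # gradient Jacobian via a JVP and accumulates its squared norm. With
        # label_privacy the loop visits every label coordinate, computing the
        # per-coordinate average of ||J e_i||^2 exactly; otherwise it samples random
        # input coordinates, giving a Hutchinson-style estimate of that average.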
for i, g in zip(jnp.arange(num_iters), rngs):
indices = jnr.categorical(g, jnp.ones(shape=flattened_shape))
if label_privacy:
indices = i * jnp.ones(flattened_shape[0])
w = jnp.reshape(nn.one_hot(indices, flattened_shape[1]), targets.shape)
_, w = jvp(perex_grad, (targets,), (w,))
else:
indices = jnr.categorical(rng, jnp.ones(shape=flattened_shape))
w = jnp.reshape(nn.one_hot(indices, flattened_shape[1]), inputs.shape)
_, w = jvp(perex_grad, (inputs,), (w,))
# compute norm of the JVP:
w_flattened, _ = tree_flatten(w)
norms = [
jnp.power(jnp.reshape(v, (v.shape[0], -1)), 2).sum(axis=1)
for v in w_flattened
]
trace = trace + sum(norms) / num_iters
# set nan values to 0 because gradient is 0
trace = jnp.nan_to_num(trace)
return jnp.sqrt(trace + 1e-7)
# return the function:
return grad_jacobian_trace
def get_dp_accounting_func(batch_size, sigma):
"""
Returns the (eps, delta)-DP accountant if alpha=None,
or the (alpha, eps)-RDP accountant otherwise.
"""
def compute_epsilon(steps, num_examples, target_delta=1e-5, alpha=None):
if num_examples * target_delta > 1.:
warnings.warn('Your delta might be too high.')
q = batch_size / float(num_examples)
if alpha is None:
orders = list(jnp.linspace(1.1, 10.9, 99)) + list(range(11, 64))
rdp_const = compute_rdp(q, sigma, steps, orders)
eps, _, _ = get_privacy_spent(orders, rdp_const, target_delta=target_delta)
else:
eps = compute_rdp(q, sigma, steps, alpha)
return eps
return compute_epsilon
|
bounding_data_reconstruction-main
|
accountant.py
|
import numpy as np
from utils.constant import NEGATIVE, POSITIVE
from utils.utils import mean_to_canonical
import networkx as nx
from collections import Counter
from sklearn.linear_model import LinearRegression, LogisticRegression
import random
from utils.utils import numberToBase
from utils.utils import multi_index_to_index
from utils.utils import sigmoid, outliers_iqr, threshold_matrix
from utils.r_pca import R_pca
class Ivy:
def __init__(self):
self.valid_iv = None
self.w = None
def train(self,IVs,X=None, deps=[], anchor=None, class_balance=None, use_forward=True, use_canonical=True, **kwargs):
# determine valid IVs
        if self.valid_iv is None:
self.valid_iv = list(range(IVs.shape[1]))
# self._is_valid_iv(IVs)
# record use_forward and use_canonical
self.use_forward = use_forward
self.use_canonical = use_canonical
# change the encoding of IVs to {-1,1}
IVs = self._convert_iv(IVs)
# assign class balance
if class_balance is None:
# find out class_balance in a heuristic fashion
# other class_balance identification methods can also be used
class_balance = Counter(IVs.flatten())
class_balance = class_balance[1]/(class_balance[-1]+class_balance[1])
class_balance = [1-class_balance, class_balance]
n,p = IVs.shape
        # sem: second empirical moment matrix
sem = np.matmul(IVs.T,IVs)/n
# for conditional independent model,
# the inverse graph is the complete graph
inverse_graph = nx.complete_graph(p)
# incidence matrix of the inverse graph
M = nx.incidence_matrix(inverse_graph).T.toarray()
edge_list_M = np.asarray(inverse_graph.edges())
# make sure deps are in increasing order
deps = [[int(x[0]),int(x[1])] for x in deps]
# update M and edge_list_M by removing edges in deps
selector = [(list(x) not in deps) for x in edge_list_M]
M = M[selector,:]
edge_list_M = edge_list_M[selector,:]
if self.use_forward:
# create q
exp_q = np.abs(sem[edge_list_M[:,0],edge_list_M[:,1]])
# handle zeros in exp_q
eps_zero = 1e-12
exp_q[exp_q==0] = eps_zero
# take log of exp_q to get q
q = np.log(exp_q)
# use only positive entries in sem
selector_positive_sem = sem[edge_list_M[:,0],edge_list_M[:,1]]>0
            q_positive = q[selector_positive_sem]
            M_positive = M[selector_positive_sem,:]
            # make sure that M matrix is full-rank
            # find the all-zero columns
            selector_non_zero_column_M_positive = np.sum(M_positive,axis=0)>0
            # compute l
            # r_cond = None is for using future default
            # and silence warning
            l_subset,_,_,_ = np.linalg.lstsq(
                M_positive[:,selector_non_zero_column_M_positive],
                q_positive,rcond=None)
            l = np.zeros(M.shape[1])
            l[:] = np.nan
            l[selector_non_zero_column_M_positive] = l_subset
l[np.isnan(l)] = -100000
self.w = np.exp(l)
# get the w which is the mean parameter
# the following are in {0,1} encoding
self.class_balance = class_balance #[neg, pos]
if self.use_canonical:
# first empirical moments with {-1,1} encoding
fem = np.mean(IVs,axis=0)
# second empirical moments with {-1,1} encoding
sem = ((IVs.T) @ (IVs))/(n)
# augmented with z
fem_aug = np.append(fem,self.class_balance[1]-self.class_balance[0])
sem_aug = np.vstack([sem,self.w])
sem_aug = np.hstack([sem_aug,np.append(self.w,0).reshape(-1,1)])
mean_parameter = np.copy(sem_aug)
np.fill_diagonal(mean_parameter,fem_aug)
self.im, self.theta = mean_to_canonical(deps,mean_parameter,maxiter=100,alpha=0.2)
# update class balance
self.class_balance_im = self.im.infer.query([str(p)],show_progress=False).normalize(inplace=False).values
# use canonical parameters
self.im.infer.calibrate()
self.im.infer.get_cliques()
factor_dict = self.im.infer.get_clique_beliefs()
# create prob_dict
prob_dict = {}
for clique in factor_dict:
prob_index = np.array([numberToBase(x,3,len(clique)) for x in range(3**len(clique))])
prob = []
factor = factor_dict[clique].normalize(inplace=False)
for assignment in prob_index:
# see if assignment is the same dimension of factor
absence_selector = (assignment==1)
if np.sum(absence_selector) == 0:
prob.append(factor.values[tuple((assignment/2).astype(int))])
else:
# if not, marginalize:
margin_factor = factor.marginalize(np.array(factor.scope())[np.where(absence_selector)],inplace=False)
prob.append(margin_factor.values[tuple((assignment[~absence_selector]/2).astype(int))])
                # compute conditional probability given z
selector = (prob_index[:,factor.scope().index(str(p))]==0)
prob = np.array(prob)
prob[selector] = prob[selector]/class_balance[0]
selector = (prob_index[:,factor.scope().index(str(p))]==2)
prob[selector] = prob[selector]/class_balance[1]
prob_dict.update({clique:{"prob_index":np.copy(prob_index),"prob":np.log(prob)}})
self.prob_dict = prob_dict
def dependency_validity(self,IVs,lam=None,mu=None,**kwargs):
# take symmetric matrix as an argument as well
if (IVs.shape[0]==IVs.shape[1]) and np.all(IVs==IVs.T):
Sigma = IVs
else:
Sigma = np.cov(IVs.T)
# compute the inverse covariance matrix
pinv_Sigma = np.linalg.pinv(Sigma)
# set default value of lam and mu
lam_default = 1/np.sqrt(np.max(pinv_Sigma.shape))
mu_default = np.prod(pinv_Sigma.shape)/(4*np.sum(np.abs(pinv_Sigma)))
if lam == None:
lam = lam_default
else:
lam = lam * lam_default
if mu == None:
mu = mu_default
else:
mu = mu * mu_default
rpca = R_pca(pinv_Sigma,lmbda=lam,mu=mu)
L, S = rpca.fit(max_iter=10000, iter_print=10000)
# estimate accuracy
u, s, vh = np.linalg.svd(L)
l = -u[:,0]*np.sqrt(s[0])
score = (Sigma @ l)
return score, S, L
def predict_proba(self,IVs,is_ad_hoc=False):
# compute soft label
IVs = self._convert_iv(IVs)
# w = self.w
# conditional_accuracy = self.conditional_accuracy
n,p = IVs.shape
if self.use_canonical:
# use canonical parameters
pos_posterior = np.zeros(n)
neg_posterior = np.zeros(n)
for clique in self.prob_dict:
iv_index_in_clique = [x for x in range(len(clique)) if list(clique)[x]!= str(p)]
iv_index_in_IVs = np.array(list(clique))[iv_index_in_clique].astype(int)
assignment = np.zeros([n,len(clique)]).astype(int)
assignment[:,iv_index_in_clique] = IVs[:,iv_index_in_IVs]+1
pos_assignment = np.copy(assignment)
neg_assignment = np.copy(assignment)
pos_assignment[:,list(clique).index(str(p))] = 2
neg_assignment[:,list(clique).index(str(p))] = 0
del assignment
# find the index of the assignment
pos_assignment_single_index = multi_index_to_index(pos_assignment)
neg_assignment_single_index = multi_index_to_index(neg_assignment)
                # update positive posterior
pos_posterior = pos_posterior + self.prob_dict[clique]["prob"][pos_assignment_single_index]
# update negative posterior
neg_posterior = neg_posterior + self.prob_dict[clique]["prob"][neg_assignment_single_index]
Zprob = sigmoid(-(neg_posterior+np.log(self.class_balance_im[0]))+(pos_posterior+np.log(self.class_balance_im[1])))
else:
            raise NotImplementedError("set use_canonical=True in training!")
return Zprob
def predict(self, IVs, b=0.5):
Zprob = self.predict_proba(IVs)
Z = np.where(Zprob > b, POSITIVE, NEGATIVE)
return Z
def _convert_iv(self,IVs):
IVs = IVs-1
IVs = IVs[:,self.valid_iv]
return IVs
def get_weights(self):
return self.w
def get_dependencies(self,S):
        # threshold deps_mat
deps_mat = np.abs(np.copy(S))
np.fill_diagonal(deps_mat,0)
# compute thresh
thresh_list = np.unique(np.abs(deps_mat[np.triu_indices(deps_mat.shape[0],k=1)]))
outlier_index = outliers_iqr(thresh_list)[0]
if len(outlier_index)>0:
min_outlier = np.min(thresh_list[outlier_index])
thresh = np.max(thresh_list[thresh_list<min_outlier])
short_thresh_list = thresh_list[thresh_list>=thresh]
thresh = short_thresh_list[np.argmax(short_thresh_list[1:]/short_thresh_list[0:-1])+1]
# get the edges
deps = threshold_matrix(deps_mat,thresh)
else:
deps = []
return deps
def get_valid_iv_indx(self,score):
# get valid IVs
valid_thresh_list = np.sort(np.abs(score))
        # exclude the first one
valid_thresh = valid_thresh_list[np.argmax(valid_thresh_list[2:]/valid_thresh_list[1:-1])+2]
valid_indx = np.where(np.abs(score)>=valid_thresh)[0]
return valid_indx
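# --- Hedged usage sketch (illustrative only; `IVs` below is a hypothetical (n, p) array ---
# --- of candidate instrumental variables encoded in {0, 2}, as produced elsewhere in the tutorial) ---
#
#   ivy = Ivy()
#   score, S, L = ivy.dependency_validity(IVs)        # validity score and sparse dependency matrix
#   deps = ivy.get_dependencies(S)                    # recovered dependency edges
#   ivy.valid_iv = list(ivy.get_valid_iv_indx(score)) # keep only the valid candidates
#   ivy.train(IVs, deps=deps)                         # fit the latent-variable model
#   z_prob = ivy.predict_proba(IVs)                   # soft synthesized IV in [0, 1]
#   z_hat = ivy.predict(IVs)                          # hard labels in {POSITIVE, NEGATIVE}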
|
ivy-tutorial-master
|
methods/ivy.py
|
from __future__ import division, print_function
import numpy as np
try:
from pylab import plt
except ImportError:
print('Unable to import pylab. R_pca.plot_fit() will not work.')
try:
# Python 2: 'xrange' is the iterative version
range = xrange
except NameError:
# Python 3: 'range' is iterative - no need for 'xrange'
pass
class R_pca:
def __init__(self, D, mu=None, lmbda=None):
self.D = D
self.S = np.zeros(self.D.shape)
self.Y = np.zeros(self.D.shape)
if mu:
self.mu = mu
else:
self.mu = np.prod(self.D.shape) / (4 * self.frobenius_norm(self.D))
self.mu_inv = 1 / self.mu
if lmbda:
self.lmbda = lmbda
else:
self.lmbda = 1 / np.sqrt(np.max(self.D.shape))
@staticmethod
def frobenius_norm(M):
return np.linalg.norm(M, ord='fro')
@staticmethod
def shrink(M, tau):
return np.sign(M) * np.maximum((np.abs(M) - tau), np.zeros(M.shape))
def svd_threshold(self, M, tau):
U, S, V = np.linalg.svd(M, full_matrices=False)
return np.dot(U, np.dot(np.diag(self.shrink(S, tau)), V))
def fit(self, tol=None, max_iter=1000, iter_print=100):
iter = 0
err = np.Inf
Sk = self.S
Yk = self.Y
Lk = np.zeros(self.D.shape)
if tol:
_tol = tol
else:
_tol = 1E-7 * self.frobenius_norm(self.D)
while (err > _tol) and iter < max_iter:
Lk = self.svd_threshold(
self.D - Sk + self.mu_inv * Yk, self.mu_inv)
Sk = self.shrink(
self.D - Lk + (self.mu_inv * Yk), self.mu_inv * self.lmbda)
Yk = Yk + self.mu * (self.D - Lk - Sk)
err = self.frobenius_norm(self.D - Lk - Sk)
iter += 1
if (iter % iter_print) == 0 or iter == 1 or iter > max_iter or err <= _tol:
print('iteration: {0}, error: {1}'.format(iter, err))
self.L = Lk
self.S = Sk
return Lk, Sk
def plot_fit(self, size=None, tol=0.1, axis_on=True):
n, d = self.D.shape
if size:
nrows, ncols = size
else:
sq = np.ceil(np.sqrt(n))
nrows = int(sq)
ncols = int(sq)
ymin = np.nanmin(self.D)
ymax = np.nanmax(self.D)
print('ymin: {0}, ymax: {1}'.format(ymin, ymax))
numplots = np.min([n, nrows * ncols])
plt.figure()
for n in range(numplots):
plt.subplot(nrows, ncols, n + 1)
plt.ylim((ymin - tol, ymax + tol))
plt.plot(self.L[n, :] + self.S[n, :], 'r')
plt.plot(self.L[n, :], 'b')
if not axis_on:
plt.axis('off')
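# --- Hedged usage sketch (not part of the original file) ---
# Robust PCA decomposes an observed matrix D into a low-rank part L and a
# sparse part S with D ~ L + S, e.g. on synthetic data:
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   low_rank = rng.randn(50, 3) @ rng.randn(3, 40)   # rank-3 signal
#   sparse = np.zeros((50, 40))
#   sparse[rng.rand(50, 40) < 0.05] = 10.0           # ~5% large outliers
#   rpca = R_pca(low_rank + sparse)
#   L, S = rpca.fit(max_iter=1000, iter_print=1000)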
|
ivy-tutorial-master
|
utils/r_pca.py
|
import numpy as np
from .constant import NEGATIVE, POSITIVE
import pandas as pd
from collections import Counter
from pgmpy.models import MarkovModel
from pgmpy.factors.discrete import DiscreteFactor
from pgmpy.inference import BeliefPropagation
import itertools as it
import random
from statsmodels import robust
class IsingModel:
def __init__(self, theta, seed=None):
if seed is not None:
np.random.seed(seed)
# graph
self.G = MarkovModel()
for _, row in theta.iterrows():
# unary
if row["j"]==row["k"]:
self.G.add_node(str(int(row["j"])))
theta_jj = row["value"]
self.G.add_factors(DiscreteFactor([str(int(row["j"]))], [2],
np.exp([-theta_jj,theta_jj])))
# pairwise
elif row["value"]!=0:
self.G.add_edge(str(int(row["j"])), str(int(row["k"])))
theta_jk = row["value"]
self.G.add_factors(DiscreteFactor([str(int(row["j"])), str(int(row["k"]))],
[2, 2], np.exp([theta_jk, -theta_jk, -theta_jk, theta_jk])))
self.G.check_model()
self.infer = BeliefPropagation(self.G)
self.infer.calibrate()
def get_moments(self):
p = len(list(self.G.nodes))
factor_dict = self.infer.get_clique_beliefs()
mom_matrix = np.zeros([p,p])
for clique in factor_dict:
for pair in it.combinations(clique,2):
moment = factor_dict[clique].marginalize(set(clique).difference(set(pair)),inplace=False)
moment = moment.normalize(inplace=False)
pair_int = [int(x) for x in pair]
moment = moment.values[0,0]+moment.values[1,1]-moment.values[0,1]-moment.values[1,0]
mom_matrix[pair_int[0],pair_int[1]] = moment
mom_matrix[pair_int[1],pair_int[0]] = moment
for unary in it.combinations(clique,1):
unary_int = [int(x) for x in unary][0]
moment = factor_dict[clique].marginalize(set(clique).difference(set(unary)),inplace=False).normalize(inplace=False).values
moment = moment[1]-moment[0]
mom_matrix[unary_int,unary_int] = moment
return mom_matrix
def mean_to_canonical(deps, mean_parameter, theta=None, alpha=0.1, maxiter=20, accelerated=True, verbose=False):
# initialization
p = mean_parameter.shape[0]
if theta is None:
# initialize theta
theta = pd.concat([pd.DataFrame([[x,x,np.random.rand(1)[0]] for x in range(p)],columns=["j","k","value"]),
pd.DataFrame([[int(x[0]),int(x[1]),np.random.rand(1)[0]] for x in deps],columns=["j","k","value"]),
pd.DataFrame([[x,p-1,np.random.rand(1)[0]] for x in range(p-1)],columns=["j","k","value"])],axis=0)
theta = theta.reset_index(drop=True)
theta["j"] = theta["j"].astype(int)
theta["k"] = theta["k"].astype(int)
deps_aug = [(str(int(row["j"])),str(int(row["k"]))) for _, row in theta.iterrows()]
# error threshold
error_thresh = 1e-4
# current iteration
itr = 1
# initial error
error = 1000
# accelerated gradient descent
theta_list = []
while True:
        # initialize an Ising model
im = IsingModel(theta)
# gradient
moment = im.get_moments()
grad = moment[(theta["j"],theta["k"])] - mean_parameter[(theta["j"],theta["k"])]
# error
error = np.linalg.norm(grad)
# update theta
theta["value"] = theta["value"].values - alpha * grad
theta_list.append(theta)
# keep only the latest two elements
if len(theta_list)>2:
del theta_list[0]
# update theta
if accelerated:
theta["value"] = theta_list[1]["value"] + (itr-2)/(itr-1) * (theta_list[1]["value"] - theta_list[0]["value"])
if itr % 50 == 0 and verbose:
print("Error:", error)
itr = itr+1
# satisfy stopping criteria?
if error < error_thresh or itr == maxiter:
break
return im, theta
def numberToBase(n, b, p):
if n == 0:
return [0]*p
digits = []
while n:
digits.append(int(n % b))
n //= b
digits = digits[::-1]
full = ([0]*(p-len(digits)))
full.extend(digits)
return full
def multi_index_to_index(multi_index):
if len(multi_index.shape)>1:
p = multi_index.shape[1]
else:
p = len(multi_index)
base_3 = np.flip(np.array([3**x for x in range(p)])).reshape(-1,1)
return (multi_index @ base_3).T[0]
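# --- Hedged example (not part of the original file) ---
# numberToBase and multi_index_to_index are inverses for base-3 assignments,
# e.g. with p=4 variables:
#   numberToBase(5, 3, 4)                        -> [0, 0, 1, 2]
#   multi_index_to_index(np.array([0, 0, 1, 2])) -> 5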
def factor2Df(result):
key = [x[0] for x in result.assignment([0])[0]]
key.append("prob")
result_df = pd.DataFrame()
for i in range(np.prod(result.cardinality)):
assignment = tuple(x[1] for x in result.assignment([i])[0])
row = list(assignment)
row.append(result.values[assignment])
        # pandas.DataFrame.append was removed in pandas 2.0; concat is equivalent here
        result_df = pd.concat([result_df, pd.DataFrame([row])])
result_df.columns=key
result_df=result_df.reset_index(drop=True)
return result_df
def sampling(world,selector,n,random_state=None):
# initialization
if random_state is None:
rng = np.random.RandomState()
else:
rng = random_state
world = world[selector]
prob = world["prob"]/np.sum(world["prob"])
index_sample = rng.choice(a=world.index,size=n,replace=True,p=prob).tolist()
samp = world.loc[index_sample].drop(columns=['prob'])
samp = samp.reset_index(drop=True)
return samp
def sigmoid(t):
return 1/(1+np.exp(-t))
def outliers_iqr(ys):
quartile_1, quartile_3 = np.percentile(ys, [25, 75])
iqr = quartile_3 - quartile_1
lower_bound = quartile_1 - (iqr * 1.5)
upper_bound = quartile_3 + (iqr * 1.5)
return np.where((ys > upper_bound) | (ys < lower_bound))
def threshold_matrix(J, thresh=0.2):
deps = []
for i in range(J.shape[0]):
for j in range(J.shape[1]):
if (abs(J[i,j]) >= thresh) and (i<j):
deps.append((str(i),str(j)))
return deps
def get_f1(predicted_list,true_list):
fp = len(set(predicted_list).difference(true_list))
fn = len(set(true_list).difference(predicted_list))
tp = len(set(predicted_list).intersection(true_list))
pos = len(true_list)
recall = tp/pos
precision = tp/(tp+fp)
f1 = 2*(precision*recall)/(precision+recall)
return f1
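# --- Hedged example (not part of the original file) ---
# get_f1 compares a predicted edge list against the ground truth, e.g.
#   get_f1([("0", "1")], [("0", "1"), ("1", "2")])
# has precision 1.0 and recall 0.5, so it returns F1 = 2/3.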
def summary(stats, alpha=0.05):
ci_lower_stats = np.quantile(stats,alpha/2,axis=0)
ci_upper_stats = np.quantile(stats,(1-alpha/2),axis=0)
median_stats = np.median(stats,axis=0)
return ci_lower_stats, ci_upper_stats, median_stats
|
ivy-tutorial-master
|
utils/utils.py
|
POSITIVE = 1
NEGATIVE = -1
|
ivy-tutorial-master
|
utils/constant.py
|
import numpy as np
import networkx as nx
import itertools as it
import pandas as pd
from pgmpy.models import MarkovModel
from pgmpy.factors.discrete import DiscreteFactor
import pylab as plt
from utils.utils import IsingModel, factor2Df, sampling
from collections import Counter
from pgmpy.models import BayesianModel
from pgmpy.inference import VariableElimination
from pgmpy.factors.discrete import TabularCPD
class DataSimulator:
def __init__(self,p,cond_indp=False,d=3,
include_unary_factor=False,seed=None,
theta_jj=None,theta_jk=None,theta_jz=None,deps=[],
causal_effect=False, q=0, valid_z=True):
# iv model
self.iv_mdl = IsingDataSimulator(
p,cond_indp,d,include_unary_factor,seed=seed)
# causal model
self.causal_mdl = BayesNetDataSimulator(
causal_effect=causal_effect,valid_z=valid_z,q=q)
self.p = p
self.q = q
def _get_samples(self,n,seed=None):
# sample from iv_mdl
samp_iv = self.iv_mdl.get_samples(n)
# sample from causal_mdl
samp_causal = self.causal_mdl.get_samples(samp_iv)
# all samples
samp_all = pd.concat([samp_iv,samp_causal],axis=1)
return samp_all
def get_samples(self,n,seed=None):
samp_all = self._get_samples(n,seed=seed)
p = self.p
q = self.q
        # ground truth IV that we seek to synthesize
z = samp_all["z"].values
# prepare IV candidates valid + invalid
IVs = samp_all[["iv"+str(x+1) for x in range(p)]+[
"invalid_iv"+str(x+1) for x in range(q)]].values*2
        # prepare risk factor
x = samp_all["risk"].values*2-1
        # prepare outcome
y = samp_all["outcome"].values*2-1
return z, IVs, x, y
    def estimate_causal_effect_from_z(self,X):
n = X.shape[0]
Z1 = X[X.z==1].shape[0]
Z0 = X[X.z==0].shape[0]
Y1Z1 = X[(X.z==1)&(X.outcome==1)].shape[0]
Y1Z0 = X[(X.z==0)&(X.outcome==1)].shape[0]
X1Z1 = X[(X.z==1)&(X.risk==1)].shape[0]
X1Z0 = X[(X.z==0)&(X.risk==1)].shape[0]
alpha = (np.log(Y1Z1/Z1)-np.log(Y1Z0/Z0))/(np.log(X1Z1/Z1)-np.log(X1Z0/Z0))
return alpha
class IsingDataSimulator:
def __init__(self,p,cond_indp=False,d=3,
include_unary_factor=False,seed=None,
theta_jj=None,theta_jk=None,theta_jz=None,deps=[]):
        '''
        The factors are designed to follow a {-1,1} Ising model.
        Parameters:
            theta_jj: prevalence of LFs and z
            theta_jk: dependency between LFs
            theta_jz: conditional dependency between LFs and z
        '''
# initialization
rng = np.random.RandomState(seed)
rng_dep = np.random.RandomState(seed)
# nodes
nodelist = [str(x) for x in range(p)]
# add dependency among IVs
if cond_indp is False:
# create deps if not specified, otherwise just use the given deps
if len(deps)==0:
nodes = range(p)
# assign each node to cliques of random sizes
while len(nodes) > 0:
bag_size = np.minimum(rng_dep.randint(1, d), len(nodes))
nodes = list(rng_dep.permutation(nodes))
cl_nodes = np.sort(nodes[0:bag_size])
for pair in it.combinations(cl_nodes, 2):
deps.append((str(pair[0]), str(pair[1])) )
del nodes[0:bag_size]
else:
deps=[]
# define parameters
if theta_jj is None:
# unary potentials
theta_jj = rng.rand(p+1)/4*rng.choice([-1,1],replace=True,size=p+1) if include_unary_factor else np.zeros(p+1)
if theta_jk is None:
# dependency on the true label
# some (minority) LFs conflict with others
theta_jk = (rng.rand(len(deps))/2+0.5)*rng.choice([-1,1],replace=True,size=len(deps),p=[0.2,0.8])
            # the disagreements should not overwhelm the agreements
theta_jk[theta_jk<0] = theta_jk[theta_jk<0]*0.5
elif len(theta_jk)==1:
theta_jk = np.array(theta_jk*len(deps))
else:
# error
pass
if theta_jz is None:
# LFs need to be positively correlated with the ground truth label
theta_jz = rng.rand(p)/8+0.25
elif len(theta_jz)==1:
theta_jz = np.array(theta_jz*p)
# # unary potentail
# theta_jj = 0.05 if include_unary_factor else 0
# theta_jk = 1 # 1/2
# theta_jz = 1 # 1/4
# create theta
theta = pd.concat([pd.DataFrame([[x,x,theta_jj[x]] for x in range(p+1)],columns=["j","k","value"]),
pd.DataFrame([[int(x[0]),int(x[1]),theta_jk[indx]] for indx, x in enumerate(deps)],columns=["j","k","value"]),
pd.DataFrame([[x,p,theta_jz[x]] for x in range(p)],columns=["j","k","value"])],axis=0)
theta = theta.reset_index(drop=True)
theta["j"] = theta["j"].astype(int)
theta["k"] = theta["k"].astype(int)
# ising model
self.im = IsingModel(theta)
self.theta = theta
self.dependency = deps
# collect statistics
self._collect_statistics()
def _collect_statistics(self):
nodelist = list(self.im.G.nodes())
p = len(nodelist)
# The second moment matrix
# {-1,1} encoding
mom_matrix = np.zeros([p, p])
for pair in it.combinations(nodelist, 2):
moment = self.im.infer.query(list(pair),show_progress=False).normalize(inplace=False)
moment = moment.values[0,0]+moment.values[1,1]-moment.values[0,1]-moment.values[1,0]
mom_matrix[int(pair[0]), int(pair[1])] = moment
mom_matrix[int(pair[1]), int(pair[0])] = moment
np.fill_diagonal(mom_matrix,1)
# The mean parameters
# {-1,1} encoding
mean_vec = np.zeros(p)
for unary in it.combinations(nodelist,1):
moment = self.im.infer.query(list(unary),show_progress=False).normalize(inplace=False)
moment = moment.values[1]-moment.values[0]
unary_int = [int(x) for x in unary][0]
mean_vec[unary_int] = moment
# class balance
class_balance = [1-(mean_vec[-1]+1)/2,(mean_vec[-1]+1)/2]
# covariance matrix
cov_matrix = mom_matrix - np.outer(mean_vec, mean_vec)
        # inverse covariance matrix
inv_cov_matrix = np.linalg.inv(cov_matrix)
# update statistics
self.mom_matrix = mom_matrix
self.mean_vec = mean_vec
self.class_balance = class_balance
self.cov_matrix = cov_matrix
self.inv_cov_matrix = inv_cov_matrix
        # propagate accuracy parameters
self._get_accuracy()
def _get_accuracy(self):
nodelist = list(self.im.G.nodes())
# accuracy for both classes
self.accuracy = []
for node in nodelist[:-1]:
result = self.im.infer.query([node, nodelist[-1]],show_progress=False)
result.normalize()
self.accuracy.append(result.values[(0,0)]+result.values[(1,1)])
self.accuracy = np.array(self.accuracy)
# conditional accuracy
self.conditional_accuracy = []
for node in nodelist[:-1]:
result = self.im.infer.query([node,nodelist[-1]],show_progress=False)
result.divide(self.im.infer.query([nodelist[-1]],show_progress=False))
self.conditional_accuracy.append(
[result.values[0,0],result.values[1,1]])
self.conditional_accuracy = np.array(self.conditional_accuracy)
def draw_graph(self):
# node name
nodelist = list(self.im.G.nodes())
# position
cliques = [x for x in nx.find_cliques(self.im.G)]
cliques.sort(key=len)
clique_order = [y for x in cliques for y in x if y is not nodelist[-1]]
pos = nx.shell_layout(self.im.G, nlist=[[nodelist[-1]],clique_order])
# label_dict
label_value = ["iv"+str(int(x)+1) for x in nodelist]
label_value[-1] = "z"
label_dict = dict(zip(nodelist,label_value))
# draw
nx.draw(self.im.G, with_labels=True,
node_color='#A0CBE2',node_size=800,alpha=0.95,
pos = pos, labels=label_dict)
# return
return plt.show()
def get_samples(self,n,seed=None):
rng = np.random.RandomState(seed)
nodelist = list(self.im.G.nodes())
        # here p represents the number of IVs (LFs)
p = len(nodelist)-1
# samples
z_prob_pos = self.class_balance[1]
z = rng.choice(2,n,replace=True,p=[1-z_prob_pos,z_prob_pos])
factor_dict = self.im.infer.get_clique_beliefs()
samp_pos = None
samp_neg = None
for clique in factor_dict:
factor_df = factor2Df(factor_dict[clique].normalize(inplace=False))
# z=1
if samp_pos is None:
samp_pos = sampling(factor_df,factor_df[str(p)]==1,n=Counter(z)[1],random_state=rng)
else:
samp_factor = sampling(factor_df,factor_df[str(p)]==1,n=Counter(z)[1],random_state=rng)
samp_pos = pd.concat([samp_pos,samp_factor.drop(str(p),axis=1)],axis=1)
# z=-1
if samp_neg is None:
samp_neg = sampling(factor_df,factor_df[str(p)]==0,n=Counter(z)[0],random_state=rng)
else:
samp_factor = sampling(factor_df,factor_df[str(p)]==0,n=Counter(z)[0],random_state=rng)
samp_neg = pd.concat([samp_neg,samp_factor.drop(str(p),axis=1)],axis=1)
samp = pd.concat([samp_neg,samp_pos],axis=0)
samp = samp[[str(x) for x in range(p+1)]]
samp = samp.rename(columns={str(p):"z"})
[samp.rename(columns={str(x):"iv"+str(x+1)},inplace=True) for x in list(reversed(range(p)))]
samp.reset_index(inplace=True,drop=True)
return samp
class BayesNetDataSimulator:
def __init__(self, causal_effect=False, q=0, valid_z=True):
        # number of invalid (confounded) IVs:
self.q = q
# define cpds
cpd_z = TabularCPD(variable='z', variable_card=2,
values=[[0.5], [0.5]])
if valid_z:
cpd_confounder = TabularCPD(variable='confounder', variable_card=2,
values=[[0.3], [0.7]])
else:
mutate_prob = 0.55
            # here the confounder is a SNP that changes as z changes
cpd_confounder = TabularCPD(variable='confounder', variable_card=2,
values = [[mutate_prob,1-mutate_prob],
[1-mutate_prob,mutate_prob]],
evidence=["z"],evidence_card=[2])
risk_prob = 0.8
cpd_risk = TabularCPD(variable='risk', variable_card=2,
values=[1-risk_prob*np.array([0.1, 0.9, 0.5, 1]),
risk_prob*np.array([0.1, 0.9, 0.5, 1])],
evidence=['z', 'confounder'],
evidence_card=[2, 2])
# is there true causal effect between the risk factor and the outcome?
outcome_prob = 0.7 if valid_z else 0.55
if causal_effect:
# true causal effect
cpd_outcome = TabularCPD(variable="outcome", variable_card=2,
values = [1-outcome_prob*np.array([0.4,0.6,0.8,1]),
outcome_prob*np.array([0.4,0.6,0.8,1])],
evidence=["confounder","risk"],evidence_card=[2,2])
else:
# null causal effect
cpd_outcome = TabularCPD(variable="outcome", variable_card=2,
values = [[outcome_prob,1-outcome_prob],
[1-outcome_prob,outcome_prob]],
evidence=["confounder"],evidence_card=[2])
# default cpd_list and edge_list
cpd_list = [cpd_z, cpd_confounder, cpd_risk, cpd_outcome]
edge_list = [('z', 'risk'), ('confounder', 'risk'),('confounder', 'outcome')]
# update edge_list depending on whether there is causal effect
if causal_effect:
edge_list.append(('risk','outcome'))
# update edge_list depending on valid_z
if valid_z is False:
edge_list.append(('z','confounder'))
        # update cpd_list and edge_list depending on whether there are invalid IVs
# q is the number of invalid IVs that are associated with the confounder
accuracy_invalid_iv = 0.55
for i in range(self.q):
cpd_invalid_iv = TabularCPD(variable="invalid_iv"+str(i+1), variable_card=2,
values = [[accuracy_invalid_iv,1-accuracy_invalid_iv],
[1-accuracy_invalid_iv,accuracy_invalid_iv]],
evidence=["confounder"],evidence_card=[2])
cpd_list.append(cpd_invalid_iv)
edge_list.append(('confounder',"invalid_iv"+str(i+1)))
# initialize the bayes net
# Starting with defining the network structure
self.bn = BayesianModel(edge_list)
# Associating the parameters with the model structure.
self.bn.add_cpds(*cpd_list)
# Checking if the cpds are valid for the model.
self.bn.check_model()
# world
infer = VariableElimination(self.bn)
world = infer.query(["confounder","z","risk","outcome"],show_progress=False)
world = factor2Df(world)
self.world = world
self.infer = infer
def get_samples(self,iv_samp,seed=None):
# initialization
rng = np.random.RandomState(seed)
world = self.world
# sample from the bayes net
bn_samp = pd.concat([
sampling(world,world["z"]==1,n=Counter(
iv_samp["z"])[1],random_state=rng).set_index(
iv_samp[iv_samp["z"]==1].index),
sampling(world,world["z"]==0,n=Counter(
iv_samp["z"])[0],random_state=rng).set_index(
iv_samp[iv_samp["z"]==0].index)],axis=0)
bn_samp = bn_samp.sort_index()
del bn_samp["z"]
        # generate invalid SNP samples:
for i in range(self.q):
world = factor2Df(self.infer.query(["confounder","invalid_iv"+str(i+1)],show_progress=False))
# sample from the bayes net
invalid_samp = pd.concat([
sampling(world,world["confounder"]==1,n=Counter(
bn_samp["confounder"])[1],random_state=rng).set_index(
bn_samp[bn_samp["confounder"]==1].index),
sampling(world,world["confounder"]==0,n=Counter(
bn_samp["confounder"])[0],random_state=rng).set_index(
bn_samp[bn_samp["confounder"]==0].index)],axis=0)
invalid_samp = invalid_samp.sort_index()
del invalid_samp["confounder"]
bn_samp = pd.concat([bn_samp,invalid_samp],axis=1)
return bn_samp
def get_true_casual_effect(self):
world = self.world
Z1 = world[(world.z==1)][['prob']].sum()
Z0 = world[(world.z==0)][['prob']].sum()
Y1Z1 = world[(world.z==1)&(world.outcome==1)][['prob']].sum()
Y1Z0 = world[(world.z==0)&(world.outcome==1)][['prob']].sum()
X1Z1 = world[(world.z==1)&(world.risk==1)][['prob']].sum()
X1Z0 = world[(world.z==0)&(world.risk==1)][['prob']].sum()
alpha = (np.log(Y1Z1/Z1)-np.log(Y1Z0/Z0))/(np.log(X1Z1/Z1)-np.log(X1Z0/Z0))
alpha = alpha.values[0]
return alpha
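# --- Hedged usage sketch (not part of the original file) ---
# A minimal simulation with 5 candidate IVs and 1 invalid (confounded) IV:
#
#   sim = DataSimulator(p=5, q=1, causal_effect=True, seed=0)
#   z, IVs, x, y = sim.get_samples(n=1000)            # latent z, IV candidates, risk factor, outcome
#   alpha_true = sim.causal_mdl.get_true_casual_effect()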
|
ivy-tutorial-master
|
utils/data_simulator.py
|
import numpy as np
from sklearn.linear_model import LinearRegression, LogisticRegression
from utils.constant import NEGATIVE, POSITIVE
def ProbWaldEstimator(X, Y, Zprob, mode="bxby", **kwargs):
if set(Zprob)=={-1,1}:
Zprob = (Zprob+1)/2
sample_weight_ZX = np.array([[sum(Zprob[X==x]), sum(1-Zprob[X==x])] for x in [-1,1]]).reshape(-1)
sample_weight_ZY = np.array([[sum(Zprob[Y==x]), sum(1-Zprob[Y==x])] for x in [-1,1]]).reshape(-1)
Z_unique = np.array([1,-1,1,-1]).reshape(-1, 1)
X_unique = np.array([-1,-1,1,1])
Y_unique = np.array([-1,-1,1,1])
if "bx" in mode.lower():
clf1 = LogisticRegression(
random_state=0, solver="lbfgs", tol=1e-10, max_iter=10000, C=1e16
)
else:
clf1 = LinearRegression()
clf1.fit(Z_unique, X_unique, sample_weight=sample_weight_ZX)
if "by" in mode.lower():
clf2 = LogisticRegression(
random_state=0, solver="lbfgs", tol=1e-10, max_iter=10000, C=1e16
)
else:
clf2 = LinearRegression()
clf2.fit(Z_unique, Y_unique, sample_weight=sample_weight_ZY)
coef1 = clf1.coef_[0][0] if "bx" in mode.lower() else clf1.coef_[0]
coef2 = clf2.coef_[0][0] if "by" in mode.lower() else clf2.coef_[0]
if len(set(Zprob))==2:
return coef2
else:
return coef2 / coef1
|
ivy-tutorial-master
|
estimators/prob_wald_estimator.py
|
from sklearn.linear_model import LinearRegression, LogisticRegression
import numpy as np
from sklearn.metrics import r2_score
def WaldEstimator(X, Y, Z, mode="bxby",return_predictive_score=False):
# # adjust the probability range of Z to real
# # if Z is a probability not binary label
# # this will avoid adjusting extreme UAS
# if len(set(Z))>2 and max(Z)<1 and min(Z)>0:
# Z = np.log(Z)-np.log(1-Z)
if "bx" in mode.lower():
clf1 = LogisticRegression(
random_state=0, max_iter=10000, C=1e16, solver="lbfgs"
)
else:
clf1 = LinearRegression()
Z = Z.reshape(-1, 1)
clf1.fit(Z, X)
if "by" in mode.lower():
clf2 = LogisticRegression(
random_state=0, solver="lbfgs", tol=1e-10, max_iter=10000, C=1e16
)
else:
clf2 = LinearRegression()
clf2.fit(Z, Y)
coef1 = clf1.coef_[0][0] if "bx" in mode.lower() else clf1.coef_[0]
coef2 = clf2.coef_[0][0] if "by" in mode.lower() else clf2.coef_[0]
if len(set(X))>2:
if return_predictive_score is True:
            # continuous
predictive_score = r2_score(X,clf1.predict(Z))
return [coef2/coef1, predictive_score]
else:
return coef2/coef1
elif np.all(X==Z.reshape(-1)):
# observational association
return coef2
else:
return coef2/coef1
import statsmodels.api as sm
def WaldEstimatorStats(X, Y, Z, mode="bxby", detail=False):
# add intercept to the first column of Z
Z = sm.add_constant(Z)
# change the encoding of Y
Y = (Y+1)/2
# change the encoding of X if it is binary:
if(len(set(X))==2):
X = (X+1)/2
if "bx" in mode.lower():
clf1 = sm.Logit(X,Z)
else:
clf1 = sm.OLS(X,Z)
clf1_results = clf1.fit(disp=0)
if "by" in mode.lower():
clf2 = sm.Logit(Y,Z)
else:
clf2 = sm.OLS(Y,Z)
clf2_results = clf2.fit(disp=0)
    # coefficients
coef1 = clf1_results.params[1]
coef2 = clf2_results.params[1]
# standard error
se1 = clf1_results.bse[1]
se2 = clf2_results.bse[1]
if detail:
return {
"beta_x_z": coef1,
"beta_y_z": coef2,
"se_x_z": se1,
"se_y_z": se2
}
if np.all(X==Z.reshape(-1)):
# observational association
return coef2
else:
# wald ratio
return coef2/coef1
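# --- Hedged usage sketch (not part of the original file) ---
# With a binary exposure X, outcome Y and instrument Z in {-1, 1}, the Wald
# ratio is beta(Y~Z) / beta(X~Z) from the two fits above, e.g.:
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   Z = rng.choice([-1, 1], size=500)
#   X = np.where(rng.rand(500) < 0.8, Z, -Z)   # X follows Z 80% of the time
#   Y = np.where(rng.rand(500) < 0.7, X, -X)   # Y follows X 70% of the time
#   alpha_hat = WaldEstimator(X, Y, Z, mode="bxby")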
|
ivy-tutorial-master
|
estimators/wald_estimator.py
|
import multiprocessing
import numpy as np
from joblib import Parallel, delayed
from sklearn import preprocessing
from tqdm import tqdm
from methods.ivy import Ivy
def ComputeCausalitySingle(
X,
Y,
IVs,
IV_Model_list,
estimator_list,
is_soft_label=True,
ablation_train=1,
ablation_test=1,
Z_true=None,
deps = [],
random_seed = None,
use_canonical=True,
use_forward=True,
return_predictive_score=False):
if random_seed is not None:
np.random.seed(random_seed)
# train test split
train_index, test_index = SplitDataset(Y, ablation_train, ablation_test)
    # X train and test splits need to be specified separately
X_train = X[train_index]
X_test = X[test_index]
IVs_train = IVs[train_index,:]
IVs_test = IVs[test_index,:]
# determine if X is binary
if(len(set(X))>2):
# continuous, scale X
scaler = preprocessing.StandardScaler().fit(X_train.reshape(-1, 1))
X_train = scaler.transform(X_train.reshape(-1, 1)).reshape(-1)
X_test = scaler.transform(X_test.reshape(-1, 1)).reshape(-1)
mode = "cxby"
else:
# binary
mode = "bxby"
causality_list = []
for estimator in estimator_list:
for IV_Model in IV_Model_list:
# train the IV_model
iv_model = IV_Model()
iv_model.train(IVs_train,X_train,deps=deps,
lr=1e-4,n_epochs=10000,log_train_every=20000,
verbose=False,use_canonical=use_canonical,
use_forward=use_forward)
# compute synthesized IV on IVs_test
if is_soft_label is True:
# for Ivy, when X is continuous, use ad-hoc score for synthesis
                if (IV_Model.__name__ in ["Ivy"]) and (mode == "cxby"):
Z_test = iv_model.predict_proba(IVs_test,is_ad_hoc=True)
                elif IV_Model.__name__ == "ObservationalAssociation":
Z_test = iv_model.predict_proba(IVs_test,X_test)
# otherwise just use regular probability
else:
Z_test = iv_model.predict_proba(IVs_test)
elif is_soft_label is False:
Z_test = iv_model.predict(IVs_test)
# compute Wald estimator
causality = estimator(
X_test,
Y[test_index],
Z_test,
mode=mode,
return_predictive_score=return_predictive_score)
# update causality_list
if isinstance(causality,list):
causality_list.extend(causality)
else:
causality_list.append(causality)
if Z_true is not None:
causality = estimator(X_test, Y[test_index], Z_true[test_index], mode=mode)
# update causality_list
if(len(set(X))>2):
causality_list.extend(causality)
else:
causality_list.append(causality)
return causality_list
def ComputeCausality(
X,
Y,
IVs,
IV_Model_list,
estimator_list,
is_soft_label=True,
ablation_train=1,
ablation_test=1,
Z_true=None,
deps=[],
n_trial=100,
num_cores=None,
random_seed=None,
use_canonical=True,
use_forward=True,
return_predictive_score=False):
if num_cores is None:
num_cores = multiprocessing.cpu_count()
if random_seed is None:
random_seed = [None]*n_trial
# preprocessing
if "Ivy" in [x.__name__ for x in IV_Model_list]:
pass
result = Parallel(n_jobs=num_cores)(
delayed(ComputeCausalitySingle)(
X,
Y,
IVs,
IV_Model_list,
estimator_list,
is_soft_label = is_soft_label,
ablation_train = ablation_train,
ablation_test = ablation_test,
Z_true = Z_true,
random_seed = random_seed[i],
deps = deps,
use_canonical = use_canonical,
use_forward=use_forward,
return_predictive_score=return_predictive_score)
for i in tqdm(range(n_trial)))
return result
def SplitDataset(Y, ablation_train=1, ablation_test=1):
n = Y.shape[0]
while True:
# split train and test in halves
train_index = np.random.choice(n,n//2,replace=False)
test_index = np.setdiff1d(range(n),train_index)
# ablation in training set
train_index = np.random.choice(train_index,round(n/2*ablation_train)-1,replace=False)
# ablation in test set
test_index = np.random.choice(test_index,round(n/2*ablation_test)-1,replace=False)
        # make sure that the split includes both classes
if ( len(set(Y[train_index]))>1 and len(set(Y[test_index]))>1):
break
return np.sort(train_index), np.sort(test_index)
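# --- Hedged usage sketch (not part of the original file; assumes the other tutorial modules) ---
#
#   from utils.data_simulator import DataSimulator
#   from estimators.wald_estimator import WaldEstimator
#   sim = DataSimulator(p=5, causal_effect=True, seed=0)
#   z, IVs, x, y = sim.get_samples(n=2000)
#   results = ComputeCausality(x, y, IVs,
#                              IV_Model_list=[Ivy],
#                              estimator_list=[WaldEstimator],
#                              n_trial=10, num_cores=1)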
|
ivy-tutorial-master
|
estimators/compute_causality.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import csv
import logging
import pickle
import numpy as np
import torch
import transformers
import src.slurm
import src.contriever
import src.utils
import src.data
import src.normalize_text
def embed_passages(args, passages, model, tokenizer):
total = 0
allids, allembeddings = [], []
batch_ids, batch_text = [], []
with torch.no_grad():
for k, p in enumerate(passages):
batch_ids.append(p["id"])
if args.no_title or not "title" in p:
text = p["text"]
else:
text = p["title"] + " " + p["text"]
if args.lowercase:
text = text.lower()
if args.normalize_text:
text = src.normalize_text.normalize(text)
batch_text.append(text)
if len(batch_text) == args.per_gpu_batch_size or k == len(passages) - 1:
encoded_batch = tokenizer.batch_encode_plus(
batch_text,
return_tensors="pt",
max_length=args.passage_maxlength,
padding=True,
truncation=True,
)
encoded_batch = {k: v.cuda() for k, v in encoded_batch.items()}
embeddings = model(**encoded_batch)
embeddings = embeddings.cpu()
total += len(batch_ids)
allids.extend(batch_ids)
allembeddings.append(embeddings)
batch_text = []
batch_ids = []
if k % 100000 == 0 and k > 0:
print(f"Encoded passages {total}")
allembeddings = torch.cat(allembeddings, dim=0).numpy()
return allids, allembeddings
def main(args):
model, tokenizer, _ = src.contriever.load_retriever(args.model_name_or_path)
print(f"Model loaded from {args.model_name_or_path}.", flush=True)
model.eval()
model = model.cuda()
if not args.no_fp16:
model = model.half()
passages = src.data.load_passages(args.passages)
shard_size = len(passages) // args.num_shards
start_idx = args.shard_id * shard_size
end_idx = start_idx + shard_size
if args.shard_id == args.num_shards - 1:
end_idx = len(passages)
passages = passages[start_idx:end_idx]
print(f"Embedding generation for {len(passages)} passages from idx {start_idx} to {end_idx}.")
allids, allembeddings = embed_passages(args, passages, model, tokenizer)
save_file = os.path.join(args.output_dir, args.prefix + f"_{args.shard_id:02d}")
os.makedirs(args.output_dir, exist_ok=True)
print(f"Saving {len(allids)} passage embeddings to {save_file}.")
with open(save_file, mode="wb") as f:
pickle.dump((allids, allembeddings), f)
print(f"Total passages processed {len(allids)}. Written to {save_file}.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--passages", type=str, default=None, help="Path to passages (.tsv file)")
parser.add_argument("--output_dir", type=str, default="wikipedia_embeddings", help="dir path to save embeddings")
parser.add_argument("--prefix", type=str, default="passages", help="prefix path to save embeddings")
parser.add_argument("--shard_id", type=int, default=0, help="Id of the current shard")
parser.add_argument("--num_shards", type=int, default=1, help="Total number of shards")
parser.add_argument(
"--per_gpu_batch_size", type=int, default=512, help="Batch size for the passage encoder forward pass"
)
parser.add_argument("--passage_maxlength", type=int, default=512, help="Maximum number of tokens in a passage")
parser.add_argument(
"--model_name_or_path", type=str, help="path to directory containing model weights and config file"
)
parser.add_argument("--no_fp16", action="store_true", help="inference in fp32")
parser.add_argument("--no_title", action="store_true", help="title not added to the passage body")
parser.add_argument("--lowercase", action="store_true", help="lowercase text before encoding")
parser.add_argument("--normalize_text", action="store_true", help="lowercase text before encoding")
args = parser.parse_args()
src.slurm.init_distributed_mode(args)
main(args)
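# --- Hedged example invocation (paths are placeholders) ---
#   python generate_passage_embeddings.py \
#       --model_name_or_path facebook/contriever \
#       --passages psgs_w100.tsv \
#       --output_dir wikipedia_embeddings \
#       --shard_id 0 --num_shards 1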
|
contriever-main
|
generate_passage_embeddings.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
import argparse
import torch
import logging
import json
import numpy as np
import os
import src.slurm
import src.contriever
import src.beir_utils
import src.utils
import src.dist_utils
import src.contriever
logger = logging.getLogger(__name__)
def main(args):
src.slurm.init_distributed_mode(args)
src.slurm.init_signal_handler()
os.makedirs(args.output_dir, exist_ok=True)
logger = src.utils.init_logger(args)
model, tokenizer, _ = src.contriever.load_retriever(args.model_name_or_path)
model = model.cuda()
model.eval()
query_encoder = model
doc_encoder = model
logger.info("Start indexing")
metrics = src.beir_utils.evaluate_model(
query_encoder=query_encoder,
doc_encoder=doc_encoder,
tokenizer=tokenizer,
dataset=args.dataset,
batch_size=args.per_gpu_batch_size,
norm_query=args.norm_query,
norm_doc=args.norm_doc,
is_main=src.dist_utils.is_main(),
split="dev" if args.dataset == "msmarco" else "test",
score_function=args.score_function,
beir_dir=args.beir_dir,
save_results_path=args.save_results_path,
lower_case=args.lower_case,
normalize_text=args.normalize_text,
)
if src.dist_utils.is_main():
for key, value in metrics.items():
logger.info(f"{args.dataset} : {key}: {value:.1f}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--dataset", type=str, help="Evaluation dataset from the BEIR benchmark")
parser.add_argument("--beir_dir", type=str, default="./", help="Directory to save and load beir datasets")
parser.add_argument("--text_maxlength", type=int, default=512, help="Maximum text length")
parser.add_argument("--per_gpu_batch_size", default=128, type=int, help="Batch size per GPU/CPU for indexing.")
parser.add_argument("--output_dir", type=str, default="./my_experiment", help="Output directory")
parser.add_argument("--model_name_or_path", type=str, help="Model name or path")
parser.add_argument(
"--score_function", type=str, default="dot", help="Metric used to compute similarity between two embeddings"
)
parser.add_argument("--norm_query", action="store_true", help="Normalize query representation")
parser.add_argument("--norm_doc", action="store_true", help="Normalize document representation")
parser.add_argument("--lower_case", action="store_true", help="lowercase query and document text")
parser.add_argument(
"--normalize_text", action="store_true", help="Apply function to normalize some common characters"
)
parser.add_argument("--save_results_path", type=str, default=None, help="Path to save result object")
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--main_port", type=int, default=-1, help="Main port (for multi-node SLURM jobs)")
args, _ = parser.parse_known_args()
main(args)
|
contriever-main
|
eval_beir.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import argparse
import torch
import transformers
from src.normalize_text import normalize
def save(tensor, split_path):
if not os.path.exists(os.path.dirname(split_path)):
os.makedirs(os.path.dirname(split_path))
with open(split_path, 'wb') as fout:
torch.save(tensor, fout)
def apply_tokenizer(path, tokenizer, normalize_text=False):
alltokens = []
lines = []
with open(path, "r", encoding="utf-8") as fin:
for k, line in enumerate(fin):
if normalize_text:
line = normalize(line)
lines.append(line)
if len(lines) > 1000000:
tokens = tokenizer.batch_encode_plus(lines, add_special_tokens=False)['input_ids']
tokens = [torch.tensor(x, dtype=torch.int) for x in tokens]
alltokens.extend(tokens)
lines = []
tokens = tokenizer.batch_encode_plus(lines, add_special_tokens=False)['input_ids']
tokens = [torch.tensor(x, dtype=torch.int) for x in tokens]
alltokens.extend(tokens)
alltokens = torch.cat(alltokens)
return alltokens
def tokenize_file(args):
filename = os.path.basename(args.datapath)
savepath = os.path.join(args.outdir, f"{filename}.pkl")
if os.path.exists(savepath):
if args.overwrite:
print(f"File {savepath} already exists, overwriting")
else:
print(f"File {savepath} already exists, exiting")
return
try:
tokenizer = transformers.AutoTokenizer.from_pretrained(args.tokenizer, local_files_only=True)
except:
tokenizer = transformers.AutoTokenizer.from_pretrained(args.tokenizer, local_files_only=False)
print(f"Encoding {args.datapath}...")
tokens = apply_tokenizer(args.datapath, tokenizer, normalize_text=args.normalize_text)
print(f"Saving at {savepath}...")
save(tokens, savepath)
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--datapath", type=str)
parser.add_argument("--outdir", type=str)
parser.add_argument("--tokenizer", type=str)
parser.add_argument("--overwrite", action="store_true")
parser.add_argument("--normalize_text", action="store_true")
args, _ = parser.parse_known_args()
tokenize_file(args)
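# --- Hedged example invocation (paths and tokenizer name are placeholders) ---
#   python preprocess.py --datapath corpus.txt --outdir encoded_data \
#       --tokenizer bert-base-uncased --normalize_text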
|
contriever-main
|
preprocess.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
import glob
import numpy as np
import torch
import src.utils
from src.evaluation import calculate_matches
logger = logging.getLogger(__name__)
def validate(data, workers_num):
match_stats = calculate_matches(data, workers_num)
top_k_hits = match_stats.top_k_hits
#logger.info('Validation results: top k documents hits %s', top_k_hits)
top_k_hits = [v / len(data) for v in top_k_hits]
#logger.info('Validation results: top k documents hits accuracy %s', top_k_hits)
return top_k_hits
def main(opt):
logger = src.utils.init_logger(opt, stdout_only=True)
datapaths = glob.glob(args.data)
r20, r100 = [], []
for path in datapaths:
data = []
with open(path, 'r') as fin:
for line in fin:
data.append(json.loads(line))
#data = json.load(fin)
answers = [ex['answers'] for ex in data]
top_k_hits = validate(data, args.validation_workers)
message = f"Evaluate results from {path}:"
for k in [5, 10, 20, 100]:
if k <= len(top_k_hits):
recall = 100 * top_k_hits[k-1]
if k == 20:
r20.append(f"{recall:.1f}")
if k == 100:
r100.append(f"{recall:.1f}")
message += f' R@{k}: {recall:.1f}'
logger.info(message)
print(datapaths)
print('\t'.join(r20))
print('\t'.join(r100))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data', required=True, type=str, default=None)
parser.add_argument('--validation_workers', type=int, default=16,
help="Number of parallel processes to validate results")
args = parser.parse_args()
main(args)
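# --- Hedged example invocation (the retrieval results path is a placeholder) ---
#   python evaluate_retrieved_passages.py \
#       --data "retrieval_results/*.jsonl" \
#       --validation_workers 16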
|
contriever-main
|
evaluate_retrieved_passages.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pdb
import os
import time
import sys
import torch
from torch.utils.tensorboard import SummaryWriter
import logging
import json
import numpy as np
import torch.distributed as dist
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from src.options import Options
from src import data, beir_utils, slurm, dist_utils, utils, contriever, finetuning_data, inbatch
import train
os.environ["TOKENIZERS_PARALLELISM"] = "false"
logger = logging.getLogger(__name__)
def finetuning(opt, model, optimizer, scheduler, tokenizer, step):
run_stats = utils.WeightedAvgStats()
tb_logger = utils.init_tb_logger(opt.output_dir)
if hasattr(model, "module"):
eval_model = model.module
else:
eval_model = model
eval_model = eval_model.get_encoder()
train_dataset = finetuning_data.Dataset(
datapaths=opt.train_data,
negative_ctxs=opt.negative_ctxs,
negative_hard_ratio=opt.negative_hard_ratio,
negative_hard_min_idx=opt.negative_hard_min_idx,
normalize=opt.eval_normalize_text,
global_rank=dist_utils.get_rank(),
world_size=dist_utils.get_world_size(),
maxload=opt.maxload,
training=True,
)
collator = finetuning_data.Collator(tokenizer, passage_maxlength=opt.chunk_length)
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=opt.per_gpu_batch_size,
drop_last=True,
num_workers=opt.num_workers,
collate_fn=collator,
)
train.eval_model(opt, eval_model, None, tokenizer, tb_logger, step)
evaluate(opt, eval_model, tokenizer, tb_logger, step)
epoch = 1
model.train()
prev_ids, prev_mask = None, None
while step < opt.total_steps:
logger.info(f"Start epoch {epoch}, number of batches: {len(train_dataloader)}")
for i, batch in enumerate(train_dataloader):
batch = {key: value.cuda() if isinstance(value, torch.Tensor) else value for key, value in batch.items()}
step += 1
train_loss, iter_stats = model(**batch, stats_prefix="train")
train_loss.backward()
if opt.optim == "sam" or opt.optim == "asam":
optimizer.first_step(zero_grad=True)
sam_loss, _ = model(**batch, stats_prefix="train/sam_opt")
sam_loss.backward()
optimizer.second_step(zero_grad=True)
else:
optimizer.step()
scheduler.step()
optimizer.zero_grad()
run_stats.update(iter_stats)
if step % opt.log_freq == 0:
log = f"{step} / {opt.total_steps}"
for k, v in sorted(run_stats.average_stats.items()):
log += f" | {k}: {v:.3f}"
if tb_logger:
tb_logger.add_scalar(k, v, step)
log += f" | lr: {scheduler.get_last_lr()[0]:0.3g}"
log += f" | Memory: {torch.cuda.max_memory_allocated()//1e9} GiB"
logger.info(log)
run_stats.reset()
if step % opt.eval_freq == 0:
train.eval_model(opt, eval_model, None, tokenizer, tb_logger, step)
evaluate(opt, eval_model, tokenizer, tb_logger, step)
if step % opt.save_freq == 0 and dist_utils.get_rank() == 0:
utils.save(
eval_model,
optimizer,
scheduler,
step,
opt,
opt.output_dir,
f"step-{step}",
)
model.train()
if step >= opt.total_steps:
break
epoch += 1
def evaluate(opt, model, tokenizer, tb_logger, step):
dataset = finetuning_data.Dataset(
datapaths=opt.eval_data,
normalize=opt.eval_normalize_text,
global_rank=dist_utils.get_rank(),
world_size=dist_utils.get_world_size(),
maxload=opt.maxload,
training=False,
)
collator = finetuning_data.Collator(tokenizer, passage_maxlength=opt.chunk_length)
sampler = SequentialSampler(dataset)
dataloader = DataLoader(
dataset,
sampler=sampler,
batch_size=opt.per_gpu_batch_size,
drop_last=False,
num_workers=opt.num_workers,
collate_fn=collator,
)
model.eval()
if hasattr(model, "module"):
model = model.module
correct_samples, total_samples, total_step = 0, 0, 0
all_q, all_g, all_n = [], [], []
with torch.no_grad():
for i, batch in enumerate(dataloader):
batch = {key: value.cuda() if isinstance(value, torch.Tensor) else value for key, value in batch.items()}
all_tokens = torch.cat([batch["g_tokens"], batch["n_tokens"]], dim=0)
all_mask = torch.cat([batch["g_mask"], batch["n_mask"]], dim=0)
q_emb = model(input_ids=batch["q_tokens"], attention_mask=batch["q_mask"], normalize=opt.norm_query)
all_emb = model(input_ids=all_tokens, attention_mask=all_mask, normalize=opt.norm_doc)
g_emb, n_emb = torch.split(all_emb, [len(batch["g_tokens"]), len(batch["n_tokens"])])
all_q.append(q_emb)
all_g.append(g_emb)
all_n.append(n_emb)
all_q = torch.cat(all_q, dim=0)
all_g = torch.cat(all_g, dim=0)
all_n = torch.cat(all_n, dim=0)
labels = torch.arange(0, len(all_q), device=all_q.device, dtype=torch.long)
all_sizes = dist_utils.get_varsize(all_g)
all_g = dist_utils.varsize_gather_nograd(all_g)
all_n = dist_utils.varsize_gather_nograd(all_n)
labels = labels + sum(all_sizes[: dist_utils.get_rank()])
scores_pos = torch.einsum("id, jd->ij", all_q, all_g)
scores_neg = torch.einsum("id, jd->ij", all_q, all_n)
scores = torch.cat([scores_pos, scores_neg], dim=-1)
argmax_idx = torch.argmax(scores, dim=1)
sorted_scores, indices = torch.sort(scores, descending=True)
isrelevant = indices == labels[:, None]
rs = [r.cpu().numpy().nonzero()[0] for r in isrelevant]
mrr = np.mean([1.0 / (r[0] + 1) if r.size else 0.0 for r in rs])
acc = (argmax_idx == labels).sum() / all_q.size(0)
acc, total = dist_utils.weighted_average(acc, all_q.size(0))
mrr, _ = dist_utils.weighted_average(mrr, all_q.size(0))
acc = 100 * acc
message = []
if dist_utils.is_main():
message = [f"eval acc: {acc:.2f}%", f"eval mrr: {mrr:.3f}"]
logger.info(" | ".join(message))
if tb_logger is not None:
tb_logger.add_scalar(f"eval_acc", acc, step)
tb_logger.add_scalar(f"mrr", mrr, step)
def main():
logger.info("Start")
options = Options()
opt = options.parse()
torch.manual_seed(opt.seed)
slurm.init_distributed_mode(opt)
slurm.init_signal_handler()
directory_exists = os.path.isdir(opt.output_dir)
if dist.is_initialized():
dist.barrier()
os.makedirs(opt.output_dir, exist_ok=True)
if not directory_exists and dist_utils.is_main():
options.print_options(opt)
if dist.is_initialized():
dist.barrier()
utils.init_logger(opt)
step = 0
retriever, tokenizer, retriever_model_id = contriever.load_retriever(opt.model_path, opt.pooling, opt.random_init)
opt.retriever_model_id = retriever_model_id
model = inbatch.InBatch(opt, retriever, tokenizer)
model = model.cuda()
optimizer, scheduler = utils.set_optim(opt, model)
# if dist_utils.is_main():
# utils.save(model, optimizer, scheduler, global_step, 0., opt, opt.output_dir, f"step-{0}")
logger.info(utils.get_parameters(model))
for name, module in model.named_modules():
if isinstance(module, torch.nn.Dropout):
module.p = opt.dropout
if torch.distributed.is_initialized():
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[opt.local_rank],
output_device=opt.local_rank,
find_unused_parameters=False,
)
logger.info("Start training")
finetuning(opt, model, optimizer, scheduler, tokenizer, step)
if __name__ == "__main__":
main()
|
contriever-main
|
finetuning.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import time
import sys
import torch
import logging
import json
import numpy as np
import random
import pickle
import torch.distributed as dist
from torch.utils.data import DataLoader, RandomSampler
from src.options import Options
from src import data, beir_utils, slurm, dist_utils, utils
from src import moco, inbatch
logger = logging.getLogger(__name__)
def train(opt, model, optimizer, scheduler, step):
run_stats = utils.WeightedAvgStats()
tb_logger = utils.init_tb_logger(opt.output_dir)
logger.info("Data loading")
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
tokenizer = model.module.tokenizer
else:
tokenizer = model.tokenizer
collator = data.Collator(opt=opt)
train_dataset = data.load_data(opt, tokenizer)
logger.warning(f"Data loading finished for rank {dist_utils.get_rank()}")
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=opt.per_gpu_batch_size,
drop_last=True,
num_workers=opt.num_workers,
collate_fn=collator,
)
epoch = 1
model.train()
while step < opt.total_steps:
train_dataset.generate_offset()
logger.info(f"Start epoch {epoch}")
for i, batch in enumerate(train_dataloader):
step += 1
batch = {key: value.cuda() if isinstance(value, torch.Tensor) else value for key, value in batch.items()}
train_loss, iter_stats = model(**batch, stats_prefix="train")
train_loss.backward()
optimizer.step()
scheduler.step()
model.zero_grad()
run_stats.update(iter_stats)
if step % opt.log_freq == 0:
log = f"{step} / {opt.total_steps}"
for k, v in sorted(run_stats.average_stats.items()):
log += f" | {k}: {v:.3f}"
if tb_logger:
tb_logger.add_scalar(k, v, step)
log += f" | lr: {scheduler.get_last_lr()[0]:0.3g}"
log += f" | Memory: {torch.cuda.max_memory_allocated()//1e9} GiB"
logger.info(log)
run_stats.reset()
if step % opt.eval_freq == 0:
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
encoder = model.module.get_encoder()
else:
encoder = model.get_encoder()
eval_model(
opt, query_encoder=encoder, doc_encoder=encoder, tokenizer=tokenizer, tb_logger=tb_logger, step=step
)
if dist_utils.is_main():
utils.save(model, optimizer, scheduler, step, opt, opt.output_dir, f"lastlog")
model.train()
if dist_utils.is_main() and step % opt.save_freq == 0:
utils.save(model, optimizer, scheduler, step, opt, opt.output_dir, f"step-{step}")
if step > opt.total_steps:
break
epoch += 1
def eval_model(opt, query_encoder, doc_encoder, tokenizer, tb_logger, step):
for datasetname in opt.eval_datasets:
metrics = beir_utils.evaluate_model(
query_encoder,
doc_encoder,
tokenizer,
dataset=datasetname,
batch_size=opt.per_gpu_eval_batch_size,
norm_doc=opt.norm_doc,
norm_query=opt.norm_query,
beir_dir=opt.eval_datasets_dir,
score_function=opt.score_function,
lower_case=opt.lower_case,
normalize_text=opt.eval_normalize_text,
)
message = []
if dist_utils.is_main():
for metric in ["NDCG@10", "Recall@10", "Recall@100"]:
message.append(f"{datasetname}/{metric}: {metrics[metric]:.2f}")
if tb_logger is not None:
tb_logger.add_scalar(f"{datasetname}/{metric}", metrics[metric], step)
logger.info(" | ".join(message))
if __name__ == "__main__":
logger.info("Start")
options = Options()
opt = options.parse()
torch.manual_seed(opt.seed)
slurm.init_distributed_mode(opt)
slurm.init_signal_handler()
directory_exists = os.path.isdir(opt.output_dir)
if dist.is_initialized():
dist.barrier()
os.makedirs(opt.output_dir, exist_ok=True)
if not directory_exists and dist_utils.is_main():
options.print_options(opt)
if dist.is_initialized():
dist.barrier()
utils.init_logger(opt)
os.environ["TOKENIZERS_PARALLELISM"] = "false"
if opt.contrastive_mode == "moco":
model_class = moco.MoCo
elif opt.contrastive_mode == "inbatch":
model_class = inbatch.InBatch
else:
raise ValueError(f"contrastive mode: {opt.contrastive_mode} not recognised")
if not directory_exists and opt.model_path == "none":
model = model_class(opt)
model = model.cuda()
optimizer, scheduler = utils.set_optim(opt, model)
step = 0
elif directory_exists:
model_path = os.path.join(opt.output_dir, "checkpoint", "latest")
model, optimizer, scheduler, opt_checkpoint, step = utils.load(
model_class,
model_path,
opt,
reset_params=False,
)
logger.info(f"Model loaded from {opt.output_dir}")
else:
model, optimizer, scheduler, opt_checkpoint, step = utils.load(
model_class,
opt.model_path,
opt,
reset_params=False if opt.continue_training else True,
)
if not opt.continue_training:
step = 0
logger.info(f"Model loaded from {opt.model_path}")
logger.info(utils.get_parameters(model))
if dist.is_initialized():
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[opt.local_rank],
output_device=opt.local_rank,
find_unused_parameters=False,
)
dist.barrier()
logger.info("Start training")
train(opt, model, optimizer, scheduler, step)
|
contriever-main
|
train.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import csv
import json
import logging
import pickle
import time
import glob
from pathlib import Path
import numpy as np
import torch
import transformers
import src.index
import src.contriever
import src.utils
import src.slurm
import src.data
from src.evaluation import calculate_matches
import src.normalize_text
os.environ["TOKENIZERS_PARALLELISM"] = "true"
def embed_queries(args, queries, model, tokenizer):
model.eval()
embeddings, batch_question = [], []
with torch.no_grad():
for k, q in enumerate(queries):
if args.lowercase:
q = q.lower()
if args.normalize_text:
q = src.normalize_text.normalize(q)
batch_question.append(q)
if len(batch_question) == args.per_gpu_batch_size or k == len(queries) - 1:
encoded_batch = tokenizer.batch_encode_plus(
batch_question,
return_tensors="pt",
max_length=args.question_maxlength,
padding=True,
truncation=True,
)
encoded_batch = {k: v.cuda() for k, v in encoded_batch.items()}
output = model(**encoded_batch)
embeddings.append(output.cpu())
batch_question = []
embeddings = torch.cat(embeddings, dim=0)
print(f"Questions embeddings shape: {embeddings.size()}")
return embeddings.numpy()
def index_encoded_data(index, embedding_files, indexing_batch_size):
allids = []
allembeddings = np.array([])
for i, file_path in enumerate(embedding_files):
print(f"Loading file {file_path}")
with open(file_path, "rb") as fin:
ids, embeddings = pickle.load(fin)
allembeddings = np.vstack((allembeddings, embeddings)) if allembeddings.size else embeddings
allids.extend(ids)
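# Index accumulated embeddings in chunks of indexing_batch_size; the second loop below flushes whatever remains at the end.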
while allembeddings.shape[0] > indexing_batch_size:
allembeddings, allids = add_embeddings(index, allembeddings, allids, indexing_batch_size)
while allembeddings.shape[0] > 0:
allembeddings, allids = add_embeddings(index, allembeddings, allids, indexing_batch_size)
print("Data indexing completed.")
def add_embeddings(index, embeddings, ids, indexing_batch_size):
end_idx = min(indexing_batch_size, embeddings.shape[0])
ids_toadd = ids[:end_idx]
embeddings_toadd = embeddings[:end_idx]
ids = ids[end_idx:]
embeddings = embeddings[end_idx:]
index.index_data(ids_toadd, embeddings_toadd)
return embeddings, ids
def validate(data, workers_num):
match_stats = calculate_matches(data, workers_num)
top_k_hits = match_stats.top_k_hits
print("Validation results: top k documents hits %s", top_k_hits)
top_k_hits = [v / len(data) for v in top_k_hits]
message = ""
for k in [5, 10, 20, 100]:
if k <= len(top_k_hits):
message += f"R@{k}: {top_k_hits[k-1]} "
print(message)
return match_stats.questions_doc_hits
def add_passages(data, passages, top_passages_and_scores):
# add passages to original data
merged_data = []
assert len(data) == len(top_passages_and_scores)
for i, d in enumerate(data):
results_and_scores = top_passages_and_scores[i]
docs = [passages[doc_id] for doc_id in results_and_scores[0]]
scores = [str(score) for score in results_and_scores[1]]
ctxs_num = len(docs)
d["ctxs"] = [
{
"id": results_and_scores[0][c],
"title": docs[c]["title"],
"text": docs[c]["text"],
"score": scores[c],
}
for c in range(ctxs_num)
]
def add_hasanswer(data, hasanswer):
# add hasanswer to data
for i, ex in enumerate(data):
for k, d in enumerate(ex["ctxs"]):
d["hasanswer"] = hasanswer[i][k]
def load_data(data_path):
if data_path.endswith(".json"):
with open(data_path, "r") as fin:
data = json.load(fin)
elif data_path.endswith(".jsonl"):
data = []
with open(data_path, "r") as fin:
for k, example in enumerate(fin):
example = json.loads(example)
data.append(example)
return data
def main(args):
print(f"Loading model from: {args.model_name_or_path}")
model, tokenizer, _ = src.contriever.load_retriever(args.model_name_or_path)
model.eval()
model = model.cuda()
if not args.no_fp16:
model = model.half()
index = src.index.Indexer(args.projection_size, args.n_subquantizers, args.n_bits)
# index all passages
input_paths = glob.glob(args.passages_embeddings)
input_paths = sorted(input_paths)
embeddings_dir = os.path.dirname(input_paths[0])
index_path = os.path.join(embeddings_dir, "index.faiss")
if args.save_or_load_index and os.path.exists(index_path):
index.deserialize_from(embeddings_dir)
else:
print(f"Indexing passages from files {input_paths}")
start_time_indexing = time.time()
index_encoded_data(index, input_paths, args.indexing_batch_size)
print(f"Indexing time: {time.time()-start_time_indexing:.1f} s.")
if args.save_or_load_index:
index.serialize(embeddings_dir)
# load passages
passages = src.data.load_passages(args.passages)
passage_id_map = {x["id"]: x for x in passages}
data_paths = glob.glob(args.data)
alldata = []
for path in data_paths:
data = load_data(path)
output_path = os.path.join(args.output_dir, os.path.basename(path))
queries = [ex["question"] for ex in data]
questions_embedding = embed_queries(args, queries, model, tokenizer)
# get top k results
start_time_retrieval = time.time()
top_ids_and_scores = index.search_knn(questions_embedding, args.n_docs)
print(f"Search time: {time.time()-start_time_retrieval:.1f} s.")
add_passages(data, passage_id_map, top_ids_and_scores)
hasanswer = validate(data, args.validation_workers)
add_hasanswer(data, hasanswer)
os.makedirs(os.path.dirname(output_path), exist_ok=True)
with open(output_path, "w") as fout:
for ex in data:
json.dump(ex, fout, ensure_ascii=False)
fout.write("\n")
print(f"Saved results to {output_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--data",
required=True,
type=str,
default=None,
help=".json file containing question and answers, similar format to reader data",
)
parser.add_argument("--passages", type=str, default=None, help="Path to passages (.tsv file)")
parser.add_argument("--passages_embeddings", type=str, default=None, help="Glob path to encoded passages")
parser.add_argument(
"--output_dir", type=str, default=None, help="Results are written to outputdir with data suffix"
)
parser.add_argument("--n_docs", type=int, default=100, help="Number of documents to retrieve per questions")
parser.add_argument(
"--validation_workers", type=int, default=32, help="Number of parallel processes to validate results"
)
parser.add_argument("--per_gpu_batch_size", type=int, default=64, help="Batch size for question encoding")
parser.add_argument(
"--save_or_load_index", action="store_true", help="If enabled, save index and load index if it exists"
)
parser.add_argument(
"--model_name_or_path", type=str, help="path to directory containing model weights and config file"
)
parser.add_argument("--no_fp16", action="store_true", help="inference in fp32")
parser.add_argument("--question_maxlength", type=int, default=512, help="Maximum number of tokens in a question")
parser.add_argument(
"--indexing_batch_size", type=int, default=1000000, help="Batch size of the number of passages indexed"
)
parser.add_argument("--projection_size", type=int, default=768)
parser.add_argument(
"--n_subquantizers",
type=int,
default=0,
help="Number of subquantizer used for vector quantization, if 0 flat index is used",
)
parser.add_argument("--n_bits", type=int, default=8, help="Number of bits per subquantizer")
parser.add_argument("--lang", nargs="+")
parser.add_argument("--dataset", type=str, default="none")
parser.add_argument("--lowercase", action="store_true", help="lowercase text before encoding")
parser.add_argument("--normalize_text", action="store_true", help="normalize text")
args = parser.parse_args()
src.slurm.init_distributed_mode(args)
main(args)
|
contriever-main
|
passage_retrieval.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import sys
import os
import csv
import json
def convert2beir(data_path, output_path):
splits = ['test', 'dev', 'train']
queries_path = os.path.join(output_path, "queries.jsonl")
corpus_path = os.path.join(output_path, "corpus.jsonl")
os.makedirs(os.path.dirname(corpus_path), exist_ok=True)
queries = []
with open(queries_path, "w", encoding="utf-8") as fout:
with open(os.path.join(data_path, f"topic.tsv"), "r", encoding="utf-8") as fin:
reader = csv.reader(fin, delimiter="\t")
for x in reader:
qdict = {
"_id": x[0],
"text": x[1]
}
json.dump(qdict, fout, ensure_ascii=False)
fout.write('\n')
with open(os.path.join(data_path, "collection", "docs.jsonl"), "r") as fin:
with open(corpus_path, "w", encoding="utf-8") as fout:
for line in fin:
x = json.loads(line)
x["_id"] = x["id"]
x["text"] = x["contents"]
x["title"] = ""
del x["id"]
del x["contents"]
json.dump(x, fout, ensure_ascii=False)
fout.write('\n')
for split in splits:
qrels_path = os.path.join(output_path, "qrels", f"{split}.tsv")
os.makedirs(os.path.dirname(qrels_path), exist_ok=True)
with open(os.path.join(data_path, f"qrels.{split}.txt"), "r", encoding="utf-8") as fin:
with open(qrels_path, "w", encoding="utf-8") as fout:
writer = csv.writer(fout, delimiter='\t')
writer.writerow(["query-id", "corpus-id", "score"])
for line in fin:
line = line.strip()
el = line.split()
qid = el[0]
i = el[2]
s = el[3]
writer.writerow([qid, i, s])
if __name__ == '__main__':
convert2beir(sys.argv[1], sys.argv[2])
|
contriever-main
|
data_scripts/convertmrtydi2beir.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import sys
import os
import json
from collections import defaultdict
def preprocess_xmkqa(input_path, output_dir):
os.makedirs(output_dir, exist_ok=True)
mkqa = []
with open(input_path, 'r') as fin:
for line in fin:
ex = json.loads(line)
mkqa.append(ex)
mkqadict = {ex['example_id']:ex for ex in mkqa}
langs = ['en', 'ar', 'fi', 'ja', 'ko', 'ru', 'es', 'sv', 'he', 'th', \
'da', 'de', 'fr', 'it', 'nl', 'pl', 'pt', 'hu', 'vi', 'ms', \
'km', 'no', 'tr', 'zh_cn', 'zh_hk', 'zh_tw']
langdata = defaultdict(list)
for ex in mkqa:
answers = []
for a in ex['answers']['en']:
flag = False
if not (a['type'] == 'unanswerable' or a['type'] == 'binary' or a['type'] == 'long_answer'):
flag = True
answers.extend(a.get("aliases", []))
answers.append(a.get("text"))
if flag:
for lang in langs:
langex = {
'id': ex['example_id'],
'lang': lang,
'question': ex['queries'][lang], #question in specific languages
'answers': answers #english answers
}
langdata[lang].append(langex)
for lang, data in langdata.items():
with open(os.path.join(output_dir, f'{lang}.jsonl'), 'w') as fout:
for ex in data:
json.dump(ex, fout, ensure_ascii=False)
fout.write('\n')
if __name__ == '__main__':
preprocess_xmkqa(sys.argv[1], sys.argv[2])
|
contriever-main
|
data_scripts/preprocess_xmkqa.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import torch
import transformers
from transformers import BertModel, XLMRobertaModel
from src import utils
class Contriever(BertModel):
def __init__(self, config, pooling="average", **kwargs):
super().__init__(config, add_pooling_layer=False)
if not hasattr(config, "pooling"):
self.config.pooling = pooling
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
normalize=False,
):
model_output = super().forward(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
last_hidden = model_output["last_hidden_state"]
last_hidden = last_hidden.masked_fill(~attention_mask[..., None].bool(), 0.0)
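# "average" pooling: mean over non-padded token embeddings; "cls" pooling: hidden state of the first token.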
if self.config.pooling == "average":
emb = last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]
elif self.config.pooling == "cls":
emb = last_hidden[:, 0]
if normalize:
emb = torch.nn.functional.normalize(emb, dim=-1)
return emb
class XLMRetriever(XLMRobertaModel):
def __init__(self, config, pooling="average", **kwargs):
super().__init__(config, add_pooling_layer=False)
if not hasattr(config, "pooling"):
self.config.pooling = pooling
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
normalize=False,
):
model_output = super().forward(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
last_hidden = model_output["last_hidden_state"]
last_hidden = last_hidden.masked_fill(~attention_mask[..., None].bool(), 0.0)
if self.config.pooling == "average":
emb = last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]
elif self.config.pooling == "cls":
emb = last_hidden[:, 0]
if normalize:
emb = torch.nn.functional.normalize(emb, dim=-1)
return emb
def load_retriever(model_path, pooling="average", random_init=False):
# try: check if model exists locally
path = os.path.join(model_path, "checkpoint.pth")
if os.path.exists(path):
pretrained_dict = torch.load(path, map_location="cpu")
opt = pretrained_dict["opt"]
if hasattr(opt, "retriever_model_id"):
retriever_model_id = opt.retriever_model_id
else:
# retriever_model_id = "bert-base-uncased"
retriever_model_id = "bert-base-multilingual-cased"
tokenizer = utils.load_hf(transformers.AutoTokenizer, retriever_model_id)
cfg = utils.load_hf(transformers.AutoConfig, retriever_model_id)
if "xlm" in retriever_model_id:
model_class = XLMRetriever
else:
model_class = Contriever
retriever = model_class(cfg)
pretrained_dict = pretrained_dict["model"]
if any("encoder_q." in key for key in pretrained_dict.keys()): # test if model is defined with moco class
pretrained_dict = {k.replace("encoder_q.", ""): v for k, v in pretrained_dict.items() if "encoder_q." in k}
elif any("encoder." in key for key in pretrained_dict.keys()): # test if model is defined with inbatch class
pretrained_dict = {k.replace("encoder.", ""): v for k, v in pretrained_dict.items() if "encoder." in k}
retriever.load_state_dict(pretrained_dict, strict=False)
else:
retriever_model_id = model_path
if "xlm" in retriever_model_id:
model_class = XLMRetriever
else:
model_class = Contriever
cfg = utils.load_hf(transformers.AutoConfig, model_path)
tokenizer = utils.load_hf(transformers.AutoTokenizer, model_path)
retriever = utils.load_hf(model_class, model_path)
return retriever, tokenizer, retriever_model_id
|
contriever-main
|
src/contriever.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import os
class Options:
def __init__(self):
self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.initialize()
def initialize(self):
# basic parameters
self.parser.add_argument(
"--output_dir", type=str, default="./checkpoint/my_experiments", help="models are saved here"
)
self.parser.add_argument(
"--train_data",
nargs="+",
default=[],
help="Data used for training, passed as a list of directories splitted into tensor files.",
)
self.parser.add_argument(
"--eval_data",
nargs="+",
default=[],
help="Data used for evaluation during finetuning, this option is not used during contrastive pre-training.",
)
self.parser.add_argument(
"--eval_datasets", nargs="+", default=[], help="List of datasets used for evaluation, in BEIR format"
)
self.parser.add_argument(
"--eval_datasets_dir", type=str, default="./", help="Directory where eval datasets are stored"
)
self.parser.add_argument("--model_path", type=str, default="none", help="path for retraining")
self.parser.add_argument("--continue_training", action="store_true")
self.parser.add_argument("--num_workers", type=int, default=5)
self.parser.add_argument("--chunk_length", type=int, default=256)
self.parser.add_argument("--loading_mode", type=str, default="split")
self.parser.add_argument("--lower_case", action="store_true", help="perform evaluation after lowercasing")
self.parser.add_argument(
"--sampling_coefficient",
type=float,
default=0.0,
help="coefficient used for sampling between different datasets during training, \
by default sampling is uniform over datasets",
)
self.parser.add_argument("--augmentation", type=str, default="none")
self.parser.add_argument("--prob_augmentation", type=float, default=0.0)
self.parser.add_argument("--dropout", type=float, default=0.1)
self.parser.add_argument("--rho", type=float, default=0.05)
self.parser.add_argument("--contrastive_mode", type=str, default="moco")
self.parser.add_argument("--queue_size", type=int, default=65536)
self.parser.add_argument("--temperature", type=float, default=1.0)
self.parser.add_argument("--momentum", type=float, default=0.999)
self.parser.add_argument("--moco_train_mode_encoder_k", action="store_true")
self.parser.add_argument("--eval_normalize_text", action="store_true")
self.parser.add_argument("--norm_query", action="store_true")
self.parser.add_argument("--norm_doc", action="store_true")
self.parser.add_argument("--projection_size", type=int, default=768)
self.parser.add_argument("--ratio_min", type=float, default=0.1)
self.parser.add_argument("--ratio_max", type=float, default=0.5)
self.parser.add_argument("--score_function", type=str, default="dot")
self.parser.add_argument("--retriever_model_id", type=str, default="bert-base-uncased")
self.parser.add_argument("--pooling", type=str, default="average")
self.parser.add_argument("--random_init", action="store_true", help="init model with random weights")
# dataset parameters
self.parser.add_argument("--per_gpu_batch_size", default=64, type=int, help="Batch size per GPU for training.")
self.parser.add_argument(
"--per_gpu_eval_batch_size", default=256, type=int, help="Batch size per GPU for evaluation."
)
self.parser.add_argument("--total_steps", type=int, default=1000)
self.parser.add_argument("--warmup_steps", type=int, default=-1)
self.parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
self.parser.add_argument("--main_port", type=int, default=10001, help="Master port (for multi-node SLURM jobs)")
self.parser.add_argument("--seed", type=int, default=0, help="random seed for initialization")
# training parameters
self.parser.add_argument("--optim", type=str, default="adamw")
self.parser.add_argument("--scheduler", type=str, default="linear")
self.parser.add_argument("--lr", type=float, default=1e-4, help="learning rate")
self.parser.add_argument(
"--lr_min_ratio",
type=float,
default=0.0,
help="minimum learning rate at the end of the optimization schedule as a ratio of the learning rate",
)
self.parser.add_argument("--weight_decay", type=float, default=0.01, help="learning rate")
self.parser.add_argument("--beta1", type=float, default=0.9, help="beta1")
self.parser.add_argument("--beta2", type=float, default=0.98, help="beta2")
self.parser.add_argument("--eps", type=float, default=1e-6, help="eps")
self.parser.add_argument(
"--log_freq", type=int, default=100, help="log train stats every <log_freq> steps during training"
)
self.parser.add_argument(
"--eval_freq", type=int, default=500, help="evaluate model every <eval_freq> steps during training"
)
self.parser.add_argument("--save_freq", type=int, default=50000)
self.parser.add_argument("--maxload", type=int, default=None)
self.parser.add_argument("--label_smoothing", type=float, default=0.0)
# finetuning options
self.parser.add_argument("--negative_ctxs", type=int, default=1)
self.parser.add_argument("--negative_hard_min_idx", type=int, default=0)
self.parser.add_argument("--negative_hard_ratio", type=float, default=0.0)
def print_options(self, opt):
message = ""
for k, v in sorted(vars(opt).items()):
comment = ""
default = self.parser.get_default(k)
if v != default:
comment = f"\t[default: %s]" % str(default)
message += f"{str(k):>40}: {str(v):<40}{comment}\n"
print(message, flush=True)
model_dir = os.path.join(opt.output_dir, "models")
if not os.path.exists(model_dir):
os.makedirs(os.path.join(opt.output_dir, "models"))
file_name = os.path.join(opt.output_dir, "opt.txt")
with open(file_name, "wt") as opt_file:
opt_file.write(message)
opt_file.write("\n")
def parse(self):
opt, _ = self.parser.parse_known_args()
# opt = self.parser.parse_args()
return opt
|
contriever-main
|
src/options.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
import numpy as np
import math
import random
import transformers
import logging
import torch.distributed as dist
from src import contriever, dist_utils, utils
logger = logging.getLogger(__name__)
class InBatch(nn.Module):
def __init__(self, opt, retriever=None, tokenizer=None):
super(InBatch, self).__init__()
self.opt = opt
self.norm_doc = opt.norm_doc
self.norm_query = opt.norm_query
self.label_smoothing = opt.label_smoothing
if retriever is None or tokenizer is None:
retriever, tokenizer = self._load_retriever(
opt.retriever_model_id, pooling=opt.pooling, random_init=opt.random_init
)
self.tokenizer = tokenizer
self.encoder = retriever
def _load_retriever(self, model_id, pooling, random_init):
cfg = utils.load_hf(transformers.AutoConfig, model_id)
tokenizer = utils.load_hf(transformers.AutoTokenizer, model_id)
if "xlm" in model_id:
model_class = contriever.XLMRetriever
else:
model_class = contriever.Contriever
if random_init:
retriever = model_class(cfg)
else:
retriever = utils.load_hf(model_class, model_id)
if "bert-" in model_id:
if tokenizer.bos_token_id is None:
tokenizer.bos_token = "[CLS]"
if tokenizer.eos_token_id is None:
tokenizer.eos_token = "[SEP]"
retriever.config.pooling = pooling
return retriever, tokenizer
def get_encoder(self):
return self.encoder
def forward(self, q_tokens, q_mask, k_tokens, k_mask, stats_prefix="", iter_stats={}, **kwargs):
bsz = len(q_tokens)
labels = torch.arange(0, bsz, dtype=torch.long, device=q_tokens.device)
qemb = self.encoder(input_ids=q_tokens, attention_mask=q_mask, normalize=self.norm_query)
kemb = self.encoder(input_ids=k_tokens, attention_mask=k_mask, normalize=self.norm_doc)
gather_fn = dist_utils.gather
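# Gather key embeddings from all ranks so each query is contrasted against the global batch; labels are offset by this rank's position in the gathered matrix.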
gather_kemb = gather_fn(kemb)
labels = labels + dist_utils.get_rank() * len(kemb)
scores = torch.einsum("id, jd->ij", qemb / self.opt.temperature, gather_kemb)
loss = torch.nn.functional.cross_entropy(scores, labels, label_smoothing=self.label_smoothing)
# log stats
if len(stats_prefix) > 0:
stats_prefix = stats_prefix + "/"
iter_stats[f"{stats_prefix}loss"] = (loss.item(), bsz)
predicted_idx = torch.argmax(scores, dim=-1)
accuracy = 100 * (predicted_idx == labels).float().mean()
stdq = torch.std(qemb, dim=0).mean().item()
stdk = torch.std(kemb, dim=0).mean().item()
iter_stats[f"{stats_prefix}accuracy"] = (accuracy, bsz)
iter_stats[f"{stats_prefix}stdq"] = (stdq, bsz)
iter_stats[f"{stats_prefix}stdk"] = (stdk, bsz)
return loss, iter_stats
|
contriever-main
|
src/inbatch.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import pickle
from typing import List, Tuple
import faiss
import numpy as np
from tqdm import tqdm
class Indexer(object):
def __init__(self, vector_sz, n_subquantizers=0, n_bits=8):
if n_subquantizers > 0:
self.index = faiss.IndexPQ(vector_sz, n_subquantizers, n_bits, faiss.METRIC_INNER_PRODUCT)
else:
self.index = faiss.IndexFlatIP(vector_sz)
#self.index_id_to_db_id = np.empty((0), dtype=np.int64)
self.index_id_to_db_id = []
def index_data(self, ids, embeddings):
self._update_id_mapping(ids)
embeddings = embeddings.astype('float32')
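# Product-quantization indexes must be trained before vectors can be added; IndexFlatIP is always trained.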
if not self.index.is_trained:
self.index.train(embeddings)
self.index.add(embeddings)
print(f'Total data indexed {len(self.index_id_to_db_id)}')
def search_knn(self, query_vectors: np.array, top_docs: int, index_batch_size: int = 2048) -> List[Tuple[List[object], List[float]]]:
query_vectors = query_vectors.astype('float32')
result = []
nbatch = (len(query_vectors)-1) // index_batch_size + 1
for k in tqdm(range(nbatch)):
start_idx = k*index_batch_size
end_idx = min((k+1)*index_batch_size, len(query_vectors))
q = query_vectors[start_idx: end_idx]
scores, indexes = self.index.search(q, top_docs)
# convert to external ids
db_ids = [[str(self.index_id_to_db_id[i]) for i in query_top_idxs] for query_top_idxs in indexes]
result.extend([(db_ids[i], scores[i]) for i in range(len(db_ids))])
return result
def serialize(self, dir_path):
index_file = os.path.join(dir_path, 'index.faiss')
meta_file = os.path.join(dir_path, 'index_meta.faiss')
print(f'Serializing index to {index_file}, meta data to {meta_file}')
faiss.write_index(self.index, index_file)
with open(meta_file, mode='wb') as f:
pickle.dump(self.index_id_to_db_id, f)
def deserialize_from(self, dir_path):
index_file = os.path.join(dir_path, 'index.faiss')
meta_file = os.path.join(dir_path, 'index_meta.faiss')
print(f'Loading index from {index_file}, meta data from {meta_file}')
self.index = faiss.read_index(index_file)
print('Loaded index of type %s and size %d' % (type(self.index), self.index.ntotal))
with open(meta_file, "rb") as reader:
self.index_id_to_db_id = pickle.load(reader)
assert len(
self.index_id_to_db_id) == self.index.ntotal, 'Deserialized index_id_to_db_id should match faiss index size'
def _update_id_mapping(self, db_ids: List):
#new_ids = np.array(db_ids, dtype=np.int64)
#self.index_id_to_db_id = np.concatenate((self.index_id_to_db_id, new_ids), axis=0)
self.index_id_to_db_id.extend(db_ids)
|
contriever-main
|
src/index.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from logging import getLogger
import os
import sys
import torch
import socket
import signal
import subprocess
logger = getLogger()
def sig_handler(signum, frame):
logger.warning("Signal handler called with signal " + str(signum))
prod_id = int(os.environ['SLURM_PROCID'])
logger.warning("Host: %s - Global rank: %i" % (socket.gethostname(), prod_id))
if prod_id == 0:
logger.warning("Requeuing job " + os.environ['SLURM_JOB_ID'])
os.system('scontrol requeue ' + os.environ['SLURM_JOB_ID'])
else:
logger.warning("Not the main process, no need to requeue.")
sys.exit(-1)
def term_handler(signum, frame):
logger.warning("Signal handler called with signal " + str(signum))
logger.warning("Bypassing SIGTERM.")
def init_signal_handler():
"""
Handle signals sent by SLURM for time limit / pre-emption.
"""
signal.signal(signal.SIGUSR1, sig_handler)
signal.signal(signal.SIGTERM, term_handler)
def init_distributed_mode(params):
"""
Handle single and multi-GPU / multi-node / SLURM jobs.
Initialize the following variables:
- local_rank
- global_rank
- world_size
"""
is_slurm_job = 'SLURM_JOB_ID' in os.environ and not 'WORLD_SIZE' in os.environ
has_local_rank = hasattr(params, 'local_rank')
# SLURM job without torch.distributed.launch
if is_slurm_job and has_local_rank:
assert params.local_rank == -1 # on the cluster, this is handled by SLURM
# local rank on the current node / global rank
params.local_rank = int(os.environ['SLURM_LOCALID'])
params.global_rank = int(os.environ['SLURM_PROCID'])
params.world_size = int(os.environ['SLURM_NTASKS'])
# define master address and master port
hostnames = subprocess.check_output(['scontrol', 'show', 'hostnames', os.environ['SLURM_JOB_NODELIST']])
params.main_addr = hostnames.split()[0].decode('utf-8')
assert 10001 <= params.main_port <= 20000 or params.world_size == 1
# set environment variables for 'env://'
os.environ['MASTER_ADDR'] = params.main_addr
os.environ['MASTER_PORT'] = str(params.main_port)
os.environ['WORLD_SIZE'] = str(params.world_size)
os.environ['RANK'] = str(params.global_rank)
is_distributed = True
# multi-GPU job (local or multi-node) - jobs started with torch.distributed.launch
elif has_local_rank and params.local_rank != -1:
assert params.main_port == -1
# read environment variables
params.global_rank = int(os.environ['RANK'])
params.world_size = int(os.environ['WORLD_SIZE'])
is_distributed = True
# local job (single GPU)
else:
params.local_rank = 0
params.global_rank = 0
params.world_size = 1
is_distributed = False
# set GPU device
torch.cuda.set_device(params.local_rank)
# initialize multi-GPU
if is_distributed:
# http://pytorch.apachecn.org/en/0.3.0/distributed.html#environment-variable-initialization
# 'env://' will read these environment variables:
# MASTER_PORT - required; has to be a free port on machine with rank 0
# MASTER_ADDR - required (except for rank 0); address of rank 0 node
# WORLD_SIZE - required; can be set either here, or in a call to init function
# RANK - required; can be set either here, or in a call to init function
#print("Initializing PyTorch distributed ...")
torch.distributed.init_process_group(
init_method='env://',
backend='nccl',
#world_size=params.world_size,
#rank=params.global_rank,
)
|
contriever-main
|
src/slurm.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import collections
import logging
import regex
import string
import unicodedata
from functools import partial
from multiprocessing import Pool as ProcessPool
from typing import Tuple, List, Dict
import numpy as np
"""
Evaluation code from DPR: https://github.com/facebookresearch/DPR
"""
class SimpleTokenizer(object):
ALPHA_NUM = r'[\p{L}\p{N}\p{M}]+'
NON_WS = r'[^\p{Z}\p{C}]'
def __init__(self):
"""
Args:
annotators: None or empty set (only tokenizes).
"""
self._regexp = regex.compile(
'(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS),
flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
)
def tokenize(self, text, uncased=False):
matches = [m for m in self._regexp.finditer(text)]
if uncased:
tokens = [m.group().lower() for m in matches]
else:
tokens = [m.group() for m in matches]
return tokens
logger = logging.getLogger(__name__)
QAMatchStats = collections.namedtuple('QAMatchStats', ['top_k_hits', 'questions_doc_hits'])
def calculate_matches(data: List, workers_num: int):
"""
Evaluates answer presence in the set of retrieved documents. This function is meant to be used with a large
collection of documents and results. It internally forks multiple sub-processes for evaluation and then merges results.
:param data: list of examples, each holding the question's reference answers and its retrieved contexts ('ctxs')
:param workers_num: number of parallel processes used to evaluate the data
:return: matching information tuple.
top_k_hits - a list where the index is the number of top documents retrieved and the value is the total number of
valid matches across the entire dataset.
questions_doc_hits - more detailed info with answer matches for every question and every retrieved document
"""
logger.info('Matching answers in top docs...')
tokenizer = SimpleTokenizer()
get_score_partial = partial(check_answer, tokenizer=tokenizer)
processes = ProcessPool(processes=workers_num)
scores = processes.map(get_score_partial, data)
logger.info('Per question validation results len=%d', len(scores))
n_docs = len(data[0]['ctxs'])
top_k_hits = [0] * n_docs
for question_hits in scores:
best_hit = next((i for i, x in enumerate(question_hits) if x), None)
if best_hit is not None:
top_k_hits[best_hit:] = [v + 1 for v in top_k_hits[best_hit:]]
return QAMatchStats(top_k_hits, scores)
def check_answer(example, tokenizer) -> List[bool]:
"""Search through all the top docs to see if they have any of the answers."""
answers = example['answers']
ctxs = example['ctxs']
hits = []
for i, doc in enumerate(ctxs):
text = doc['text']
if text is None: # cannot find the document for some reason
logger.warning("no doc in db")
hits.append(False)
continue
hits.append(has_answer(answers, text, tokenizer))
return hits
def has_answer(answers, text, tokenizer) -> bool:
"""Check if a document contains an answer string."""
text = _normalize(text)
text = tokenizer.tokenize(text, uncased=True)
for answer in answers:
answer = _normalize(answer)
answer = tokenizer.tokenize(answer, uncased=True)
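# Sliding-window comparison of the tokenized answer against the tokenized document text.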
for i in range(0, len(text) - len(answer) + 1):
if answer == text[i: i + len(answer)]:
return True
return False
#################################################
######## READER EVALUATION ########
#################################################
def _normalize(text):
return unicodedata.normalize('NFD', text)
#Normalization and score functions from SQuAD evaluation script https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
def normalize_answer(s):
def remove_articles(text):
return regex.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def em(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def f1(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = collections.Counter(prediction_tokens) & collections.Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def f1_score(prediction, ground_truths):
return max([f1(prediction, gt) for gt in ground_truths])
def exact_match_score(prediction, ground_truths):
return max([em(prediction, gt) for gt in ground_truths])
####################################################
######## RETRIEVER EVALUATION ########
####################################################
def eval_batch(scores, inversions, avg_topk, idx_topk):
for k, s in enumerate(scores):
s = s.cpu().numpy()
sorted_idx = np.argsort(-s)
score(sorted_idx, inversions, avg_topk, idx_topk)
def count_inversions(arr):
inv_count = 0
lenarr = len(arr)
for i in range(lenarr):
for j in range(i + 1, lenarr):
if (arr[i] > arr[j]):
inv_count += 1
return inv_count
def score(x, inversions, avg_topk, idx_topk):
x = np.array(x)
inversions.append(count_inversions(x))
for k in avg_topk:
# ratio of passages in the predicted top-k that are
# also in the topk given by gold score
avg_pred_topk = (x[:k]<k).mean()
avg_topk[k].append(avg_pred_topk)
for k in idx_topk:
below_k = (x<k)
# number of passages required to obtain all passages from gold top-k
idx_gold_topk = len(x) - np.argmax(below_k[::-1])
idx_topk[k].append(idx_gold_topk)
|
contriever-main
|
src/evaluation.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import random
import json
import sys
import numpy as np
from src import normalize_text
class Dataset(torch.utils.data.Dataset):
def __init__(
self,
datapaths,
negative_ctxs=1,
negative_hard_ratio=0.0,
negative_hard_min_idx=0,
training=False,
global_rank=-1,
world_size=-1,
maxload=None,
normalize=False,
):
self.negative_ctxs = negative_ctxs
self.negative_hard_ratio = negative_hard_ratio
self.negative_hard_min_idx = negative_hard_min_idx
self.training = training
self.normalize_fn = normalize_text.normalize if normalize else lambda x: x  # apply normalization only when requested via the normalize argument
self._load_data(datapaths, global_rank, world_size, maxload)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
example = self.data[index]
question = example["question"]
if self.training:
gold = random.choice(example["positive_ctxs"])
n_hard_negatives, n_random_negatives = self.sample_n_hard_negatives(example)
negatives = []
if n_random_negatives > 0:
random_negatives = random.sample(example["negative_ctxs"], n_random_negatives)
negatives += random_negatives
if n_hard_negatives > 0:
hard_negatives = random.sample(
example["hard_negative_ctxs"][self.negative_hard_min_idx :], n_hard_negatives
)
negatives += hard_negatives
else:
gold = example["positive_ctxs"][0]
nidx = 0
if "negative_ctxs" in example:
negatives = [example["negative_ctxs"][nidx]]
else:
negatives = []
gold = gold["title"] + " " + gold["text"] if "title" in gold and len(gold["title"]) > 0 else gold["text"]
negatives = [
n["title"] + " " + n["text"] if ("title" in n and len(n["title"]) > 0) else n["text"] for n in negatives
]
example = {
"query": self.normalize_fn(question),
"gold": self.normalize_fn(gold),
"negatives": [self.normalize_fn(n) for n in negatives],
}
return example
def _load_data(self, datapaths, global_rank, world_size, maxload):
counter = 0
self.data = []
for path in datapaths:
path = str(path)
if path.endswith(".jsonl"):
file_data, counter = self._load_data_jsonl(path, global_rank, world_size, counter, maxload)
elif path.endswith(".json"):
file_data, counter = self._load_data_json(path, global_rank, world_size, counter, maxload)
self.data.extend(file_data)
if maxload is not None and maxload > 0 and counter >= maxload:
break
def _load_data_json(self, path, global_rank, world_size, counter, maxload=None):
examples = []
with open(path, "r") as fin:
data = json.load(fin)
for example in data:
counter += 1
if global_rank > -1 and not counter % world_size == global_rank:
continue
examples.append(example)
if maxload is not None and maxload > 0 and counter == maxload:
break
return examples, counter
def _load_data_jsonl(self, path, global_rank, world_size, counter, maxload=None):
examples = []
with open(path, "r") as fin:
for line in fin:
counter += 1
if global_rank > -1 and not counter % world_size == global_rank:
continue
example = json.loads(line)
examples.append(example)
if maxload is not None and maxload > 0 and counter == maxload:
break
return examples, counter
def sample_n_hard_negatives(self, ex):
if "hard_negative_ctxs" in ex:
n_hard_negatives = sum([random.random() < self.negative_hard_ratio for _ in range(self.negative_ctxs)])
n_hard_negatives = min(n_hard_negatives, len(ex["hard_negative_ctxs"][self.negative_hard_min_idx :]))
else:
n_hard_negatives = 0
n_random_negatives = self.negative_ctxs - n_hard_negatives
if "negative_ctxs" in ex:
n_random_negatives = min(n_random_negatives, len(ex["negative_ctxs"]))
else:
n_random_negatives = 0
return n_hard_negatives, n_random_negatives
class Collator(object):
def __init__(self, tokenizer, passage_maxlength=200):
self.tokenizer = tokenizer
self.passage_maxlength = passage_maxlength
def __call__(self, batch):
queries = [ex["query"] for ex in batch]
golds = [ex["gold"] for ex in batch]
negs = [item for ex in batch for item in ex["negatives"]]
allpassages = golds + negs
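# Gold and negative passages are tokenized together and split back apart by position below.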
qout = self.tokenizer.batch_encode_plus(
queries,
max_length=self.passage_maxlength,
truncation=True,
padding=True,
add_special_tokens=True,
return_tensors="pt",
)
kout = self.tokenizer.batch_encode_plus(
allpassages,
max_length=self.passage_maxlength,
truncation=True,
padding=True,
add_special_tokens=True,
return_tensors="pt",
)
q_tokens, q_mask = qout["input_ids"], qout["attention_mask"].bool()
k_tokens, k_mask = kout["input_ids"], kout["attention_mask"].bool()
g_tokens, g_mask = k_tokens[: len(golds)], k_mask[: len(golds)]
n_tokens, n_mask = k_tokens[len(golds) :], k_mask[len(golds) :]
batch = {
"q_tokens": q_tokens,
"q_mask": q_mask,
"k_tokens": k_tokens,
"k_mask": k_mask,
"g_tokens": g_tokens,
"g_mask": g_mask,
"n_tokens": n_tokens,
"n_mask": n_mask,
}
return batch
|
contriever-main
|
src/finetuning_data.py
|
contriever-main
|
src/__init__.py
|
|
"""
adapted from chemdataextractor.text.normalize
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tools for normalizing text.
https://github.com/mcs07/ChemDataExtractor
:copyright: Copyright 2016 by Matt Swain.
:license: MIT
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
'Software'), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
#: Control characters.
CONTROLS = {
'\u0001', '\u0002', '\u0003', '\u0004', '\u0005', '\u0006', '\u0007', '\u0008', '\u000e', '\u000f', '\u0011',
'\u0012', '\u0013', '\u0014', '\u0015', '\u0016', '\u0017', '\u0018', '\u0019', '\u001a', '\u001b',
}
# There are further control characters, but they are instead replaced with a space by unicode normalization
# '\u0009', '\u000a', '\u000b', '\u000c', '\u000d', '\u001c', '\u001d', '\u001e', '\u001f'
#: Hyphen and dash characters.
HYPHENS = {
'-', # \u002d Hyphen-minus
'‐', # \u2010 Hyphen
'‑', # \u2011 Non-breaking hyphen
'⁃', # \u2043 Hyphen bullet
'‒', # \u2012 figure dash
'–', # \u2013 en dash
'—', # \u2014 em dash
'―', # \u2015 horizontal bar
}
#: Minus characters.
MINUSES = {
'-', # \u002d Hyphen-minus
'−', # \u2212 Minus
'-', # \uff0d Full-width Hyphen-minus
'⁻', # \u207b Superscript minus
}
#: Plus characters.
PLUSES = {
'+', # \u002b Plus
'+', # \uff0b Full-width Plus
'⁺', # \u207a Superscript plus
}
#: Slash characters.
SLASHES = {
'/', # \u002f Solidus
'⁄', # \u2044 Fraction slash
'∕', # \u2215 Division slash
}
#: Tilde characters.
TILDES = {
'~', # \u007e Tilde
'˜', # \u02dc Small tilde
'⁓', # \u2053 Swung dash
'∼', # \u223c Tilde operator #in mbert vocab
'∽', # \u223d Reversed tilde
'∿', # \u223f Sine wave
'〜', # \u301c Wave dash #in mbert vocab
'~', # \uff5e Full-width tilde #in mbert vocab
}
#: Apostrophe characters.
APOSTROPHES = {
"'", # \u0027
'’', # \u2019
'՚', # \u055a
'Ꞌ', # \ua78b
'ꞌ', # \ua78c
''', # \uff07
}
#: Single quote characters.
SINGLE_QUOTES = {
"'", # \u0027
'‘', # \u2018
'’', # \u2019
'‚', # \u201a
'‛', # \u201b
}
#: Double quote characters.
DOUBLE_QUOTES = {
'"', # \u0022
'“', # \u201c
'”', # \u201d
'„', # \u201e
'‟', # \u201f
}
#: Accent characters.
ACCENTS = {
'`', # \u0060
'´', # \u00b4
}
#: Prime characters.
PRIMES = {
'′', # \u2032
'″', # \u2033
'‴', # \u2034
'‵', # \u2035
'‶', # \u2036
'‷', # \u2037
'⁗', # \u2057
}
#: Quote characters, including apostrophes, single quotes, double quotes, accents and primes.
QUOTES = APOSTROPHES | SINGLE_QUOTES | DOUBLE_QUOTES | ACCENTS | PRIMES
def normalize(text):
for control in CONTROLS:
text = text.replace(control, '')
text = text.replace('\u000b', ' ').replace('\u000c', ' ').replace(u'\u0085', ' ')
for hyphen in HYPHENS | MINUSES:
text = text.replace(hyphen, '-')
text = text.replace('\u00ad', '')
for double_quote in DOUBLE_QUOTES:
text = text.replace(double_quote, '"') # \u0022
for single_quote in (SINGLE_QUOTES | APOSTROPHES | ACCENTS):
text = text.replace(single_quote, "'") # \u0027
text = text.replace('′', "'") # \u2032 prime
text = text.replace('‵', "'") # \u2035 reversed prime
text = text.replace('″', "''") # \u2033 double prime
text = text.replace('‶', "''") # \u2036 reversed double prime
text = text.replace('‴', "'''") # \u2034 triple prime
text = text.replace('‷', "'''") # \u2037 reversed triple prime
text = text.replace('⁗', "''''") # \u2057 quadruple prime
text = text.replace('…', '...').replace(' . . . ', ' ... ') # \u2026
for slash in SLASHES:
text = text.replace(slash, '/')
#for tilde in TILDES:
# text = text.replace(tilde, '~')
return text
|
contriever-main
|
src/normalize_text.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import sys
import logging
import math
import torch
import errno
from typing import Union, Tuple, List, Dict
from collections import defaultdict
from src import dist_utils
Number = Union[float, int]
logger = logging.getLogger(__name__)
def init_logger(args, stdout_only=False):
if torch.distributed.is_initialized():
torch.distributed.barrier()
stdout_handler = logging.StreamHandler(sys.stdout)
handlers = [stdout_handler]
if not stdout_only:
file_handler = logging.FileHandler(filename=os.path.join(args.output_dir, "run.log"))
handlers.append(file_handler)
logging.basicConfig(
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if dist_utils.is_main() else logging.WARN,
format="[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s",
handlers=handlers,
)
return logger
def symlink_force(target, link_name):
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
def save(model, optimizer, scheduler, step, opt, dir_path, name):
model_to_save = model.module if hasattr(model, "module") else model
path = os.path.join(dir_path, "checkpoint")
epoch_path = os.path.join(path, name) # "step-%s" % step)
os.makedirs(epoch_path, exist_ok=True)
cp = os.path.join(path, "latest")
fp = os.path.join(epoch_path, "checkpoint.pth")
checkpoint = {
"step": step,
"model": model_to_save.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
"opt": opt,
}
torch.save(checkpoint, fp)
symlink_force(epoch_path, cp)
if not name == "lastlog":
logger.info(f"Saving model to {epoch_path}")
def load(model_class, dir_path, opt, reset_params=False):
epoch_path = os.path.realpath(dir_path)
checkpoint_path = os.path.join(epoch_path, "checkpoint.pth")
logger.info(f"loading checkpoint {checkpoint_path}")
checkpoint = torch.load(checkpoint_path, map_location="cpu")
opt_checkpoint = checkpoint["opt"]
state_dict = checkpoint["model"]
model = model_class(opt_checkpoint)
model.load_state_dict(state_dict, strict=True)
model = model.cuda()
step = checkpoint["step"]
if not reset_params:
optimizer, scheduler = set_optim(opt_checkpoint, model)
scheduler.load_state_dict(checkpoint["scheduler"])
optimizer.load_state_dict(checkpoint["optimizer"])
else:
optimizer, scheduler = set_optim(opt, model)
return model, optimizer, scheduler, opt_checkpoint, step
############ OPTIM
class WarmupLinearScheduler(torch.optim.lr_scheduler.LambdaLR):
def __init__(self, optimizer, warmup, total, ratio, last_epoch=-1):
self.warmup = warmup
self.total = total
self.ratio = ratio
super(WarmupLinearScheduler, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
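# Linear warmup from 0 to (1 - ratio) over the first warmup steps, then linear decay from 1 down to ratio at total steps.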
if step < self.warmup:
return (1 - self.ratio) * step / float(max(1, self.warmup))
return max(
0.0,
1.0 + (self.ratio - 1) * (step - self.warmup) / float(max(1.0, self.total - self.warmup)),
)
class CosineScheduler(torch.optim.lr_scheduler.LambdaLR):
def __init__(self, optimizer, warmup, total, ratio=0.1, last_epoch=-1):
self.warmup = warmup
self.total = total
self.ratio = ratio
super(CosineScheduler, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
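# Linear warmup from 0 to 1 over the first warmup steps, then cosine decay from 1 down to ratio at total steps.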
if step < self.warmup:
return float(step) / self.warmup
s = float(step - self.warmup) / (self.total - self.warmup)
return self.ratio + (1.0 - self.ratio) * math.cos(0.5 * math.pi * s)
def set_optim(opt, model):
if opt.optim == "adamw":
optimizer = torch.optim.AdamW(
model.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2), eps=opt.eps, weight_decay=opt.weight_decay
)
else:
raise NotImplementedError("optimizer class not implemented")
scheduler_args = {
"warmup": opt.warmup_steps,
"total": opt.total_steps,
"ratio": opt.lr_min_ratio,
}
if opt.scheduler == "linear":
scheduler_class = WarmupLinearScheduler
elif opt.scheduler == "cosine":
scheduler_class = CosineScheduler
else:
raise ValueError
scheduler = scheduler_class(optimizer, **scheduler_args)
return optimizer, scheduler
def get_parameters(net, verbose=False):
num_params = 0
for param in net.parameters():
num_params += param.numel()
message = "[Network] Total number of parameters : %.6f M" % (num_params / 1e6)
return message
class WeightedAvgStats:
"""provides an average over a bunch of stats"""
def __init__(self):
self.raw_stats: Dict[str, float] = defaultdict(float)
self.total_weights: Dict[str, float] = defaultdict(float)
def update(self, vals: Dict[str, Tuple[Number, Number]]) -> None:
for key, (value, weight) in vals.items():
self.raw_stats[key] += value * weight
self.total_weights[key] += weight
@property
def stats(self) -> Dict[str, float]:
return {x: self.raw_stats[x] / self.total_weights[x] for x in self.raw_stats.keys()}
@property
def tuple_stats(self) -> Dict[str, Tuple[float, float]]:
return {x: (self.raw_stats[x] / self.total_weights[x], self.total_weights[x]) for x in self.raw_stats.keys()}
def reset(self) -> None:
self.raw_stats = defaultdict(float)
self.total_weights = defaultdict(float)
@property
def average_stats(self) -> Dict[str, float]:
keys = sorted(self.raw_stats.keys())
if torch.distributed.is_initialized():
torch.distributed.broadcast_object_list(keys, src=0)
global_dict = {}
for k in keys:
if not k in self.total_weights:
v = 0.0
else:
v = self.raw_stats[k] / self.total_weights[k]
v, _ = dist_utils.weighted_average(v, self.total_weights[k])
global_dict[k] = v
return global_dict
def load_hf(object_class, model_name):
try:
obj = object_class.from_pretrained(model_name, local_files_only=True)
except:
obj = object_class.from_pretrained(model_name, local_files_only=False)
return obj
def init_tb_logger(output_dir):
try:
from torch.utils import tensorboard
if dist_utils.is_main():
tb_logger = tensorboard.SummaryWriter(output_dir)
else:
tb_logger = None
except:
logger.warning("Tensorboard is not available.")
tb_logger = None
return tb_logger
|
contriever-main
|
src/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
import logging
import copy
import transformers
from src import contriever, dist_utils, utils
logger = logging.getLogger(__name__)
class MoCo(nn.Module):
def __init__(self, opt):
super(MoCo, self).__init__()
self.queue_size = opt.queue_size
self.momentum = opt.momentum
self.temperature = opt.temperature
self.label_smoothing = opt.label_smoothing
self.norm_doc = opt.norm_doc
self.norm_query = opt.norm_query
self.moco_train_mode_encoder_k = opt.moco_train_mode_encoder_k # apply the encoder on keys in train mode
retriever, tokenizer = self._load_retriever(
opt.retriever_model_id, pooling=opt.pooling, random_init=opt.random_init
)
self.tokenizer = tokenizer
self.encoder_q = retriever
self.encoder_k = copy.deepcopy(retriever)
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data.copy_(param_q.data)
param_k.requires_grad = False
# create the queue
self.register_buffer("queue", torch.randn(opt.projection_size, self.queue_size))
self.queue = nn.functional.normalize(self.queue, dim=0)
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
def _load_retriever(self, model_id, pooling, random_init):
cfg = utils.load_hf(transformers.AutoConfig, model_id)
tokenizer = utils.load_hf(transformers.AutoTokenizer, model_id)
if "xlm" in model_id:
model_class = contriever.XLMRetriever
else:
model_class = contriever.Contriever
if random_init:
retriever = model_class(cfg)
else:
retriever = utils.load_hf(model_class, model_id)
if "bert-" in model_id:
if tokenizer.bos_token_id is None:
tokenizer.bos_token = "[CLS]"
if tokenizer.eos_token_id is None:
tokenizer.eos_token = "[SEP]"
retriever.config.pooling = pooling
return retriever, tokenizer
def get_encoder(self, return_encoder_k=False):
if return_encoder_k:
return self.encoder_k
else:
return self.encoder_q
def _momentum_update_key_encoder(self):
"""
Momentum update of the key encoder.
"""
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data = param_k.data * self.momentum + param_q.data * (1.0 - self.momentum)
@torch.no_grad()
def _dequeue_and_enqueue(self, keys):
# gather keys before updating queue
keys = dist_utils.gather_nograd(keys.contiguous())
batch_size = keys.shape[0]
ptr = int(self.queue_ptr)
assert self.queue_size % batch_size == 0, f"{batch_size}, {self.queue_size}" # for simplicity
# replace the keys at ptr (dequeue and enqueue)
self.queue[:, ptr : ptr + batch_size] = keys.T
ptr = (ptr + batch_size) % self.queue_size # move pointer
self.queue_ptr[0] = ptr
def _compute_logits(self, q, k):
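# l_pos: similarity between each query and its own key (the positive); l_neg: similarities against the memory queue (negatives).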
l_pos = torch.einsum("nc,nc->n", [q, k]).unsqueeze(-1)
l_neg = torch.einsum("nc,ck->nk", [q, self.queue.clone().detach()])
logits = torch.cat([l_pos, l_neg], dim=1)
return logits
def forward(self, q_tokens, q_mask, k_tokens, k_mask, stats_prefix="", iter_stats={}, **kwargs):
bsz = q_tokens.size(0)
q = self.encoder_q(input_ids=q_tokens, attention_mask=q_mask, normalize=self.norm_query)
# compute key features
with torch.no_grad(): # no gradient to keys
self._momentum_update_key_encoder() # update the key encoder
if not self.encoder_k.training and not self.moco_train_mode_encoder_k:
self.encoder_k.eval()
k = self.encoder_k(input_ids=k_tokens, attention_mask=k_mask, normalize=self.norm_doc)
logits = self._compute_logits(q, k) / self.temperature
# labels: positive key indicators
labels = torch.zeros(bsz, dtype=torch.long).cuda()
loss = torch.nn.functional.cross_entropy(logits, labels, label_smoothing=self.label_smoothing)
self._dequeue_and_enqueue(k)
# log stats
if len(stats_prefix) > 0:
stats_prefix = stats_prefix + "/"
iter_stats[f"{stats_prefix}loss"] = (loss.item(), bsz)
predicted_idx = torch.argmax(logits, dim=-1)
accuracy = 100 * (predicted_idx == labels).float().mean()
stdq = torch.std(q, dim=0).mean().item()
stdk = torch.std(k, dim=0).mean().item()
iter_stats[f"{stats_prefix}accuracy"] = (accuracy, bsz)
iter_stats[f"{stats_prefix}stdq"] = (stdq, bsz)
iter_stats[f"{stats_prefix}stdk"] = (stdk, bsz)
return loss, iter_stats
|
contriever-main
|
src/moco.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import glob
import torch
import random
import json
import csv
import numpy as np
import numpy.random
import logging
from collections import defaultdict
import torch.distributed as dist
from src import dist_utils
logger = logging.getLogger(__name__)
def load_data(opt, tokenizer):
datasets = {}
for path in opt.train_data:
data = load_dataset(path, opt.loading_mode)
if data is not None:
datasets[path] = Dataset(data, opt.chunk_length, tokenizer, opt)
dataset = MultiDataset(datasets)
dataset.set_prob(coeff=opt.sampling_coefficient)
return dataset
def load_dataset(data_path, loading_mode):
files = glob.glob(os.path.join(data_path, "*.p*"))
files.sort()
tensors = []
if loading_mode == "split":
files_split = list(np.array_split(files, dist_utils.get_world_size()))[dist_utils.get_rank()]
for filepath in files_split:
try:
tensors.append(torch.load(filepath, map_location="cpu"))
except:
logger.warning(f"Unable to load file {filepath}")
elif loading_mode == "full":
for fin in files:
tensors.append(torch.load(fin, map_location="cpu"))
elif loading_mode == "single":
tensors.append(torch.load(files[0], map_location="cpu"))
if len(tensors) == 0:
return None
tensor = torch.cat(tensors)
return tensor
class MultiDataset(torch.utils.data.Dataset):
def __init__(self, datasets):
self.datasets = datasets
self.prob = [1 / len(self.datasets) for _ in self.datasets]
self.dataset_ids = list(self.datasets.keys())
def __len__(self):
return sum([len(dataset) for dataset in self.datasets.values()])
def __getitem__(self, index):
dataset_idx = numpy.random.choice(range(len(self.prob)), 1, p=self.prob)[0]
did = self.dataset_ids[dataset_idx]
index = random.randint(0, len(self.datasets[did]) - 1)
sample = self.datasets[did][index]
sample["dataset_id"] = did
return sample
def generate_offset(self):
for dataset in self.datasets.values():
dataset.generate_offset()
def set_prob(self, coeff=0.0):
prob = np.array([float(len(dataset)) for _, dataset in self.datasets.items()])
prob /= prob.sum()
prob = np.array([p**coeff for p in prob])
prob /= prob.sum()
self.prob = prob
class Dataset(torch.utils.data.Dataset):
"""Monolingual dataset based on a list of paths"""
def __init__(self, data, chunk_length, tokenizer, opt):
self.data = data
self.chunk_length = chunk_length
self.tokenizer = tokenizer
self.opt = opt
self.generate_offset()
def __len__(self):
return (self.data.size(0) - self.offset) // self.chunk_length
def __getitem__(self, index):
start_idx = self.offset + index * self.chunk_length
end_idx = start_idx + self.chunk_length
tokens = self.data[start_idx:end_idx]
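# Two independent random crops of the same chunk serve as the query/key positive pair for contrastive training.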
q_tokens = randomcrop(tokens, self.opt.ratio_min, self.opt.ratio_max)
k_tokens = randomcrop(tokens, self.opt.ratio_min, self.opt.ratio_max)
q_tokens = apply_augmentation(q_tokens, self.opt)
q_tokens = add_bos_eos(q_tokens, self.tokenizer.bos_token_id, self.tokenizer.eos_token_id)
k_tokens = apply_augmentation(k_tokens, self.opt)
k_tokens = add_bos_eos(k_tokens, self.tokenizer.bos_token_id, self.tokenizer.eos_token_id)
return {"q_tokens": q_tokens, "k_tokens": k_tokens}
def generate_offset(self):
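# A random starting offset shifts chunk boundaries each time it is regenerated.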
self.offset = random.randint(0, self.chunk_length - 1)
class Collator(object):
def __init__(self, opt):
self.opt = opt
def __call__(self, batch_examples):
batch = defaultdict(list)
for example in batch_examples:
for k, v in example.items():
batch[k].append(v)
q_tokens, q_mask = build_mask(batch["q_tokens"])
k_tokens, k_mask = build_mask(batch["k_tokens"])
batch["q_tokens"] = q_tokens
batch["q_mask"] = q_mask
batch["k_tokens"] = k_tokens
batch["k_mask"] = k_mask
return batch
def randomcrop(x, ratio_min, ratio_max):
ratio = random.uniform(ratio_min, ratio_max)
length = int(len(x) * ratio)
start = random.randint(0, len(x) - length)
end = start + length
crop = x[start:end].clone()
return crop
def build_mask(tensors):
shapes = [x.shape for x in tensors]
maxlength = max([len(x) for x in tensors])
returnmasks = []
ids = []
for k, x in enumerate(tensors):
returnmasks.append(torch.tensor([1] * len(x) + [0] * (maxlength - len(x))))
ids.append(torch.cat((x, torch.tensor([0] * (maxlength - len(x))))))
ids = torch.stack(ids, dim=0).long()
returnmasks = torch.stack(returnmasks, dim=0).bool()
return ids, returnmasks
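# Illustrative behaviour of build_mask (not part of the original file): for an input such as
# [tensor([5, 6, 7]), tensor([8, 9])] it right-pads the shorter sequence with 0 and returns
# ids [[5, 6, 7], [8, 9, 0]] together with boolean masks [[True, True, True], [True, True, False]].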
def add_token(x, token):
x = torch.cat((torch.tensor([token]), x))
return x
def deleteword(x, p=0.1):
mask = np.random.rand(len(x))
x = [e for e, m in zip(x, mask) if m > p]
return x
def replaceword(x, min_random, max_random, p=0.1):
mask = np.random.rand(len(x))
x = [e if m > p else random.randint(min_random, max_random) for e, m in zip(x, mask)]
return x
def maskword(x, mask_id, p=0.1):
mask = np.random.rand(len(x))
x = [e if m > p else mask_id for e, m in zip(x, mask)]
return x
def shuffleword(x, p=0.1):
    """Shuffle a randomly chosen subset of the values in x (each value selected with probability p)."""
    count = (np.random.rand(len(x)) < p).sum()
indices_to_shuffle = random.sample(range(len(x)), k=count)
to_shuffle = [x[i] for i in indices_to_shuffle]
random.shuffle(to_shuffle)
for index, value in enumerate(to_shuffle):
old_index = indices_to_shuffle[index]
x[old_index] = value
return x
def apply_augmentation(x, opt):
if opt.augmentation == "mask":
return torch.tensor(maskword(x, mask_id=opt.mask_id, p=opt.prob_augmentation))
elif opt.augmentation == "replace":
return torch.tensor(
replaceword(x, min_random=opt.start_id, max_random=opt.vocab_size - 1, p=opt.prob_augmentation)
)
elif opt.augmentation == "delete":
return torch.tensor(deleteword(x, p=opt.prob_augmentation))
elif opt.augmentation == "shuffle":
return torch.tensor(shuffleword(x, p=opt.prob_augmentation))
else:
if not isinstance(x, torch.Tensor):
x = torch.Tensor(x)
return x
def add_bos_eos(x, bos_token_id, eos_token_id):
if not isinstance(x, torch.Tensor):
x = torch.Tensor(x)
if bos_token_id is None and eos_token_id is not None:
x = torch.cat([x.clone().detach(), torch.tensor([eos_token_id])])
elif bos_token_id is not None and eos_token_id is None:
x = torch.cat([torch.tensor([bos_token_id]), x.clone().detach()])
elif bos_token_id is None and eos_token_id is None:
pass
else:
x = torch.cat([torch.tensor([bos_token_id]), x.clone().detach(), torch.tensor([eos_token_id])])
return x
# Used for passage retrieval
def load_passages(path):
if not os.path.exists(path):
logger.info(f"{path} does not exist")
return
logger.info(f"Loading passages from: {path}")
passages = []
with open(path) as fin:
if path.endswith(".jsonl"):
for k, line in enumerate(fin):
ex = json.loads(line)
passages.append(ex)
else:
reader = csv.reader(fin, delimiter="\t")
for k, row in enumerate(reader):
if not row[0] == "id":
ex = {"id": row[0], "title": row[2], "text": row[1]}
passages.append(ex)
return passages
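if __name__ == "__main__":
    # Minimal sanity-check sketch (not part of the original file): exercise the token-level
    # helpers above on a small fake sequence. None of these functions need the training
    # options object (opt); the token ids below are arbitrary placeholders.
    toks = torch.arange(10, 30)
    crop = randomcrop(toks, ratio_min=0.1, ratio_max=0.5)
    crop = add_bos_eos(crop, bos_token_id=101, eos_token_id=102)
    kept = deleteword(toks.tolist(), p=0.2)
    ids, masks = build_mask([crop, torch.tensor(kept)])
    print(crop.shape, ids.shape, masks.shape)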
|
contriever-main
|
src/data.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.distributed as dist
class Gather(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.tensor):
output = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(output, x)
return tuple(output)
@staticmethod
def backward(ctx, *grads):
all_gradients = torch.stack(grads)
dist.all_reduce(all_gradients)
return all_gradients[dist.get_rank()]
def gather(x: torch.tensor):
if not dist.is_initialized():
return x
x_gather = Gather.apply(x)
x_gather = torch.cat(x_gather, dim=0)
return x_gather
@torch.no_grad()
def gather_nograd(x: torch.tensor):
if not dist.is_initialized():
return x
x_gather = [torch.ones_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(x_gather, x, async_op=False)
x_gather = torch.cat(x_gather, dim=0)
return x_gather
@torch.no_grad()
def varsize_gather_nograd(x: torch.Tensor):
"""gather tensors of different sizes along the first dimension"""
if not dist.is_initialized():
return x
# determine max size
size = torch.tensor([x.shape[0]], device=x.device, dtype=torch.int)
allsizes = [torch.zeros_like(size) for _ in range(dist.get_world_size())]
dist.all_gather(allsizes, size)
max_size = max([size.cpu().max() for size in allsizes])
padded = torch.empty(max_size, *x.shape[1:], dtype=x.dtype, device=x.device)
padded[: x.shape[0]] = x
output = [torch.zeros_like(padded) for _ in range(dist.get_world_size())]
dist.all_gather(output, padded)
output = [tensor[: allsizes[k]] for k, tensor in enumerate(output)]
output = torch.cat(output, dim=0)
return output
@torch.no_grad()
def get_varsize(x: torch.Tensor):
"""gather tensors of different sizes along the first dimension"""
if not dist.is_initialized():
return [x.shape[0]]
# determine max size
size = torch.tensor([x.shape[0]], device=x.device, dtype=torch.int)
allsizes = [torch.zeros_like(size) for _ in range(dist.get_world_size())]
dist.all_gather(allsizes, size)
allsizes = torch.cat(allsizes)
return allsizes
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_main():
return get_rank() == 0
def get_world_size():
if not dist.is_initialized():
return 1
else:
return dist.get_world_size()
def barrier():
if dist.is_initialized():
dist.barrier()
def average_main(x):
if not dist.is_initialized():
return x
if dist.is_initialized() and dist.get_world_size() > 1:
dist.reduce(x, 0, op=dist.ReduceOp.SUM)
if is_main():
x = x / dist.get_world_size()
return x
def sum_main(x):
if not dist.is_initialized():
return x
if dist.is_initialized() and dist.get_world_size() > 1:
dist.reduce(x, 0, op=dist.ReduceOp.SUM)
return x
def weighted_average(x, count):
if not dist.is_initialized():
if isinstance(x, torch.Tensor):
x = x.item()
return x, count
t_loss = torch.tensor([x * count]).cuda()
t_total = torch.tensor([count]).cuda()
t_loss = sum_main(t_loss)
t_total = sum_main(t_total)
return (t_loss / t_total).item(), t_total.item()
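if __name__ == "__main__":
    # Minimal single-process sketch (not part of the original file): when torch.distributed
    # is not initialized, the helpers above fall back to identity / rank-0 behaviour, which
    # is convenient for debugging evaluation code locally.
    print(get_rank(), get_world_size(), is_main())
    avg, total = weighted_average(torch.tensor(2.5), count=4)
    print(avg, total)  # 2.5, 4
    x = torch.randn(3, 2)
    print(torch.equal(gather(x), x))  # True when not distributed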
|
contriever-main
|
src/dist_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import glob
from collections import defaultdict
from typing import List, Dict
import numpy as np
import torch
import torch.distributed as dist
import beir.util
from beir.datasets.data_loader import GenericDataLoader
from beir.retrieval.evaluation import EvaluateRetrieval
from beir.retrieval.search.dense import DenseRetrievalExactSearch
from beir.reranking.models import CrossEncoder
from beir.reranking import Rerank
import src.dist_utils as dist_utils
from src import normalize_text
class DenseEncoderModel:
def __init__(
self,
query_encoder,
doc_encoder=None,
tokenizer=None,
max_length=512,
add_special_tokens=True,
norm_query=False,
norm_doc=False,
lower_case=False,
normalize_text=False,
**kwargs,
):
self.query_encoder = query_encoder
self.doc_encoder = doc_encoder
self.tokenizer = tokenizer
self.max_length = max_length
self.add_special_tokens = add_special_tokens
self.norm_query = norm_query
self.norm_doc = norm_doc
self.lower_case = lower_case
self.normalize_text = normalize_text
def encode_queries(self, queries: List[str], batch_size: int, **kwargs) -> np.ndarray:
if dist.is_initialized():
idx = np.array_split(range(len(queries)), dist.get_world_size())[dist.get_rank()]
else:
idx = range(len(queries))
queries = [queries[i] for i in idx]
if self.normalize_text:
queries = [normalize_text.normalize(q) for q in queries]
if self.lower_case:
queries = [q.lower() for q in queries]
allemb = []
nbatch = (len(queries) - 1) // batch_size + 1
with torch.no_grad():
for k in range(nbatch):
start_idx = k * batch_size
end_idx = min((k + 1) * batch_size, len(queries))
qencode = self.tokenizer.batch_encode_plus(
queries[start_idx:end_idx],
max_length=self.max_length,
padding=True,
truncation=True,
add_special_tokens=self.add_special_tokens,
return_tensors="pt",
)
qencode = {key: value.cuda() for key, value in qencode.items()}
emb = self.query_encoder(**qencode, normalize=self.norm_query)
allemb.append(emb.cpu())
allemb = torch.cat(allemb, dim=0)
allemb = allemb.cuda()
if dist.is_initialized():
allemb = dist_utils.varsize_gather_nograd(allemb)
allemb = allemb.cpu().numpy()
return allemb
def encode_corpus(self, corpus: List[Dict[str, str]], batch_size: int, **kwargs):
if dist.is_initialized():
idx = np.array_split(range(len(corpus)), dist.get_world_size())[dist.get_rank()]
else:
idx = range(len(corpus))
corpus = [corpus[i] for i in idx]
corpus = [c["title"] + " " + c["text"] if len(c["title"]) > 0 else c["text"] for c in corpus]
if self.normalize_text:
corpus = [normalize_text.normalize(c) for c in corpus]
if self.lower_case:
corpus = [c.lower() for c in corpus]
allemb = []
nbatch = (len(corpus) - 1) // batch_size + 1
with torch.no_grad():
for k in range(nbatch):
start_idx = k * batch_size
end_idx = min((k + 1) * batch_size, len(corpus))
cencode = self.tokenizer.batch_encode_plus(
corpus[start_idx:end_idx],
max_length=self.max_length,
padding=True,
truncation=True,
add_special_tokens=self.add_special_tokens,
return_tensors="pt",
)
cencode = {key: value.cuda() for key, value in cencode.items()}
emb = self.doc_encoder(**cencode, normalize=self.norm_doc)
allemb.append(emb.cpu())
allemb = torch.cat(allemb, dim=0)
allemb = allemb.cuda()
if dist.is_initialized():
allemb = dist_utils.varsize_gather_nograd(allemb)
allemb = allemb.cpu().numpy()
return allemb
def evaluate_model(
query_encoder,
doc_encoder,
tokenizer,
dataset,
batch_size=128,
add_special_tokens=True,
norm_query=False,
norm_doc=False,
is_main=True,
split="test",
score_function="dot",
beir_dir="BEIR/datasets",
save_results_path=None,
lower_case=False,
normalize_text=False,
):
metrics = defaultdict(list) # store final results
if hasattr(query_encoder, "module"):
query_encoder = query_encoder.module
query_encoder.eval()
if doc_encoder is not None:
if hasattr(doc_encoder, "module"):
doc_encoder = doc_encoder.module
doc_encoder.eval()
else:
doc_encoder = query_encoder
dmodel = DenseRetrievalExactSearch(
DenseEncoderModel(
query_encoder=query_encoder,
doc_encoder=doc_encoder,
tokenizer=tokenizer,
add_special_tokens=add_special_tokens,
norm_query=norm_query,
norm_doc=norm_doc,
lower_case=lower_case,
normalize_text=normalize_text,
),
batch_size=batch_size,
)
retriever = EvaluateRetrieval(dmodel, score_function=score_function)
data_path = os.path.join(beir_dir, dataset)
if not os.path.isdir(data_path) and is_main:
url = "https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/{}.zip".format(dataset)
data_path = beir.util.download_and_unzip(url, beir_dir)
dist_utils.barrier()
if not dataset == "cqadupstack":
corpus, queries, qrels = GenericDataLoader(data_folder=data_path).load(split=split)
results = retriever.retrieve(corpus, queries)
if is_main:
ndcg, _map, recall, precision = retriever.evaluate(qrels, results, retriever.k_values)
for metric in (ndcg, _map, recall, precision, "mrr", "recall_cap", "hole"):
if isinstance(metric, str):
metric = retriever.evaluate_custom(qrels, results, retriever.k_values, metric=metric)
for key, value in metric.items():
metrics[key].append(value)
if save_results_path is not None:
torch.save(results, f"{save_results_path}")
    elif dataset == "cqadupstack":  # compute macroaverage over datasets
        paths = glob.glob(os.path.join(data_path, "*"))  # each subfolder is one cqadupstack forum
        for path in paths:
            corpus, queries, qrels = GenericDataLoader(data_folder=path).load(split=split)
results = retriever.retrieve(corpus, queries)
if is_main:
ndcg, _map, recall, precision = retriever.evaluate(qrels, results, retriever.k_values)
for metric in (ndcg, _map, recall, precision, "mrr", "recall_cap", "hole"):
if isinstance(metric, str):
metric = retriever.evaluate_custom(qrels, results, retriever.k_values, metric=metric)
for key, value in metric.items():
metrics[key].append(value)
for key, value in metrics.items():
assert (
len(value) == 12
), f"cqadupstack includes 12 datasets, only {len(value)} values were compute for the {key} metric"
metrics = {key: 100 * np.mean(value) for key, value in metrics.items()}
return metrics
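# ----------------------------------------------------------------------------
# Usage sketch (not part of the original file, illustrative only): evaluate_model expects
# encoders that accept the tokenizer's outputs plus a `normalize` keyword, as the Contriever
# encoders in this repo do. The import path, model name and dataset name below are
# assumptions / placeholders, not prescribed by this file.
#
#   from transformers import AutoTokenizer
#   from src.contriever import Contriever  # assumed repo-local encoder class
#   tokenizer = AutoTokenizer.from_pretrained("facebook/contriever")
#   encoder = Contriever.from_pretrained("facebook/contriever").cuda().eval()
#   metrics = evaluate_model(encoder, None, tokenizer, dataset="scifact",
#                            batch_size=64, beir_dir="BEIR/datasets")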
|
contriever-main
|
src/beir_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import cv2
import numpy as np
from utils.meshutils import read_mesh, process_head_model
from utils.strandsutils import smooth_strands, downsample_strands, duplicate_strands, merge_strands
from datautils.datautils import load_bin_strands, save_bin_strands
from modules.neural_strands import NeuralStrands
def neural_interp(conf):
output_folder = os.path.join(conf['output']['dir'], conf['output']['name']) # for synthesized data
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# prepressing
strands = load_bin_strands(conf['strands']['guide_strds'])
strands = smooth_strands(strands, lap_constraint=4.0, pos_constraint=2.0)
# strands = downsample_strands(strands)
# fit head model
head_mesh = read_mesh(conf['head']['head_path'])
head_texture = cv2.imread(conf['head']['head_scalp_tex'])
head_mesh, scalp_mesh, scalp_faces_idx = process_head_model(head_mesh, head_texture,
conf['head']['roots_path'],
np.array(conf['head']['target_face_base'], dtype=np.float32),
is_deformation=True)
head_write = head_mesh.export('%s/%sface_reg.ply'%(output_folder, conf['output']['name']))
neural_strands = NeuralStrands(is_resampled=False)
neural_strands.prep_strands_data(strands, head_mesh, scalp_mesh, scalp_faces_idx)
# interpolation
neural_strands.get_neural_representations(iter_opt=0)
# neural_strands.get_neural_representations(iter_opt=300, lr=1e-2) # new trained model fits very well, no need to fit again
denoised_strds_idxs = neural_strands.denoise_neural_texture(num_del_cls=0, do_denoise=False)
texel_roots_mask = cv2.imread(conf['head']['head_scalp_tex'], 2) / 255.
neural_strands.interpolation_knn(texel_roots_mask, interp_kernel_size=5, interp_neig_pts=3)
interp_strds = neural_strands.world_strands_from_texels(neural_strands.interp_neural_texture, neural_strands.interp_strds_idx_map)
# save results
save_bin_strands('%s/%s_interp.bin'%(output_folder, conf['output']['name']), interp_strds.detach().cpu().numpy().astype(np.float32))
merged_strands = merge_strands([neural_strands.original_strands, interp_strds.detach().cpu().numpy().astype(np.float32)])
merged_strands = downsample_strands(merged_strands) # TODO use neural spline with GPU to speed up.
merged_strands = duplicate_strands(merged_strands, ratio=4)
save_bin_strands('%s/%s_merged.bin'%(output_folder, conf['output']['name']), merged_strands)
print('Saving done!')
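# ----------------------------------------------------------------------------
# Configuration sketch (not part of the original file): the keys below are exactly the ones
# neural_interp() reads; the values are illustrative placeholders, not shipped defaults.
#
#   conf = {
#       'output':  {'dir': 'output', 'name': 'example'},
#       'strands': {'guide_strds': 'data/example_guide.bin'},
#       'head':    {'head_path': 'data/head.obj',
#                   'head_scalp_tex': 'data/scalp_mask.png',
#                   'roots_path': 'data/roots.ply',
#                   'target_face_base': [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]},
#   }
#   neural_interp(conf)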
|
CT2Hair-main
|
CT2Hair/interp.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from utils.pcutils import load_pc
from utils.strandsutils import strandspc2strands, smooth_strands
from datautils.datautils import load_bin_strands, save_bin_strandspc, save_bin_strands
from modules.strands_opt import StrandsOptimizerNeuralCubic
def strands_opt(conf):
input_strands = load_bin_strands(conf['strands']['interp_strds'])
print("Load strands finished!")
# target_pc = load_pc(conf['pc']['pc_path'], load_color=False, load_normal=False)
target_pc, target_pc_colors = load_pc(conf['pc']['pc_path'], load_color=True, load_normal=False)
print("Load point cloud finished!")
strands_opt = StrandsOptimizerNeuralCubic(input_strands, target_pc, target_pc_colors[:, 0], num_strd_pts=64)
ori_splined_pts, opted_splined_pts, strands_seps, opted_strands_pc, input_num_strds_pts = strands_opt.optimization()
output_folder = os.path.join(conf['output']['dir'], conf['output']['name'])
if not os.path.exists(output_folder):
os.makedirs(output_folder)
opted_strands = smooth_strands(strandspc2strands(opted_splined_pts, sep=strands_seps))
save_bin_strands('%s/%s_opted.bin'%(output_folder, conf['output']['name']), opted_strands)
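# ----------------------------------------------------------------------------
# Configuration sketch (not part of the original file): strands_opt() only reads the keys
# listed below; values are illustrative placeholders.
#
#   conf = {
#       'strands': {'interp_strds': 'output/example_interp.bin'},
#       'pc':      {'pc_path': 'data/example_pointcloud.ply'},
#       'output':  {'dir': 'output', 'name': 'example'},
#   }
#   strands_opt(conf)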
|
CT2Hair-main
|
CT2Hair/optim.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
import torch.utils.data as th_data
from utils.strandsutils import spline_strand, pad_strand
class TbnStrandsBinDataset(th_data.Dataset):
def __init__(self, tbn_strands, is_resampled=True, num_strds_points=100):
self.num_strands = len(tbn_strands)
self.tbn_strands = tbn_strands
self.batch_size = 300
self.num_workers = 12
self.num_strds_points = num_strds_points
self.is_resampled = is_resampled
def __len__(self):
return self.num_strands
def __getitem__(self, idx):
strand = self.tbn_strands[idx].astype(np.float32)
out_dict = {}
if not self.is_resampled:
if strand.shape[0] > self.num_strds_points:
strand = spline_strand(strand, num_strand_points=self.num_strds_points)
strand, time = pad_strand(strand, num_strand_points=self.num_strds_points)
out_dict['times'] = torch.tensor(time).float()
        assert strand.shape[0] == self.num_strds_points, 'Strands need to be resampled to a fixed number of points.'
# Scale unit from mm to m
strand = strand / 1000.
out_dict['points'] = torch.tensor(strand).float()
return out_dict
def get_dataloader(self):
return th_data.DataLoader(dataset=self, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers, drop_last=False)
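if __name__ == "__main__":
    # Minimal sketch (not part of the original file): push two random strands through the
    # dataset. Assumes the repo root is on PYTHONPATH so utils.strandsutils resolves; the
    # strand point counts are arbitrary.
    fake_strands = [np.random.rand(80, 3).astype(np.float32) * 10.0,
                    np.random.rand(120, 3).astype(np.float32) * 10.0]
    dataset = TbnStrandsBinDataset(fake_strands, is_resampled=False, num_strds_points=100)
    sample = dataset[0]            # shorter strand is padded up to 100 points
    print(sample['points'].shape, sample['times'].shape)
    sample = dataset[1]            # longer strand is splined down to 100 points first
    print(sample['points'].shape)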
|
CT2Hair-main
|
CT2Hair/datautils/dataloaders.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import time
import pathlib
import struct
import numpy as np
from utils.pcutils import pc_voxelization, save_pc
def load_raw(path, raw_shape=[2048, 2048, 2048], offset=0, drop_masks=[], crop=[[0, -1], [0, -1], [0, -1]], is_downsample=True, downsample_ratio=2):
start_time = time.time()
if pathlib.Path(path).suffix == '.npy':
raw_data = np.load(path)
else:
raw_data = np.fromfile(path, dtype=np.ushort)
raw_data = raw_data[offset:]
raw_data = raw_data.reshape(raw_shape)
for i_del in range(len(drop_masks)):
drop_mask = drop_masks[i_del]
raw_data[drop_mask[0][0]:drop_mask[0][1], drop_mask[1][0]:drop_mask[1][1], drop_mask[2][0]:drop_mask[2][1]] = 0
raw_data = raw_data[crop[0][0]:crop[0][1], crop[1][0]:crop[1][1], crop[2][0]:crop[2][1]]
current_shape = np.array(raw_data.shape)
if is_downsample:
downsample_shape = current_shape // downsample_ratio
x_even = (np.arange(downsample_shape[0]) * downsample_ratio).astype(np.int16)
y_even = (np.arange(downsample_shape[1]) * downsample_ratio).astype(np.int16)
z_even = (np.arange(downsample_shape[2]) * downsample_ratio).astype(np.int16)
raw_data = raw_data[x_even]
raw_data = raw_data[:, y_even]
raw_data = raw_data[:, :, z_even]
path_ds = path.replace(pathlib.Path(path).suffix, '_ds.npy')
np.save(path_ds, raw_data)
print('Finish load the volume, used %fs. Original and final shapes are: '%(time.time() - start_time), current_shape, np.array(raw_data.shape))
return raw_data
def crop_hair(raw_data, hair_density_range):
start_time = time.time()
hair_mask = (raw_data > hair_density_range[0]) & (raw_data < hair_density_range[1])
cropped_hair = raw_data * hair_mask
print('Finish crop hair, used %fs.'%(time.time() - start_time))
return cropped_hair, hair_mask
def crop_scalp(raw_data, scalp_range):
start_time = time.time()
scalp_mask = (raw_data > scalp_range[0]) & (raw_data < scalp_range[1])
cropped_hair = raw_data * scalp_mask
print('Finish crop scalp, used %fs.'%(time.time() - start_time))
return cropped_hair, scalp_mask
def get_hair_mask(raw_data, hair_density_range):
hair_mask = (raw_data > hair_density_range[0]) & (raw_data < hair_density_range[1])
return hair_mask
def expand_vidx(vidx_x, vidx_y, vidx_z, scale_rate=3):
size = int(2 ** scale_rate)
o_x, o_y, o_z = np.meshgrid(np.linspace(0, size - 1, size),
np.linspace(0, size - 1, size),
np.linspace(0, size - 1, size))
vidx_x = vidx_x[:, None].repeat(size ** 3, axis=-1).reshape(-1, size, size, size)
vidx_y = vidx_y[:, None].repeat(size ** 3, axis=-1).reshape(-1, size, size, size)
vidx_z = vidx_z[:, None].repeat(size ** 3, axis=-1).reshape(-1, size, size, size)
vidx_x = vidx_x * size + o_x[None, ...]
vidx_y = vidx_y * size + o_y[None, ...]
vidx_z = vidx_z * size + o_z[None, ...]
vidx_x = vidx_x.reshape(-1).astype(np.uint16)
vidx_y = vidx_y.reshape(-1).astype(np.uint16)
vidx_z = vidx_z.reshape(-1).astype(np.uint16)
return vidx_x, vidx_y, vidx_z
def del_wig_net(hair_data, scalp_mesh, voxel_size, scale_rate=3):
start_time = time.time()
print('Start delete wig net...')
hair_voxel_shape = hair_data.shape
scalp_voxel_shape = np.array(hair_voxel_shape) / (2 ** scale_rate)
scalp_points = scalp_mesh.sample(100000) * (1 / voxel_size[None, :]) / (2 ** scale_rate)
vidx_x, vidx_y, vidx_z = pc_voxelization(scalp_points, scalp_voxel_shape.astype(np.uint16))
vidx_x, vidx_y, vidx_z = expand_vidx(vidx_x, vidx_y, vidx_z, scale_rate)
hair_data[vidx_x, vidx_y, vidx_z] = 0
print('Delete wig net finished, used %fs.'%(time.time() - start_time))
return hair_data
def save_raw(data, path):
data.astype('int16').tofile(path)
def get_slide(data, id=0, axis='x', range=1):
    # note: the x and z axes are switched relative to the volume's storage order
if axis == 'z':
slide = data[(id - range + 1):(id + range), :, :]
slide = np.sum(slide, axis=0, keepdims=False)
return slide
elif axis == 'y':
slide = data[:, (id - range + 1):(id + range), :]
slide = np.sum(slide, axis=1, keepdims=False)
return slide
elif axis == 'x':
slide = data[:, :, (id - range + 1):(id + range)]
slide = np.sum(slide, axis=2, keepdims=False)
return slide
def load_bin_strands(bin_path):
file = open(bin_path, 'rb')
num_strands = struct.unpack('i', file.read(4))[0]
strands = []
max_strds_pts = 0
for i in range(num_strands):
num_verts = struct.unpack('i', file.read(4))[0]
strand = np.zeros((num_verts, 6), dtype=np.float32)
for j in range(num_verts):
x = struct.unpack('f', file.read(4))[0]
y = struct.unpack('f', file.read(4))[0]
z = struct.unpack('f', file.read(4))[0]
nx = struct.unpack('f', file.read(4))[0]
ny = struct.unpack('f', file.read(4))[0]
nz = struct.unpack('f', file.read(4))[0]
label = struct.unpack('f', file.read(4))[0]
strand[j][0] = x
strand[j][1] = y
strand[j][2] = z
strand[j][3] = nx
strand[j][4] = ny
strand[j][5] = nz
        if np.isnan(np.sum(strand)): # FIXME: some strands were saved with NaN values; skip them
continue
if num_verts < 5:
continue
if max_strds_pts < num_verts:
max_strds_pts = num_verts
strands.append(strand)
strands = np.array(strands, dtype=object)
return strands
def load_usc_data_strands(data_path):
file = open(data_path, 'rb')
num_strands = struct.unpack('i', file.read(4))[0]
strands = []
for i in range(num_strands):
num_verts = struct.unpack('i', file.read(4))[0]
strand = np.zeros((num_verts, 3), dtype=np.float32)
for j in range(num_verts):
x = struct.unpack('f', file.read(4))[0]
y = struct.unpack('f', file.read(4))[0]
z = struct.unpack('f', file.read(4))[0]
strand[j][0] = x
strand[j][1] = y
strand[j][2] = z
if num_verts <= 1:
continue
if np.isnan(np.sum(strand)):
continue
strands.append(strand)
strands = np.array(strands, dtype=object)
return strands
def save_bin_strands(filepath, strands, tangents=None):
num_strands = strands.shape[0]
file = open(filepath, 'wb')
file.write(struct.pack('i', num_strands))
for i_strand in range(num_strands):
num_points = int(strands[i_strand].shape[0])
file.write(struct.pack('i', num_points))
for j_point in range(num_points):
file.write(struct.pack('f', strands[i_strand][j_point, 0]))
file.write(struct.pack('f', strands[i_strand][j_point, 1]))
file.write(struct.pack('f', strands[i_strand][j_point, 2]))
if tangents is None:
file.write(struct.pack('f', 0.0))
file.write(struct.pack('f', 0.0))
file.write(struct.pack('f', 0.0))
else:
file.write(struct.pack('f', tangents[i_strand][j_point, 0]))
file.write(struct.pack('f', tangents[i_strand][j_point, 1]))
file.write(struct.pack('f', tangents[i_strand][j_point, 2]))
file.write(struct.pack('f', 0.0))
# save strands with colors (PCA features mapping)
def save_color_strands(filepath, strands, colors=None):
num_strands = strands.shape[0]
file = open(filepath, 'wb')
file.write(struct.pack('i', num_strands))
for i_strand in range(num_strands):
num_points = int(strands[i_strand].shape[0])
file.write(struct.pack('i', num_points))
for j_point in range(num_points):
file.write(struct.pack('f', strands[i_strand][j_point, 0]))
file.write(struct.pack('f', strands[i_strand][j_point, 1]))
file.write(struct.pack('f', strands[i_strand][j_point, 2]))
if colors is not None:
file.write(struct.pack('f', colors[i_strand, 0]))
file.write(struct.pack('f', colors[i_strand, 1]))
file.write(struct.pack('f', colors[i_strand, 2]))
else:
assert strands[i_strand].shape[1] == 6, 'DataUtils::DataUtils No color of strands.'
file.write(struct.pack('f', strands[i_strand][j_point, 3]))
file.write(struct.pack('f', strands[i_strand][j_point, 4]))
file.write(struct.pack('f', strands[i_strand][j_point, 5]))
file.write(struct.pack('f', 0.0))
def save_bin_strandspc(filepath, pc, sep, tangents=None):
num_strands = len(sep)
file = open(filepath, 'wb')
file.write(struct.pack('i', num_strands))
point_count = 0
for i_strand in range(num_strands):
num_points = int(sep[i_strand])
file.write(struct.pack('i', num_points))
for j_point in range(num_points):
file.write(struct.pack('f', pc[point_count, 0]))
file.write(struct.pack('f', pc[point_count, 1]))
file.write(struct.pack('f', pc[point_count, 2]))
if tangents is None:
file.write(struct.pack('f', 0.0))
file.write(struct.pack('f', 0.0))
file.write(struct.pack('f', 0.0))
else:
file.write(struct.pack('f', tangents[point_count, 0]))
file.write(struct.pack('f', tangents[point_count, 1]))
file.write(struct.pack('f', tangents[point_count, 2]))
file.write(struct.pack('f', 0.0))
point_count += 1
def merge_save_bin_strands(filepath, strands_list, tangents_list=None):
num_all_strands = 0
num_groups = len(strands_list)
for i_group in range(num_groups):
num_all_strands += strands_list[i_group].shape[0]
file = open(filepath, 'wb')
file.write(struct.pack('i', num_all_strands))
for i_group in range(num_groups):
strands = strands_list[i_group]
num_strands = strands.shape[0]
if tangents_list is None:
tangents = strands
else:
tangents = tangents_list[i_group]
for i_strand in range(num_strands):
num_points = int(strands[i_strand].shape[0])
file.write(struct.pack('i', num_points))
for j_point in range(num_points):
file.write(struct.pack('f', strands[i_strand][j_point, 0]))
file.write(struct.pack('f', strands[i_strand][j_point, 1]))
file.write(struct.pack('f', strands[i_strand][j_point, 2]))
file.write(struct.pack('f', tangents[i_strand][j_point, 0]))
file.write(struct.pack('f', tangents[i_strand][j_point, 1]))
file.write(struct.pack('f', tangents[i_strand][j_point, 2]))
file.write(struct.pack('f', 0.0))
def save_usc_strands(filepath, strands):
num_strands = strands.shape[0]
file = open(filepath, 'wb')
file.write(struct.pack('i', num_strands))
for i_strand in range(num_strands):
num_points = int(strands[i_strand].shape[0])
file.write(struct.pack('i', num_points))
for j_point in range(num_points):
file.write(struct.pack('f', strands[i_strand][j_point, 0]))
file.write(struct.pack('f', strands[i_strand][j_point, 1]))
file.write(struct.pack('f', strands[i_strand][j_point, 2]))
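if __name__ == "__main__":
    # Minimal round-trip sketch (not part of the original file): write a few random strands
    # with save_bin_strands and read them back. load_bin_strands drops strands with fewer
    # than 5 points, so the synthetic strands below use 6, 8 and 10 points.
    import os
    import tempfile
    fake = np.array([np.random.rand(n, 3).astype(np.float32) for n in (6, 8, 10)], dtype=object)
    with tempfile.TemporaryDirectory() as tmp_dir:
        bin_path = os.path.join(tmp_dir, 'roundtrip.bin')
        save_bin_strands(bin_path, fake)
        loaded = load_bin_strands(bin_path)
    print(len(loaded), loaded[0].shape)  # 3, (6, 6) -- xyz plus the zero tangents written above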
|
CT2Hair-main
|
CT2Hair/datautils/datautils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import splines
import torch
import numpy as np
from tqdm import tqdm
from scipy.sparse import coo_matrix
from scipy.sparse.linalg import spsolve
from utils.pcutils import get_bbox
def scale_roots_positions(roots_points, scale_ratio):
_, bbox_center = get_bbox(roots_points)
temp_points = (roots_points - bbox_center) * scale_ratio + bbox_center
roots_points = temp_points
return roots_points
def get_roots_normals(roots_points):
_, bbox_center = get_bbox(roots_points)
normals = roots_points - bbox_center
normals = normals / np.linalg.norm(normals, axis=1)[:, None]
return normals
def get_strand_length(strand):
delta = strand[:-1] - strand[1:]
delta_length = np.sqrt(np.sum(delta**2, axis=1, keepdims=False))
length = np.sum(delta_length, axis=0, keepdims=False)
return length, delta_length
def get_strands_length(strands):
deltas = strands[:, :-1] - strands[:, 1:]
    if torch.is_tensor(strands):
delta_lengths = torch.sqrt(torch.sum(deltas**2, dim=2, keepdim=False))
lengths = torch.sum(delta_lengths, dim=1, keepdim=False)
else:
delta_lengths = np.sqrt(np.sum(deltas**2, axis=2, keepdims=False))
lengths = np.sum(delta_lengths, axis=1, keepdims=False)
return lengths, delta_lengths
def get_strands_roots(strands, scale_ratio=1.0):
roots = []
num_strands = strands.shape[0]
for i_strand in range(num_strands):
roots.append(strands[i_strand][0][:3])
points = np.array(roots)
if not scale_ratio == 1.0:
points = scale_roots_positions(points, scale_ratio)
normals = get_roots_normals(points)
return points, normals
def line_interpolate(start_point, end_point, interp_count):
interped_points = []
if interp_count == 0:
return interped_points
delta = end_point - start_point
delta_length = math.sqrt(np.sum(delta**2, axis=0, keepdims=True))
step_dir = delta / delta_length
step_size = delta_length / (interp_count + 1)
for i in range(interp_count):
interped_points.append(start_point + step_dir * (i + 1) * step_size)
return interped_points
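# Illustrative example (not part of the original file):
# line_interpolate(np.array([0., 0., 0.]), np.array([0., 0., 3.]), interp_count=2) returns
# the two interior samples [0, 0, 1] and [0, 0, 2], i.e. the segment split into
# interp_count + 1 equal pieces with the endpoints excluded.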
def resample_strand(strand, tangents=None, num_strand_points=200):
num_ori_points = strand.shape[0]
    assert num_ori_points < num_strand_points, "number of resampled points must be larger than the original one"
strand_length, delta_length = get_strand_length(strand)
step_length = strand_length / (num_strand_points - 1)
resampled_strand = []
if tangents is None:
interp_idxs = np.where(delta_length > step_length)[0]
interp_segs = delta_length[interp_idxs]
interp_segs_rank_idxs = np.argsort(-1 * interp_segs)
new_step_length = np.sum(interp_segs) / (num_strand_points - (num_ori_points - interp_idxs.shape[0]))
        interp_counts = np.clip((interp_segs / new_step_length).astype(np.int16) - 1, 0, num_strand_points - 1) # supposed to always be positive or zero
interp_counts_sum = np.sum(interp_counts, axis=0, keepdims=False) # supposed to always less than num_strand_points
assert interp_counts_sum + num_ori_points <= num_strand_points, "utils:strandsutils.py, FIXME, strand resample error, Interp counts: %d, Original Counts: %d"%(interp_counts_sum, num_ori_points)
num_ext_interp = num_strand_points - num_ori_points - interp_counts_sum
ext_interp_segs = interp_segs_rank_idxs[:num_ext_interp]
interp_counts[ext_interp_segs] += 1 # Interpolate one more point in this segs
interp_delta_count = 0
for i_delta in range(num_ori_points - 1):
resampled_strand.append(strand[i_delta])
if delta_length[i_delta] > step_length:
interped_points = line_interpolate(strand[i_delta], strand[i_delta + 1], interp_counts[interp_delta_count])
resampled_strand.extend(interped_points)
interp_delta_count += 1
resampled_strand.append(strand[num_ori_points - 1])
resampled_strand = np.array(resampled_strand)
    assert resampled_strand.shape[0] == num_strand_points, "interpolation failed, number of resampled: %d."%(resampled_strand.shape[0])
return resampled_strand
def augment_strand(strand, aug_config):
if aug_config["rotation_z_max_angle"] > 0:
theta_z = aug_config["rotation_z_max_angle"]
rtheta = (np.random.rand() * 2. - 1.) * theta_z * np.pi / 180.
rot_mat = np.asarray([[np.cos(rtheta), -np.sin(rtheta), 0.],
[np.sin(rtheta), np.cos(rtheta), 0.],
[ 0., 0., 1.]], dtype=np.float32)
strand = (rot_mat[:, :] @ strand.T).T
if np.sum(aug_config["random_stretch_xyz_magnitude"]) > 0:
sc = np.random.rand(3) * 2 - 1
sc = 1 + np.asarray(aug_config["random_stretch_xyz_magnitude"]) * sc
strand = strand * sc
return strand
def spline_strand(strand, num_strand_points=100):
num_ori_points = strand.shape[0]
interp_spline = splines.CatmullRom(strand)
interp_idx = np.arange(num_strand_points) / (num_strand_points / (num_ori_points - 1))
interp_strand = interp_spline.evaluate(interp_idx)
assert interp_strand.shape[0] == num_strand_points, "Spline error."
return interp_strand
def pad_strand(strand, num_strand_points=100):
num_ori_points = strand.shape[0]
    if num_ori_points > num_strand_points:
        # Truncate instead of returning early so the function always returns a (strand, time) tuple.
        strand = strand[:num_strand_points]
        num_ori_points = num_strand_points
num_pad = num_strand_points - num_ori_points
last_delta = strand[-1] - strand[-2]
offsets = np.arange(num_pad) + 1
offsets = offsets[:, None]
last_delta = last_delta[None, :]
offsets = offsets * last_delta
# padded_strand = np.zeros_like(offsets) + strand[-1]
padded_strand = offsets + strand[-1]
padded_strand = np.concatenate((strand, padded_strand), axis=0)
ori_time = np.linspace(0, 1, num_ori_points)
strd_len, delta_len = get_strand_length(strand) # modify time by length
ori_time[1:] = delta_len / strd_len
ori_time = np.add.accumulate(ori_time)
padded_time = 1. + (np.arange(num_pad) + 1) * (1. / num_ori_points)
padded_time = np.concatenate((ori_time, padded_time), axis=0)
return padded_strand, padded_time
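# Illustrative example (not part of the original file): pad_strand(strand_60x3, num_strand_points=100)
# returns a (100, 3) strand whose last 40 points continue along the direction of the final
# segment, plus a (100,) time vector; padded samples are the entries with time greater than 1.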
def tridiagonal_solve(b, A_upper, A_diagonal, A_lower):
A_upper, _ = torch.broadcast_tensors(A_upper[:, None, :], b[..., :-1])
A_lower, _ = torch.broadcast_tensors(A_lower[:, None, :], b[..., :-1])
A_diagonal, b = torch.broadcast_tensors(A_diagonal[:, None, :], b)
channels = b.size(-1)
new_b = np.empty(channels, dtype=object)
new_A_diagonal = np.empty(channels, dtype=object)
outs = np.empty(channels, dtype=object)
new_b[0] = b[..., 0]
new_A_diagonal[0] = A_diagonal[..., 0]
for i in range(1, channels):
w = A_lower[..., i - 1] / new_A_diagonal[i - 1]
new_A_diagonal[i] = A_diagonal[..., i] - w * A_upper[..., i - 1]
new_b[i] = b[..., i] - w * new_b[i - 1]
outs[channels - 1] = new_b[channels - 1] / new_A_diagonal[channels - 1]
for i in range(channels - 2, -1, -1):
outs[i] = (new_b[i] - A_upper[..., i] * outs[i + 1]) / new_A_diagonal[i]
return torch.stack(outs.tolist(), dim=-1)
def cubic_spline_coeffs(t, x):
# x should be a tensor of shape (..., length)
# Will return the b, two_c, three_d coefficients of the derivative of the cubic spline interpolating the path.
length = x.size(-1)
if length < 2:
# In practice this should always already be caught in __init__.
raise ValueError("Must have a time dimension of size at least 2.")
elif length == 2:
a = x[..., :1]
b = (x[..., 1:] - x[..., :1]) / (t[..., 1:] - t[..., :1])
two_c = torch.zeros(*x.shape[:-1], 1, dtype=x.dtype, device=x.device)
three_d = torch.zeros(*x.shape[:-1], 1, dtype=x.dtype, device=x.device)
else:
# Set up some intermediate values
time_diffs = t[..., 1:] - t[..., :-1]
time_diffs_reciprocal = time_diffs.reciprocal()
time_diffs_reciprocal_squared = time_diffs_reciprocal ** 2
three_path_diffs = 3 * (x[..., 1:] - x[..., :-1])
six_path_diffs = 2 * three_path_diffs
path_diffs_scaled = three_path_diffs * time_diffs_reciprocal_squared[:, None, :]
# Solve a tridiagonal linear system to find the derivatives at the knots
system_diagonal = torch.empty((x.shape[0], length), dtype=x.dtype, device=x.device)
system_diagonal[..., :-1] = time_diffs_reciprocal
system_diagonal[..., -1] = 0
system_diagonal[..., 1:] += time_diffs_reciprocal
system_diagonal *= 2
system_rhs = torch.empty_like(x)
system_rhs[..., :-1] = path_diffs_scaled
system_rhs[..., -1] = 0
system_rhs[..., 1:] += path_diffs_scaled
knot_derivatives = tridiagonal_solve(system_rhs, time_diffs_reciprocal,
system_diagonal, time_diffs_reciprocal)
# Do some algebra to find the coefficients of the spline
time_diffs_reciprocal = time_diffs_reciprocal[:, None, :]
time_diffs_reciprocal_squared = time_diffs_reciprocal_squared[:, None, :]
a = x[..., :-1]
b = knot_derivatives[..., :-1]
two_c = (six_path_diffs * time_diffs_reciprocal
- 4 * knot_derivatives[..., :-1]
- 2 * knot_derivatives[..., 1:]) * time_diffs_reciprocal
three_d = (-six_path_diffs * time_diffs_reciprocal
+ 3 * (knot_derivatives[..., :-1]
+ knot_derivatives[..., 1:])) * time_diffs_reciprocal_squared
return a, b, two_c, three_d
def natural_cubic_spline_coeffs(t, x):
a, b, two_c, three_d = cubic_spline_coeffs(t, x.transpose(-1, -2))
# These all have shape (..., length - 1, channels)
a = a.transpose(-1, -2)
b = b.transpose(-1, -2)
c = two_c.transpose(-1, -2) / 2
d = three_d.transpose(-1, -2) / 3
return t, a, b, c, d
class NaturalCubicSpline:
def __init__(self, coeffs, **kwargs):
super(NaturalCubicSpline, self).__init__(**kwargs)
t, a, b, c, d = coeffs
self._t = t
self._a = a
self._b = b
self._c = c
self._d = d
def evaluate(self, t):
maxlen = self._b.size(-2) - 1
inners = torch.zeros((t.shape[0], t.shape[1], 3)).to(t.device)
for i_b in range(self._t.shape[0]):
index = torch.bucketize(t.detach()[i_b], self._t[i_b]) - 1
index = index.clamp(0, maxlen) # clamp because t may go outside of [t[0], t[-1]]; this is fine
# will never access the last element of self._t; this is correct behaviour
fractional_part = t[i_b] - self._t[i_b][index]
fractional_part = fractional_part.unsqueeze(-1)
inner = self._c[i_b, index, :] + self._d[i_b, index, :] * fractional_part
inner = self._b[i_b, index, :] + inner * fractional_part
inner = self._a[i_b, index, :] + inner * fractional_part
inners[i_b] = inner
return inners
def derivative(self, t, order=1):
fractional_part, index = self._interpret_t(t)
fractional_part = fractional_part.unsqueeze(-1)
if order == 1:
inner = 2 * self._c[..., index, :] + 3 * self._d[..., index, :] * fractional_part
deriv = self._b[..., index, :] + inner * fractional_part
elif order == 2:
deriv = 2 * self._c[..., index, :] + 6 * self._d[..., index, :] * fractional_part
else:
raise ValueError('Derivative is not implemented for orders greater than 2.')
return deriv
# post-processing
def merge_strands(strands_list):
strands_all = []
for strands in strands_list:
for i_strand in range(strands.shape[0]):
strands_all.append(strands[i_strand])
strands_all = np.array(strands_all, dtype=object)
return strands_all
def strandspc2strands(strandspc, sep):
num_strands = len(sep)
strands = []
num_pts = 0
for i_strand in range(num_strands):
strands.append(strandspc[num_pts : num_pts + int(sep[i_strand])])
num_pts += sep[i_strand]
strands = np.array(strands, dtype=object)
return strands
def smooth_strand(strand, lap_constraint=2.0, pos_constraint=1.0, fix_tips=False):
num_pts = strand.shape[0]
num_value = num_pts * 3 - 2 + num_pts
smoothed_strand = np.copy(strand)
# construct laplacian sparse matrix
i, j, v = np.zeros(num_value, dtype=np.int16), np.zeros(num_value, dtype=np.int16), np.zeros(num_value)
i[0], i[1], i[2 + (num_pts - 2) * 3], i[2 + (num_pts - 2) * 3 + 1] = 0, 0, num_pts - 1, num_pts - 1
i[2 : num_pts * 3 - 4] = np.repeat(np.arange(1, num_pts - 1), 3)
i[num_pts * 3 - 2:] = np.arange(num_pts) + num_pts
j[0], j[1], j[2 + (num_pts - 2) * 3], j[2 + (num_pts - 2) * 3 + 1] = 0, 1, num_pts - 2, num_pts - 1
j[2 : num_pts * 3 - 4] = np.repeat(np.arange(1, num_pts - 1), 3) \
+ np.repeat(np.array([-1, 0, 1], dtype=np.int16), num_pts - 2).reshape(num_pts - 2, 3, order='F').ravel()
j[num_pts * 3 - 2:] = np.arange(num_pts)
v[0], v[1], v[2 + (num_pts - 2) * 3], v[2 + (num_pts - 2) * 3 + 1] = 1, -1, -1, 1
v[2 : num_pts * 3 - 4] = np.repeat(np.array([-1, 2, -1], dtype=np.int16), num_pts - 2).reshape(num_pts - 2, 3, order='F').ravel()
v = v * lap_constraint
v[num_pts * 3 - 2:] = pos_constraint
A = coo_matrix((v, (i, j)), shape=(num_pts * 2, num_pts))
At = A.transpose()
AtA = At.dot(A)
# solving
for j_axis in range(3):
b = np.zeros(num_pts * 2)
b[num_pts:] = smoothed_strand[:, j_axis] * pos_constraint
Atb = At.dot(b)
x = spsolve(AtA, Atb)
smoothed_strand[:, j_axis] = x[:num_pts]
if fix_tips:
strand[1:-1] = smoothed_strand[1:-1]
else:
strand = smoothed_strand
return strand
def smooth_strands(strands, lap_constraint=2.0, pos_constraint=1.0, fix_tips=False):
loop = tqdm(range(strands.shape[0]))
loop.set_description("Smoothing strands")
for i_strand in loop:
        strands[i_strand] = smooth_strand(strands[i_strand], lap_constraint, pos_constraint, fix_tips)
return strands
def downsample_strands(strands, min_num_pts=5, tgt_num_pts=64):
loop = tqdm(range(strands.shape[0]))
loop.set_description("Downsampling strands points")
for i_strand in loop:
num_pts = strands[i_strand].shape[0]
downsampled_strand = np.copy(strands[i_strand][:, 0:3])
if num_pts <= 2:
pass
elif num_pts > 2 and num_pts < min_num_pts:
interp_pts = (downsampled_strand[:-1] + downsampled_strand[1:]) / 2.
interp_strand = np.zeros((num_pts * 2 - 1, 3))
interp_strand[::2] = downsampled_strand
interp_strand[1::2] = interp_pts
downsampled_strand = interp_strand
elif num_pts > min_num_pts and num_pts < tgt_num_pts:
pass
else:
interp_spline = splines.CatmullRom(downsampled_strand)
interp_idx = np.arange(tgt_num_pts) / (tgt_num_pts / (num_pts - 1))
downsampled_strand = interp_spline.evaluate(interp_idx)
strands[i_strand] = downsampled_strand
return strands
def duplicate_strands(strands, ratio=5, perturation=1.0):
loop = tqdm(range(strands.shape[0]))
loop.set_description('Duplicating strands')
duplicated_strands_list = []
for i_strand in loop:
strand = strands[i_strand][:, 0:3]
num_pts = strand.shape[0]
duplicated_strands = np.repeat(strand.reshape(1, num_pts, 3), ratio, axis=0)
start_tangent = strand[1] - strand[0]
offsets = np.random.rand(ratio, 3)
offsets[:, 2] = -(offsets[:, 0] * start_tangent[0] + offsets[:, 1] * start_tangent[1]) / (start_tangent[2] + 1e-6)
offsets = offsets / np.linalg.norm(offsets, axis=1, keepdims=True)
offsets[0] *= 0
scale_ratio = np.random.rand(ratio, 1) * perturation + perturation
offsets = offsets * scale_ratio
offsets = np.repeat(offsets.reshape(ratio, 1, 3), num_pts, axis=1)
duplicated_strands = duplicated_strands + offsets
for j in range(ratio):
duplicated_strands_list.append(duplicated_strands[j])
strands = np.array(duplicated_strands_list, dtype=object)
return strands
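if __name__ == "__main__":
    # Minimal self-check sketch (not part of the original file): run a synthetic noisy strand
    # through the resampling and smoothing utilities defined above. The helix-like curve and
    # point counts are arbitrary.
    t = np.linspace(0., 1., 30)
    strand = np.stack([np.sin(4. * t), np.cos(4. * t), 10. * t], axis=1)
    strand += np.random.rand(30, 3) * 0.05
    splined = spline_strand(strand, num_strand_points=100)      # (100, 3)
    padded, times = pad_strand(strand, num_strand_points=100)   # (100, 3), (100,)
    smoothed = smooth_strands(np.array([splined, strand], dtype=object))
    length, _ = get_strand_length(smoothed[0])
    print(splined.shape, padded.shape, times.shape, float(length))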
|
CT2Hair-main
|
CT2Hair/utils/strandsutils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import copy
import igl
import trimesh
import numpy as np
from scipy.spatial.transform import Rotation as R
from datautils.datautils import save_bin_strands
from utils.pcutils import load_pc
from utils.utils import translate2mat, homo_rot_mat
def read_mesh(mesh_path):
mesh = trimesh.load(mesh_path, force='mesh', process=True)
return mesh
def write_mesh(mesh_path, mesh):
mesh.export(mesh_path)
def quad2trimesh(quad_faces, vertices):
assert quad_faces.shape[1] == 4, "Mesh is not a quad mesh."
num_quad_faces = quad_faces.shape[0]
num_tri_faces = num_quad_faces * 2
tri_faces = np.zeros((num_tri_faces, 3), dtype=np.uint32)
tri_faces[::2] = quad_faces[:, [0, 1, 2]]
tri_faces[1::2] = quad_faces[:, [0, 2, 3]]
return trimesh.Trimesh(vertices=vertices, faces=tri_faces)
def vertices_pairwise_dis(vertices):
inner_vertices = -2 * (vertices @ vertices.T)
vertices_2 = np.sum(vertices**2, axis=1, keepdims=True)
pairwise_dis = vertices_2 + inner_vertices + vertices_2.T
return pairwise_dis
def mesh_deformation(head_mesh, target_pc, scalp_faces_mask, smooth=True, smooth_iterations=10, thres_min_movement=10.0):
target_kdtree = target_pc.kdtree
v = head_mesh.vertices
f = head_mesh.faces
u = v.copy()
num_vertices = v.shape[0]
dis, idx = target_kdtree.query(v, 1)
s = np.zeros(num_vertices)
for i_face in range(head_mesh.faces.shape[0]):
if scalp_faces_mask[i_face]:
for i_v in range(3):
v_idx = f[i_face, i_v]
if dis[v_idx] <= thres_min_movement:
s[v_idx] = 1
    b = np.array([[i for i in range(v.shape[0]) if s[i] > 0]]).T
# Boundary conditions directly on deformed positions
u_bc = np.zeros((b.shape[0], v.shape[1]))
v_bc = np.zeros((b.shape[0], v.shape[1]))
for bi in range(b.shape[0]):
v_bc[bi] = v[b[bi]]
offset = target_pc.vertices[idx[b[bi]]] - v[b[bi]]
u_bc[bi] = v[b[bi]] + offset
u_bc_anim = v_bc + (u_bc - v_bc)
d_bc = u_bc_anim - v_bc
d = igl.harmonic_weights(v, f, b.astype(f.dtype), d_bc, 1)
u = v + d
head_mesh.vertices = u
if smooth:
smoothe_head_mesh = copy.deepcopy(head_mesh)
trimesh.smoothing.filter_mut_dif_laplacian(smoothe_head_mesh, iterations=smooth_iterations)
# trimesh.smoothing.filter_laplacian(head_mesh, iterations=smooth_iterations)
head_mesh.vertices[head_mesh.faces[scalp_faces_mask]] = smoothe_head_mesh.vertices[head_mesh.faces[scalp_faces_mask]]
return head_mesh
def get_alignment_matrix(head_mesh, head_texture, target_roots_pc_path, target_face_base):
uv_coords = head_mesh.visual.uv # num_vertices X 2
head_tex_width, head_tex_height, _ = head_texture.shape
head_mesh_pairwise_dis = vertices_pairwise_dis(head_mesh.vertices)
head_mesh_eye = np.eye(head_mesh_pairwise_dis.shape[0])
head_mesh_pairwise_dis = head_mesh_pairwise_dis + head_mesh_eye
UV_bound_vertices = np.where(head_mesh_pairwise_dis < 1e-4) # boundary vertices in UV
    # for each face determine whether it is scalp
num_faces = head_mesh.faces.shape[0]
face_uv_coords = uv_coords[head_mesh.faces] * [head_tex_height, head_tex_width]
face_uv_coords = np.around(face_uv_coords).astype(np.uint16)
face_uv_coords = np.clip(face_uv_coords, [0, 1], [head_tex_width - 1, head_tex_height])
face_uv_colors = head_texture[head_tex_height - face_uv_coords[:, :, 1], face_uv_coords[:, :, 0], :]
face_avg_colors = np.sum(face_uv_colors, axis=1, keepdims=False)
scalp_faces_mask = face_avg_colors[:, 0] > 255 * 0.3
scalp_faces_idx = np.where(face_avg_colors[:, 0] > 255 * 0.3)[0]
scalp_mesh = copy.deepcopy(head_mesh)
scalp_mesh.update_faces(scalp_faces_mask)
scalp_mesh.remove_unreferenced_vertices()
scalp_sampled_points = scalp_mesh.sample(50000)
target_points, target_normals = load_pc(target_roots_pc_path, load_color=False, load_normal=True)
source_pc = trimesh.points.PointCloud(scalp_sampled_points)
target_pc = trimesh.points.PointCloud(target_points)
trans_mat = np.eye(4)
# align bound sphere size
scale_ratio = math.pow(target_pc.bounding_sphere.volume / source_pc.bounding_sphere.volume, 1./3.)
scalp_sampled_points = scalp_sampled_points * scale_ratio
trans_offset = [0., 0., 0.] - (source_pc.centroid * scale_ratio)
scalp_sampled_points += trans_offset
trans_mat[0:3] = trans_mat[0:3] * scale_ratio
trans_mat = translate2mat(trans_offset) @ trans_mat
# base rotate to original coord
base_rot = R.from_euler('xyz', [[0., 0., 0.]]) # MannequinHeadB
base_rot_mat = base_rot.as_matrix()[0]
scalp_sampled_points = np.dot(base_rot_mat, scalp_sampled_points.T).T
trans_mat = homo_rot_mat(base_rot_mat) @ trans_mat
# change of basis
# target_face_base = np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., -1]])
target_face_base_inv = target_face_base.T
scalp_sampled_points = np.dot(target_face_base_inv, scalp_sampled_points.T).T
trans_mat = homo_rot_mat(target_face_base_inv) @ trans_mat
# move to same center with target
scalp_sampled_points += target_pc.centroid
trans_mat = translate2mat(target_pc.centroid) @ trans_mat
# registration
reg_mat, reg_points, cost = trimesh.registration.icp(scalp_sampled_points, target_points)
trans_mat = reg_mat @ trans_mat
return trans_mat
def process_head_model(head_mesh, head_texture, target_roots_pc_path, target_face_base, is_deformation=True):
print('Utils::MeshUtils Start processing head model (registration & deformation)...')
uv_coords = head_mesh.visual.uv # num_vertices X 2
head_tex_width, head_tex_height, _ = head_texture.shape
head_mesh_pairwise_dis = vertices_pairwise_dis(head_mesh.vertices)
head_mesh_eye = np.eye(head_mesh_pairwise_dis.shape[0])
head_mesh_pairwise_dis = head_mesh_pairwise_dis + head_mesh_eye
UV_bound_vertices = np.where(head_mesh_pairwise_dis < 1e-4) # boundary vertices in UV
    # for each face determine whether it is scalp
num_faces = head_mesh.faces.shape[0]
face_uv_coords = uv_coords[head_mesh.faces] * [head_tex_height, head_tex_width]
face_uv_coords = np.around(face_uv_coords).astype(np.uint16)
face_uv_coords = np.clip(face_uv_coords, [0, 1], [head_tex_width - 1, head_tex_height])
face_uv_colors = head_texture[head_tex_height - face_uv_coords[:, :, 1], face_uv_coords[:, :, 0], :]
face_avg_colors = np.sum(face_uv_colors, axis=1, keepdims=False)
scalp_faces_mask = face_avg_colors[:, 0] > 255 * 0.3
scalp_faces_idx = np.where(face_avg_colors[:, 0] > 255 * 0.3)[0]
scalp_mesh = copy.deepcopy(head_mesh)
scalp_mesh.update_faces(scalp_faces_mask)
scalp_mesh.remove_unreferenced_vertices()
scalp_sampled_points = scalp_mesh.sample(50000)
target_points = load_pc(target_roots_pc_path, load_color=False, load_normal=False)
source_pc = trimesh.points.PointCloud(scalp_sampled_points)
target_pc = trimesh.points.PointCloud(target_points)
trans_mat = np.eye(4)
# align bound sphere size
scale_ratio = math.pow(target_pc.bounding_sphere.volume / source_pc.bounding_sphere.volume, 1./3.)
scalp_sampled_points = scalp_sampled_points * scale_ratio
trans_offset = [0., 0., 0.] - (source_pc.centroid * scale_ratio)
scalp_sampled_points += trans_offset
trans_mat = translate2mat(trans_offset) @ trans_mat
# base rotate to original coord
# base_rot = R.from_euler('yzx', [[211. / 180. * np.pi, -8. / 180. * np.pi, 0.]]) # Mugsy Head
# base_rot = R.from_euler('xzy', [[180. / 180. * np.pi, 2. / 180. * np.pi, 3. / 180. * np.pi]]) # old MannequinHeadA
base_rot = R.from_euler('xyz', [[0., 0., 0.]]) # MannequinHeadA and B
base_rot_mat = base_rot.as_matrix()[0]
scalp_sampled_points = np.dot(base_rot_mat, scalp_sampled_points.T).T
trans_mat = homo_rot_mat(base_rot_mat) @ trans_mat
# change of basis
# target_face_base = np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., -1]])
target_face_base_inv = target_face_base.T
scalp_sampled_points = np.dot(target_face_base_inv, scalp_sampled_points.T).T
trans_mat = homo_rot_mat(target_face_base_inv) @ trans_mat
# move to same center with target
scalp_sampled_points += target_pc.centroid
trans_mat = translate2mat(target_pc.centroid) @ trans_mat
# registration
reg_mat, reg_points, cost = trimesh.registration.icp(scalp_sampled_points, target_points) # type: ignore (for avoid pyplace error report)
trans_mat = reg_mat @ trans_mat
    # apply transformation to the head model
head_mesh.apply_scale(scale_ratio)
head_mesh.apply_transform(trans_mat)
# head_mesh.export('temp/reg_head.ply')
if is_deformation:
# head_mesh = mesh_deformation(head_mesh, target_pc, scalp_faces_mask, smooth=False)
# head_mesh = mesh_deformation(head_mesh, target_pc, scalp_faces_mask)
head_mesh = mesh_deformation(head_mesh, target_pc, scalp_faces_mask, smooth_iterations=12, thres_min_movement=24)
# head_mesh = mesh_deformation(head_mesh, target_pc, scalp_faces_mask, smooth_iterations=6, thres_min_movement=24)
# head_mesh.export('temp/smoothed_deform_reg_head.ply')
# sew vertices
sewed_v = head_mesh.vertices.copy()
for i_v in range(UV_bound_vertices[0].shape[0]):
sewed_v[UV_bound_vertices[0][i_v]] = (head_mesh.vertices[UV_bound_vertices[0][i_v]] + head_mesh.vertices[UV_bound_vertices[1][i_v]]) / 2.
sewed_v[UV_bound_vertices[1][i_v]] = (head_mesh.vertices[UV_bound_vertices[0][i_v]] + head_mesh.vertices[UV_bound_vertices[1][i_v]]) / 2.
head_mesh.vertices = sewed_v
# compute transed & registered & deformed scalp mesh again
scalp_mesh = copy.deepcopy(head_mesh)
scalp_mesh.update_faces(scalp_faces_mask)
scalp_mesh.remove_unreferenced_vertices()
print('Utils::MeshUtils End processing.')
return head_mesh, scalp_mesh, scalp_faces_idx
def seg_head_model(head_mesh, head_texture):
uv_coords = head_mesh.visual.uv # num_vertices X 2
head_tex_width, head_tex_height, _ = head_texture.shape
head_mesh_pairwise_dis = vertices_pairwise_dis(head_mesh.vertices)
head_mesh_eye = np.eye(head_mesh_pairwise_dis.shape[0])
head_mesh_pairwise_dis = head_mesh_pairwise_dis + head_mesh_eye
UV_bound_vertices = np.where(head_mesh_pairwise_dis < 1e-4) # boundary vertices in UV
    # for each face determine whether it is scalp
face_uv_coords = uv_coords[head_mesh.faces] * [head_tex_height, head_tex_width]
face_uv_coords = np.around(face_uv_coords).astype(np.uint16)
face_uv_coords = np.clip(face_uv_coords, [0, 1], [head_tex_width - 1, head_tex_height])
face_uv_colors = head_texture[head_tex_height - face_uv_coords[:, :, 1], face_uv_coords[:, :, 0], :]
face_avg_colors = np.sum(face_uv_colors, axis=1, keepdims=False)
scalp_faces_mask = face_avg_colors[:, 0] > 255 * 0.3
scalp_faces_idx = np.where(face_avg_colors[:, 0] > 255 * 0.3)[0]
scalp_mesh = copy.deepcopy(head_mesh)
scalp_mesh.update_faces(scalp_faces_mask)
scalp_mesh.remove_unreferenced_vertices()
return head_mesh, scalp_mesh, scalp_faces_idx
import torch
from typing import Union, Tuple
from trimesh import Trimesh
# from trimesh.proximity import closest_point # Too slow
from trimesh.triangles import points_to_barycentric
def closest_point_barycentrics(v, vi, points, filtering=False, filter_dis_thres=2.):
"""Given a 3D mesh and a set of query points, return closest point barycentrics
Args:
v: np.array (float)
[N, 3] mesh vertices
vi: np.array (int)
[N, 3] mesh triangle indices
points: np.array (float)
[M, 3] query points
Returns:
        Tuple[approx, barys, interp_idxs, face_idxs, valid_q_idx]
        approx: [M, 3] approximated (closest) points on the mesh
        barys: [M, 3] barycentric weights that produce "approx"
        interp_idxs: [M, 3] vertex indices for barycentric interpolation
        face_idxs: [M] face indices for barycentric interpolation. interp_idxs = vi[face_idxs]
        valid_q_idx: indices of the query points kept after distance filtering (all M points when filtering is False); the other outputs are restricted to these queries
"""
mesh = Trimesh(vertices=v, faces=vi)
# p, distances, face_idxs = closest_point(mesh, points) # Slow, Change to IGL
sqr_distances, face_idxs, p = igl.point_mesh_squared_distance(points, mesh.vertices, mesh.faces) # type: ignore for avoiding pylance error
if filtering:
valid_q_idx = np.where(np.sqrt(sqr_distances) < filter_dis_thres)[0]
p = p[valid_q_idx]
face_idxs = face_idxs[valid_q_idx]
else:
valid_q_idx = np.arange(p.shape[0])
barys = points_to_barycentric(mesh.triangles[face_idxs], p)
b0, b1, b2 = np.split(barys, 3, axis=1)
interp_idxs = vi[face_idxs]
v0 = v[interp_idxs[:, 0]]
v1 = v[interp_idxs[:, 1]]
v2 = v[interp_idxs[:, 2]]
approx = b0 * v0 + b1 * v1 + b2 * v2
return approx, barys, interp_idxs, face_idxs, valid_q_idx
def make_closest_uv_barys(
vt: torch.Tensor,
vti: torch.Tensor,
uv_shape: Union[Tuple[int, int], int],
flip_uv: bool = True,
):
"""Compute a UV-space barycentric map where each texel contains barycentric
coordinates for the closest point on a UV triangle.
Args:
vt: torch.Tensor
Texture coordinates. Shape = [n_texcoords, 2]
vti: torch.Tensor
Face texture coordinate indices. Shape = [n_faces, 3]
uv_shape: Tuple[int, int] or int
Shape of the texture map. (HxW)
flip_uv: bool
Whether or not to flip UV coordinates along the V axis (OpenGL -> numpy/pytorch convention).
Returns:
torch.Tensor: index_img: Face index image, shape [uv_shape[0], uv_shape[1]]
        torch.Tensor: Barycentric coordinate map, shape [uv_shape[0], uv_shape[1], 3]
"""
if isinstance(uv_shape, int):
uv_shape = (uv_shape, uv_shape)
if flip_uv:
# Flip here because texture coordinates in some of our topo files are
# stored in OpenGL convention with Y=0 on the bottom of the texture
# unlike numpy/torch arrays/tensors.
vt = vt.clone()
vt[:, 1] = 1 - vt[:, 1]
# Texel to UV mapping (as per OpenGL linear filtering)
# https://www.khronos.org/registry/OpenGL/specs/gl/glspec46.core.pdf
# Sect. 8.14, page 261
# uv=(0.5,0.5)/w is at the center of texel [0,0]
# uv=(w-0.5, w-0.5)/w is the center of texel [w-1,w-1]
# texel = floor(u*w - 0.5)
# u = (texel+0.5)/w
uv_grid = torch.meshgrid(
torch.linspace(0.5, uv_shape[0] - 1 + 0.5, uv_shape[0]) / uv_shape[0],
torch.linspace(0.5, uv_shape[1] - 1 + 0.5, uv_shape[1]) / uv_shape[1], indexing='ij') # HxW, v,u
uv_grid = torch.stack(uv_grid[::-1], dim=2) # HxW, u, v
uv = uv_grid.reshape(-1, 2).data.to("cpu").numpy()
vth = np.hstack((vt, vt[:, 0:1] * 0 + 1))
uvh = np.hstack((uv, uv[:, 0:1] * 0 + 1))
approx, barys, interp_idxs, face_idxs, _ = closest_point_barycentrics(vth, vti, uvh)
index_img = torch.from_numpy(face_idxs.reshape(uv_shape[0], uv_shape[1])).long()
bary_img = torch.from_numpy(barys.reshape(uv_shape[0], uv_shape[1], 3)).float()
return index_img, bary_img
def compute_tbn_uv(tri_xyz, tri_uv, eps=1e-5):
"""Compute tangents, bitangents, normals.
Args:
tri_xyz: [B,N,3,3] vertex coordinates
tri_uv: [B,N,3,2] texture coordinates
Returns:
tangents, bitangents, normals
"""
v01 = tri_xyz[:, :, 1] - tri_xyz[:, :, 0]
v02 = tri_xyz[:, :, 2] - tri_xyz[:, :, 0]
normals = torch.cross(v01, v02, dim=-1)
normals = normals / torch.norm(normals, dim=-1, keepdim=True).clamp(min=eps)
vt01 = tri_uv[:, :, 1] - tri_uv[:, :, 0]
vt02 = tri_uv[:, :, 2] - tri_uv[:, :, 0]
f = 1.0 / (vt01[..., 0] * vt02[..., 1] - vt01[..., 1] * vt02[..., 0])
tangents = f[..., np.newaxis] * (
v01 * vt02[..., 1][..., np.newaxis] - v02 * vt01[..., 1][..., np.newaxis])
tangents = tangents / torch.norm(tangents, dim=-1, keepdim=True).clamp(min=eps)
bitangents = torch.cross(normals, tangents, dim=-1)
bitangents = bitangents / torch.norm(bitangents, dim=-1, keepdim=True).clamp(min=eps)
return tangents, bitangents, normals
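# Illustrative shape sketch (added for clarity; not part of the original CT2Hair code): per-face
# tangent frames from a triangle mesh with UVs, as used by strands_world2tbn below.
def _demo_compute_tbn_uv(head_mesh):
    tri_xyz = torch.tensor(head_mesh.vertices[head_mesh.faces, :])[None]  # [1, F, 3, 3]
    tri_uv = torch.tensor(head_mesh.visual.uv[head_mesh.faces, :])[None]  # [1, F, 3, 2]
    tangents, bitangents, normals = compute_tbn_uv(tri_xyz, tri_uv)       # each [1, F, 3]
    return tangents, bitangents, normals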
def strands_world2tbn(strands, head_mesh, scalp_mesh, scalp_faces_idx):
# print('Utils::MeshUtils Convert strands to TBN space...')
num_strands = strands.shape[0]
# get all roots points
roots_pc = []
for i_strand in range(num_strands):
roots_pc.append(strands[i_strand][0, 0:3])
roots_pc = np.array(roots_pc)
approx, barys, interp_idxs, faces_idxs, valid_q_idxs = closest_point_barycentrics(scalp_mesh.vertices, scalp_mesh.faces, roots_pc, filtering=True, filter_dis_thres=6.4) # 3.6 -> 6.4, 7.2
valid_strands = strands[valid_q_idxs]
# invalid_q_idxs = list(set(np.arange(roots_pc.shape[0])) - set(valid_q_idxs))
# invalid_strands = strands[invalid_q_idxs]
# save_bin_strands('temp/valid_strands.bin', valid_strands)
# save_bin_strands('temp/invalid_strands.bin', invalid_strands)
num_valid_strands = valid_strands.shape[0]
triangled_vertices = torch.tensor(head_mesh.vertices[head_mesh.faces, :])[None, :]
triangled_vertices_uv = torch.tensor(head_mesh.visual.uv[head_mesh.faces, :])[None, :]
tangents, bitangents, normals = compute_tbn_uv(triangled_vertices, triangled_vertices_uv) # get tbn for each face
scalp_tangents = tangents[0][scalp_faces_idx].detach().cpu().numpy()
scalp_bitangents = bitangents[0][scalp_faces_idx].detach().cpu().numpy()
scalp_normals = normals[0][scalp_faces_idx].detach().cpu().numpy()
tbn_strands = []
for i_strand in range(num_valid_strands):
tangent = scalp_tangents[faces_idxs[i_strand]]
bitangent = scalp_bitangents[faces_idxs[i_strand]]
normal = scalp_normals[faces_idxs[i_strand]]
tbn_basis_T = np.array([tangent, bitangent, normal])
tbn_strand = (tbn_basis_T @ valid_strands[i_strand][:, 0:3].T).T
tbn_strand = tbn_strand - tbn_strand[0]
tbn_strands.append(tbn_strand)
# print('Utils::MeshUtils End converting, number of original strands: %d, number of valid strands: %d'%(num_strands, num_valid_strands))
return tbn_strands, barys, interp_idxs, faces_idxs, valid_q_idxs, tangents, bitangents, normals
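# Illustrative usage sketch (added for clarity; not part of the original CT2Hair code). The
# arguments mirror how modules/neural_strands.py calls this function: `strands` holds [P, 3+]
# polylines in world space, and `scalp_faces_idx` maps scalp faces back to head-mesh faces.
def _demo_strands_world2tbn(strands, head_mesh, scalp_mesh, scalp_faces_idx):
    tbn_strands, barys, interp_idxs, face_idxs, valid_idxs, tangents, bitangents, normals = \
        strands_world2tbn(strands, head_mesh, scalp_mesh, scalp_faces_idx)
    # Each returned strand is expressed in the tangent frame of its root and starts at the origin.
    print('kept %d of %d strands after root filtering' % (len(tbn_strands), strands.shape[0]))
    return tbn_strands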
def strands_align_normal(strands, head_mesh):
num_strands = len(strands)
# get all roots points
roots_pc = []
for i_strand in range(num_strands):
roots_pc.append(strands[i_strand][0])
roots_pc = np.array(roots_pc)[:, 0:3]
sqr_distances, face_idxs, p = igl.point_mesh_squared_distance(roots_pc, head_mesh.vertices, head_mesh.faces)
closest_faces = head_mesh.faces[face_idxs]
closest_triangles = torch.tensor(head_mesh.vertices[closest_faces, :])[None, :]
v01 = closest_triangles[:, :, 1] - closest_triangles[:, :, 0]
v02 = closest_triangles[:, :, 2] - closest_triangles[:, :, 0]
normals = torch.cross(v01, v02, dim=-1)
normals = normals / torch.norm(normals, dim=-1, keepdim=True).clamp(min=1e-5)
z_aixs = torch.zeros_like(normals)
z_aixs[:, :, 2] = 1
t_axises = torch.cross(normals, z_aixs)
t_axises = t_axises / torch.norm(t_axises, dim=-1, keepdim=True).clamp(min=1e-5)
b_axises = torch.cross(normals, t_axises)
b_axises = b_axises / torch.norm(b_axises, dim=-1, keepdim=True).clamp(min=1e-5)
tangents = t_axises[0].detach().cpu().numpy()
bitangents = b_axises[0].detach().cpu().numpy()
normals = normals[0].detach().cpu().numpy()
aligned_strands = []
valid_rot_mats = []
valid_roots_pts = []
for i_strand in range(num_strands):
tangent = tangents[i_strand]
bitangent = bitangents[i_strand]
normal = normals[i_strand]
strand = np.array(strands[i_strand])
root_pts = strand[0]
strand = strand - root_pts
tbn_basis_T = np.array([tangent, bitangent, normal])
aligned_strand = (tbn_basis_T @ strand.T).T
        if np.sum(aligned_strand ** 2) < 1e-7 or np.isnan(np.sum(aligned_strand)):  # drop degenerate strands to avoid NaNs downstream
continue
aligned_strands.append(aligned_strand)
valid_rot_mats.append(tbn_basis_T)
valid_roots_pts.append(root_pts)
return aligned_strands, valid_rot_mats, valid_roots_pts
|
CT2Hair-main
|
CT2Hair/utils/meshutils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import time
import numpy as np
import open3d as o3d
from copy import deepcopy
from matplotlib import cm
def volume2pc(voxels, threshold=1e-1, scale_ratio=np.array([0.125, 0.125, 0.125]), get_colors=True):
start_time = time.time()
x, y, z = np.where(voxels > threshold)
points = np.concatenate((x[:, None], y[:, None], z[:, None]), axis=1).astype(np.float32)
points = points * scale_ratio
values = voxels[x, y, z]
if get_colors:
BuGn_color_map = cm.get_cmap('BuGn', 256)
colors = np.array(BuGn_color_map(values))[:, 0:3]
print('Finish volume to pc stage, used %fs'%(time.time() - start_time))
return points, colors
else:
print('Finish volume to pc stage, used %fs'%(time.time() - start_time))
return points, values
def pc2volume(points, colors=None, normals=None, num_angles=12):
min_bound = np.min(points, axis=0).astype(np.int16)
max_bound = np.max(points, axis=0).astype(np.int16)
voxel_size = max_bound - min_bound + 1
voxel_size = np.append(voxel_size, [4])
voxels = np.zeros(voxel_size)
points = points.astype(np.int16)
points = points - min_bound
if colors is not None:
voxels[points[:, 0], points[:, 1], points[:, 2], 0] = colors[:, 0] # confidence
        voxels[points[:, 0], points[:, 1], points[:, 2], 1] = colors[:, 1] * num_angles # theta
voxels[points[:, 0], points[:, 1], points[:, 2], 2] = colors[:, 2] * num_angles # phi
voxels[points[:, 0], points[:, 1], points[:, 2], 3] = np.arange(points.shape[0]) # point_index
elif normals is not None:
        voxels[points[:, 0], points[:, 1], points[:, 2], 0:3] = normals # normal vector (nx, ny, nz)
voxels[points[:, 0], points[:, 1], points[:, 2], 3] = np.arange(points.shape[0]) # point_index
return voxels, min_bound
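# Illustrative round trip (added for clarity; not part of the original CT2Hair code): extract
# occupied voxels of a 3D confidence volume as points, then scatter dummy per-point orientations
# back into a sparse grid with pc2volume. `voxels` is assumed to be a 3D float array.
def _demo_volume_pc_roundtrip(voxels):
    points, values = volume2pc(voxels, threshold=0.1, scale_ratio=np.ones(3), get_colors=False)
    dummy_normals = np.tile(np.array([[0., 0., 1.]]), (points.shape[0], 1))
    sparse_voxels, min_bound = pc2volume(points, normals=dummy_normals)
    return sparse_voxels, min_bound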
def strands2pc(strands, step_size=None, rand_color=True):
num_strands = strands.shape[0]
    if step_size is None:
strands_points = []
strands_normals = []
strands_colors = []
strands_tangents = []
strands_sep = [] # number of points for each strand
for i_strand in range(num_strands):
num_points = strands[i_strand].shape[0]
points = strands[i_strand][:, :3]
normals = strands[i_strand][:, 3:]
tangents = points[1:] - points[:-1]
tangents = tangents / np.linalg.norm(tangents, axis=-1, keepdims=True)
tangents = np.concatenate((tangents, tangents[-1:]), axis=0)
points = points.tolist()
normals = normals.tolist()
tangents = tangents.tolist()
strands_points.extend(points)
strands_normals.extend(normals)
strands_tangents.extend(tangents)
if rand_color:
strand_color = np.random.rand(1, 3)
strand_colors = np.repeat(strand_color, num_points, axis=0)
strand_colors = strand_colors.tolist()
strands_colors.extend(strand_colors)
strands_sep.append(num_points)
strands_points = np.array(strands_points)
strands_tangents = np.array(strands_tangents)
if rand_color:
strands_colors = np.array(strands_colors)
return strands_points, strands_colors, strands_sep
else:
return strands_points, strands_tangents, strands_sep
else:
max_step_lenght = 0
strands_steps_pos_norm = []
strands_steps_colors = []
for i_strand in range(num_strands):
num_steps = strands[i_strand].shape[0] // step_size
num_points = num_steps * step_size
strand = np.reshape(strands[i_strand][:num_points], (num_steps, step_size, strands[i_strand].shape[-1]))
strands_steps_pos_norm.append(strand)
if rand_color:
strand_color = np.random.rand(1, 3)
strand_colors = np.repeat(strand_color, num_points, axis=0)
strand_colors = np.reshape(strand_colors, (num_steps, step_size, 3))
strands_steps_colors.append(strand_colors)
if num_steps > max_step_lenght:
max_step_lenght = num_steps
steps_points = []
steps_normals = []
steps_colors = []
for i_step in range(max_step_lenght):
step_points = []
step_normals = []
step_colors = []
for j_strand in range(num_strands):
step_lenght = strands_steps_pos_norm[j_strand].shape[0]
if (step_lenght <= i_step):
continue
step_points.append(strands_steps_pos_norm[j_strand][i_step, :, :3])
step_normals.append(strands_steps_pos_norm[j_strand][i_step, :, 3:])
if rand_color:
step_colors.append(strands_steps_colors[j_strand][i_step])
steps_points.append(np.array(step_points).reshape(-1, 3))
steps_normals.append(np.array(step_normals).reshape(-1, 3))
if rand_color:
steps_colors.append(np.array(step_colors).reshape(-1, 3))
if rand_color:
return max_step_lenght, steps_points, steps_colors
else:
return max_step_lenght, steps_points, None
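# Illustrative usage (added for clarity; not part of the original CT2Hair code): flatten strands
# (each a [P, 6] array of positions and normals) into one randomly colored point cloud and write
# it with save_pc, which is defined further below in this module. The output path is only an example.
def _demo_strands2pc(strands, out_path='temp/strands_pc.ply'):
    points, colors, sep = strands2pc(strands, step_size=None, rand_color=True)
    return save_pc(out_path, points, colors=colors)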
def read_pc(pc_path):
point_cloud = o3d.io.read_point_cloud(pc_path)
return point_cloud
def load_pc(pc_path, load_color=True, load_normal=False):
point_cloud = o3d.io.read_point_cloud(pc_path)
points = np.asarray(point_cloud.points)
if load_color:
assert point_cloud.has_colors(), "Loaded point cloud has no colors"
colors = np.asarray(point_cloud.colors)
return points, colors
elif load_normal:
assert point_cloud.has_normals(), "Loaded point cloud has no normals"
normals = np.asarray(point_cloud.normals)
return points, normals
else:
return points
def save_pc_float64(pc_path, points, colors=None, normals=None):
point_cloud = o3d.geometry.PointCloud()
point_cloud.points = o3d.utility.Vector3dVector(points)
if colors is not None:
        assert points.shape[0] == colors.shape[0], "points and colors must have the same number of entries"
point_cloud.colors = o3d.utility.Vector3dVector(colors)
if normals is not None:
point_cloud.normals = o3d.utility.Vector3dVector(normals)
return o3d.io.write_point_cloud(pc_path, point_cloud)
def save_pc(pc_path, points, colors=None, normals=None):
pc_device = o3d.core.Device("CPU:0")
pc_type = o3d.core.float32
point_cloud = o3d.t.geometry.PointCloud(pc_device)
point_cloud.point["positions"] = o3d.core.Tensor(points.astype(np.float32), pc_type, pc_device)
if normals is not None:
point_cloud.point["normals"] = o3d.core.Tensor(normals.astype(np.float32), pc_type, pc_device)
if colors is not None:
        assert points.shape[0] == colors.shape[0], "points and colors must have the same number of entries"
        colors = (colors * 255).astype(np.uint8)  # open3d 0.15.1 expects 8-bit colors; the saved file is then viewable in MeshLab
        point_cloud.point["colors"] = o3d.core.Tensor(colors, o3d.core.uint8, pc_device)
return o3d.t.io.write_point_cloud(pc_path, point_cloud, compressed=True, print_progress=True)
def get_bbox(points):
x_min = np.min(points[:, 0])
x_max = np.max(points[:, 0])
y_min = np.min(points[:, 1])
y_max = np.max(points[:, 1])
z_min = np.min(points[:, 2])
z_max = np.max(points[:, 2])
bbox = np.array([[x_min, x_max],
[y_min, y_max],
[z_min, z_max]])
center = ((bbox[:, 1] + bbox[:, 0]) / 2.).T
return bbox, center
def pc_voxelization(points, shape):
segments = []
steps = []
for i in range(3):
s, step = np.linspace(0, shape[i] - 1, num=shape[i], retstep=True)
segments.append(s)
steps.append(step)
vidx_x = np.clip(np.searchsorted(segments[0], points[:, 0]), 0, shape[0] - 1)
vidx_y = np.clip(np.searchsorted(segments[1], points[:, 1]), 0, shape[1] - 1)
vidx_z = np.clip(np.searchsorted(segments[2], points[:, 2]), 0, shape[2] - 1)
vidx = np.concatenate((vidx_x[:, None], vidx_y[:, None], vidx_z[:, None]), axis=-1)
vidx = np.unique(vidx, axis=0)
return vidx[:, 0], vidx[:, 1], vidx[:, 2]
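# Illustrative usage (added for clarity; not part of the original CT2Hair code): rasterize a point
# cloud that already lives in grid coordinates into a binary occupancy volume of the given shape.
def _demo_pc_voxelization(points, shape=(128, 128, 128)):
    occupancy = np.zeros(shape, dtype=np.float32)
    idx_x, idx_y, idx_z = pc_voxelization(points, shape)
    occupancy[idx_x, idx_y, idx_z] = 1.0
    return occupancy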
def patch_filter_major(points, voxels, weights, kernel_size=5):
    assert voxels.ndim == 3, "Only works for 3D voxel grids with scalar values"
    assert voxels.dtype == np.int16, "Only works for int16 voxel values"
num_points = points.shape[0]
offset = kernel_size // 2
padded_voxels = np.pad(voxels, ((offset, offset), (offset, offset), (offset, offset)), mode='reflect')
padded_weights = np.pad(weights, ((offset, offset), (offset, offset), (offset, offset)), mode='reflect')
filtered_voxels = deepcopy(voxels)
for i_point in range(num_points):
grid_idx = points[i_point]
# selected_region_start_pos = grid_idx - offset
selected_region = padded_voxels[grid_idx[0] : grid_idx[0] + kernel_size,
grid_idx[1] : grid_idx[1] + kernel_size,
grid_idx[2] : grid_idx[2] + kernel_size,]
selected_weights = padded_weights[grid_idx[0] : grid_idx[0] + kernel_size,
grid_idx[1] : grid_idx[1] + kernel_size,
grid_idx[2] : grid_idx[2] + kernel_size,]
major_value = np.bincount(selected_region.reshape(-1), selected_weights.reshape(-1)).argmax()
filtered_voxels[grid_idx[0], grid_idx[1], grid_idx[2]] = major_value
return filtered_voxels
|
CT2Hair-main
|
CT2Hair/utils/pcutils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import cv2
import math
import torch
import torch.nn as nn
import numpy as np
from matplotlib import cm
def polar2vector(theta, phi, step_length=1, start_vector=np.array([1, 0, 0])):
sin_a, cos_a = math.sin(0), math.cos(0)
sin_b, cos_b = math.sin(phi), math.cos(phi)
sin_g, cos_g = math.sin(theta), math.cos(theta)
R_x = np.array([[1, 0, 0],
[0, cos_a, -sin_a],
[0, sin_a, cos_a]])
R_y = np.array([[ cos_b, 0, sin_b],
[ 0, 1, 0],
[-sin_b, 0, cos_b]])
R_z = np.array([[cos_g, -sin_g, 0],
[sin_g, cos_g, 0],
[ 0, 0, 1]],)
R = R_z @ R_y @ R_x
vector = start_vector * step_length
vector = vector.T
vector = R @ vector
return vector
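# Quick sanity check (added for clarity; not part of the original CT2Hair code): with the
# R_z @ R_y @ R_x convention above, phi = pi/2 rotates the start vector [1, 0, 0] onto [0, 0, -1].
def _demo_polar2vector():
    v = polar2vector(theta=0.0, phi=math.pi / 2)
    assert np.allclose(v, [0., 0., -1.])
    return v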
def polar2vector_torch(theta, phi, step_length=1, start_vector=torch.tensor([1, 0, 0]), device='cuda'):
if not torch.is_tensor(theta):
theta = torch.tensor(theta, device=device)
if not torch.is_tensor(phi):
phi = torch.tensor(phi, device=device)
start_vector = start_vector.float().to(device)
num = theta.shape[0]
sin_a, cos_a = torch.sin(torch.zeros(num, device=device)), torch.cos(torch.zeros(num, device=device))
sin_b, cos_b = torch.sin(phi), torch.cos(phi)
sin_g, cos_g = torch.sin(theta), torch.cos(theta)
R_x = torch.zeros(size=(num, 3, 3)).to(device)
R_x[:, 1, 1] = cos_a
R_x[:, 1, 2] = -sin_a
R_x[:, 2, 1] = sin_a
R_x[:, 2, 2] = cos_a
R_x[:, 0, 0] = 1
R_y = torch.zeros(size=(num, 3, 3)).to(device)
R_y[:, 0, 0] = cos_b
R_y[:, 0, 2] = sin_b
R_y[:, 2, 0] = -sin_b
R_y[:, 2, 2] = cos_b
R_y[:, 1, 1] = 1
R_z = torch.zeros(size=(num, 3, 3)).to(device)
R_z[:, 0, 0] = cos_g
R_z[:, 0, 1] = -sin_g
R_z[:, 1, 0] = sin_g
R_z[:, 1, 1] = cos_g
R_z[:, 2, 2] = 1
with torch.no_grad():
R = R_z @ R_y @ R_x
vector = start_vector * step_length
vector = R @ vector
return vector.detach().cpu().numpy()
def downsample3dpool(data, ratio=2, mode='avg', dtype=torch.float32):
data_shape = data.shape
if not torch.is_tensor(data):
data = torch.tensor(data, dtype=dtype, device='cuda')
data = data.view((1, 1, data_shape[0], data_shape[1], data_shape[2])).contiguous()
if mode == 'max':
pool = nn.MaxPool3d(kernel_size=ratio)
elif mode == 'avg':
pool = nn.AvgPool3d(kernel_size=ratio)
    data = pool(data)  # type: ignore (avoid a pylance error report)
return data[0, 0].detach().cpu().numpy()
def get_color_mapping(samples=1024):
# hsv_color_map = cm.get_cmap('hsv', 256)
twi_color_map = cm.get_cmap('twilight', 256)
twi_shift_color_map = cm.get_cmap('twilight_shifted', 256)
x, y = np.meshgrid(np.linspace(0, 1, samples), np.linspace(0, 1, samples))
# hsv_rgb = np.float32(hsv_color_map(x))
# hsv_bgr = cv2.cvtColor(hsv_rgb, cv2.COLOR_RGBA2BGRA)
# cv2.imwrite('temp/mapping.png', hsv_bgr * 255)
    twi_rgb = np.float32(twi_color_map(x))  # type: ignore (avoid a pylance error report)
    twi_bgr = cv2.cvtColor(twi_rgb, cv2.COLOR_RGBA2BGRA)
    twi_sh_rgb = np.float32(twi_shift_color_map(y))  # type: ignore (avoid a pylance error report)
twi_sh_bgr = cv2.cvtColor(twi_sh_rgb, cv2.COLOR_RGBA2BGRA)
cv2.imwrite('temp/mapping_theta.png', twi_bgr * 255)
cv2.imwrite('temp/mapping_phi.png', twi_sh_bgr * 255)
def batched_index_select(t, dim, inds):
dummy = inds.unsqueeze(2).expand(inds.size(0), inds.size(1), t.size(2))
out = t.gather(dim, dummy) # b x e x f
return out
def scale2mat(scale_ratio):
mat44 = np.eye(4)
for i in range(3):
mat44[i, i] = scale_ratio
return mat44
def translate2mat(offset):
mat44 = np.eye(4)
mat44[0:3, 3] = offset.T
return mat44
def homo_rot_mat(mat33):
mat44 = np.eye(4)
mat44[0:3, 0:3] = mat33
return mat44
def idx_map_2_rgb(idx_map):
[map_height, map_width] = idx_map.shape[:2]
idx_map_rgb = np.zeros((map_height, map_width, 3))
    # channel layout for cv2.imwrite (BGR on disk): channel 2 holds the high byte, channel 1 the middle byte, channel 0 the low byte
    # TODO: switch to bitwise shift/mask operations later
idx_map_rgb[:, :, 2] = idx_map // (256 * 256)
idx_map_rgb[:, :, 1] = (idx_map - (idx_map_rgb[:, :, 2] * 256 * 256)) // 256
idx_map_rgb[:, :, 0] = (idx_map - (idx_map_rgb[:, :, 2] * 256 * 256 +
idx_map_rgb[:, :, 1] * 256))
return idx_map_rgb
def idx_rgb_recover(idx_bgr):
[map_height, map_width] = idx_bgr.shape[:2]
idx_map = np.zeros((map_height, map_width))
idx_rgb = cv2.cvtColor(idx_bgr, cv2.COLOR_BGR2RGB).astype(np.int64)
idx_map = idx_rgb[:, :, 0] * 256 * 256 + idx_rgb[:, :, 1] * 256 + idx_rgb[:, :, 2] - 1
return idx_map
def cheap_stack(tensors, dim):
if len(tensors) == 1:
return tensors[0].unsqueeze(dim)
else:
return torch.stack(tensors, dim=dim)
|
CT2Hair-main
|
CT2Hair/utils/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Conv3dGaussian(nn.Module):
'''
WARNING: the size of the kernel must be an odd number otherwise it'll be shifted with respect to the origin
'''
def __init__(self,
size: int,
sigma=3,
gamma_y=1.0,
gamma_z=1.0,
padding=None,
device='cuda'):
super().__init__()
self.size = size
self.device = device
if padding:
self.padding = padding
else:
self.padding = 0
self.sigma = sigma
self.gamma_y = gamma_y
self.gamma_z = gamma_z
self.kernels = self.init_kernel()
def init_kernel(self):
sigma_x = self.sigma
sigma_y = self.sigma * self.gamma_y
sigma_z = self.sigma * self.gamma_z
c_max, c_min = int(self.size / 2), -int(self.size / 2)
(x, y, z) = torch.meshgrid(torch.arange(c_min, c_max + 1),
torch.arange(c_min, c_max + 1),
torch.arange(c_min, c_max + 1), indexing='ij') # for future warning
x = x.to(self.device)
y = y.to(self.device)
z = z.to(self.device)
kernel = torch.exp(-.5 * (x ** 2 / sigma_x ** 2 + y ** 2 / sigma_y ** 2 + z ** 2 / sigma_z ** 2))
# normalize
kernel = F.normalize(kernel)
return kernel.reshape(1, 1, self.size, self.size, self.size).contiguous()
def forward(self, x):
with torch.no_grad():
x = F.conv3d(x, weight=self.kernels, padding=self.padding)
return x
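# Illustrative usage (added for clarity; not part of the original CT2Hair code): smooth a raw 3D
# density volume with the Gaussian kernel above. Assumes a CUDA device is available.
def _demo_conv3d_gaussian(volume_np):
    blur = Conv3dGaussian(size=5, sigma=1.5, padding=2, device='cuda')
    x = torch.tensor(volume_np, dtype=torch.float32, device='cuda')[None, None]  # [1, 1, D, H, W]
    return blur(x)[0, 0].cpu().numpy()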
class Conv3dLaplacian():
'''
WARNING: the size of the kernel must be an odd number otherwise it'll be shifted with respect to the origin
'''
def __init__(self,
padding=None,
device='cuda'):
super().__init__()
self.device = device
if padding:
self.padding = padding
else:
self.padding = 0
self.kernels = self.init_kernel()
def init_kernel(self):
kernel = torch.ones((3, 3, 3), device=self.device) * -1
kernel[1, 1, 1] = 26
return kernel.reshape(1, 1, 3, 3, 3)
def forward(self, x):
with torch.no_grad():
x = F.conv3d(x, weight=self.kernels, padding=self.padding)
mask = x[0, 0] > 0
return mask.float()
class Conv3dErosion(nn.Module):
'''
WARNING: the size of the kernel must be an odd number otherwise it'll be shifted with respect to the origin
'''
def __init__(self,
size=3,
padding=None,
device='cuda'):
super().__init__()
self.size = size
self.device = device
if padding:
self.padding = padding
else:
self.padding = 0
self.kernels = self.init_kernel()
def init_kernel(self):
kernel = torch.ones((self.size, self.size, self.size), device=self.device)
return kernel.reshape(1, 1, self.size, self.size, self.size)
def forward(self, x, ration=1):
with torch.no_grad():
x = F.conv3d(x, weight=self.kernels, padding=self.padding)
mask = x[0, 0] >= self.size ** 3 * ration
return mask.float()
class Conv3dGabor():
'''
Applies a 3d convolution over an input signal using Gabor filter banks.
WARNING: the size of the kernel must be an odd number otherwise it'll be shifted with respect to the origin
Refer to https://github.com/m-evdokimov/pytorch-gabor3d
'''
def __init__(self,
in_channels: int,
out_channels: int,
size: int,
sigma=3,
gamma_y=0.5,
gamma_z=0.5,
lambd=6,
psi=0.,
padding=None,
device='cuda'):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.num_filters = in_channels * out_channels
self.size = size
self.device = device
if padding:
self.padding = padding
else:
self.padding = 0
        # all additional axes are added for correct broadcasting
        # the bounds of the uniform distribution are tuned manually for each kernel size (currently adjusted for 5x5x5 filters)
        # for better understanding: https://medium.com/@anuj_shah/through-the-eyes-of-gabor-filter-17d1fdb3ac97
self.sigma = torch.ones(size=(self.num_filters, 1, 1, 1)).to(self.device) * sigma
self.gamma_y = torch.ones(size=(self.num_filters, 1, 1, 1)).to(self.device) * gamma_y
self.gamma_z = torch.ones(size=(self.num_filters, 1, 1, 1)).to(self.device) * gamma_z
self.lambd = torch.ones(size=(self.num_filters, 1, 1, 1)).to(self.device) * lambd
self.psi = torch.ones(size=(self.num_filters, 1, 1, 1)).to(self.device) * psi
self.angles = torch.zeros(size=(self.num_filters, 3)).to(self.device)
num_angles_per_axis = round(math.sqrt(self.num_filters))
angle_step = math.pi / num_angles_per_axis
# use polar coordinate, theta round with x, phi round with y
for i_theta in range(num_angles_per_axis):
for j_phi in range(num_angles_per_axis):
rot_angle = torch.tensor([0, j_phi * angle_step, i_theta * angle_step]).to(self.device)
self.angles[i_theta * num_angles_per_axis + j_phi] = rot_angle
self.kernels = self.init_kernel()
def init_kernel(self):
'''
Initialize a gabor kernel with given parameters
Returns torch.Tensor with size (out_channels, in_channels, size, size, size)
'''
lambd = self.lambd
psi = self.psi
sigma_x = self.sigma
sigma_y = self.sigma * self.gamma_y
sigma_z = self.sigma * self.gamma_z
R = self.get_rotation_matrix().reshape(self.num_filters, 3, 3, 1, 1, 1)
c_max, c_min = int(self.size / 2), -int(self.size / 2)
(x, y, z) = torch.meshgrid(torch.arange(c_min, c_max + 1),
torch.arange(c_min, c_max + 1),
torch.arange(c_min, c_max + 1), indexing='ij') # for future warning
x = x.to(self.device)
y = y.to(self.device)
z = z.to(self.device)
# meshgrid for every filter
x = x.unsqueeze(0).repeat(self.num_filters, 1, 1, 1)
y = y.unsqueeze(0).repeat(self.num_filters, 1, 1, 1)
z = z.unsqueeze(0).repeat(self.num_filters, 1, 1, 1)
x_prime = z * R[:, 2, 0] + y * R[:, 2, 1] + x * R[:, 2, 2]
y_prime = z * R[:, 1, 0] + y * R[:, 1, 1] + x * R[:, 1, 2]
z_prime = z * R[:, 0, 0] + y * R[:, 0, 1] + x * R[:, 0, 2]
yz_prime = torch.sqrt(y_prime ** 2 + z_prime ** 2)
# gabor formula
kernel = torch.exp(-.5 * (x_prime ** 2 / sigma_x ** 2 + y_prime ** 2 / sigma_y ** 2 + z_prime ** 2 / sigma_z ** 2)) \
* torch.cos(2 * math.pi * yz_prime / (lambd + 1e-6) + psi)
return kernel.reshape(self.out_channels, self.in_channels, self.size, self.size, self.size).contiguous()
def get_rotation_matrix(self):
'''
Makes 3d rotation matrix.
R_x = torch.Tensor([[cos_a, -sin_a, 0],
[sin_a, cos_a, 0],
[0, 0, 1]],)
R_y = torch.Tensor([[cos_b, 0, sin_b],
[0 , 1, 0],
[-sin_b, 0, cos_b]])
R_z = torch.Tensor([[1, 0, 0],
[0, cos_g, -sin_g],
[0, sin_g, cos_g]])
'''
sin_a, cos_a = torch.sin(self.angles[:, 0]), torch.cos(self.angles[:, 0])
sin_b, cos_b = torch.sin(self.angles[:, 1]), torch.cos(self.angles[:, 1])
sin_g, cos_g = torch.sin(self.angles[:, 2]), torch.cos(self.angles[:, 2])
R_x = torch.zeros(size=(self.num_filters, 3, 3)).to(self.device)
R_x[:, 0, 0] = cos_a
R_x[:, 0, 1] = -sin_a
R_x[:, 1, 0] = sin_a
R_x[:, 1, 1] = cos_a
R_x[:, 2, 2] = 1
R_y = torch.zeros(size=(self.num_filters, 3, 3)).to(self.device)
R_y[:, 0, 0] = cos_b
R_y[:, 0, 2] = sin_b
R_y[:, 2, 0] = -sin_b
R_y[:, 2, 2] = cos_b
R_y[:, 1, 1] = 1
R_z = torch.zeros(size=(self.num_filters, 3, 3)).to(self.device)
R_z[:, 1, 1] = cos_g
R_z[:, 1, 2] = -sin_g
R_z[:, 2, 1] = sin_g
R_z[:, 2, 2] = cos_g
R_z[:, 0, 0] = 1
return R_x @ R_y @ R_z
def forward(self, x):
with torch.no_grad():
x = F.conv3d(x, weight=self.kernels, padding=self.padding)
return x
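# Illustrative usage (added for clarity; not part of the original CT2Hair code): a bank of 36
# oriented Gabor filters (6 theta x 6 phi angles) applied to a single-channel volume; the per-voxel
# orientation response is taken as the maximum over the bank. Assumes a CUDA device is available.
def _demo_conv3d_gabor(volume_tensor):
    gabor = Conv3dGabor(in_channels=1, out_channels=36, size=5, padding=2, device='cuda')
    responses = gabor.forward(volume_tensor[None, None].float().cuda())  # [1, 36, D, H, W]
    return responses.max(dim=1).values[0]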
|
CT2Hair-main
|
CT2Hair/utils/kernels.py
|
from .chamfer_distance import ChamferDistance
|
CT2Hair-main
|
CT2Hair/libs/chamfer_distance/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# https://github.com/chrdiller/pyTorchChamferDistance/tree/master
import torch
from torch.utils.cpp_extension import load
cd = load(name="cd",
sources=["CT2Hair/libs/chamfer_distance/chamfer_distance.cpp",
"CT2Hair/libs/chamfer_distance/chamfer_distance.cu"])
class ChamferDistanceFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, xyz1, xyz2):
batchsize, n, _ = xyz1.size()
_, m, _ = xyz2.size()
xyz1 = xyz1.contiguous()
xyz2 = xyz2.contiguous()
dist1 = torch.zeros(batchsize, n)
dist2 = torch.zeros(batchsize, m)
idx1 = torch.zeros(batchsize, n, dtype=torch.int)
idx2 = torch.zeros(batchsize, m, dtype=torch.int)
if not xyz1.is_cuda:
cd.forward(xyz1, xyz2, dist1, dist2, idx1, idx2)
else:
dist1 = dist1.cuda()
dist2 = dist2.cuda()
idx1 = idx1.cuda()
idx2 = idx2.cuda()
cd.forward_cuda(xyz1, xyz2, dist1, dist2, idx1, idx2)
ctx.save_for_backward(xyz1, xyz2, idx1, idx2)
return dist1, dist2
@staticmethod
def backward(ctx, graddist1, graddist2):
xyz1, xyz2, idx1, idx2 = ctx.saved_tensors
graddist1 = graddist1.contiguous()
graddist2 = graddist2.contiguous()
gradxyz1 = torch.zeros(xyz1.size())
gradxyz2 = torch.zeros(xyz2.size())
if not graddist1.is_cuda:
cd.backward(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2)
else:
gradxyz1 = gradxyz1.cuda()
gradxyz2 = gradxyz2.cuda()
cd.backward_cuda(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2)
return gradxyz1, gradxyz2
class ChamferDistance(torch.nn.Module):
def forward(self, xyz1, xyz2):
return ChamferDistanceFunction.apply(xyz1, xyz2)
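# Illustrative usage (added for clarity; not part of the original CT2Hair code): symmetric chamfer
# distance between two [N, 3] / [M, 3] point sets. Assumes a CUDA device so forward_cuda is used.
def _demo_chamfer_distance(points_a, points_b):
    chamfer = ChamferDistance()
    xyz1 = torch.tensor(points_a, dtype=torch.float32)[None].cuda()  # [1, N, 3]
    xyz2 = torch.tensor(points_b, dtype=torch.float32)[None].cuda()  # [1, M, 3]
    dist1, dist2 = chamfer(xyz1, xyz2)  # squared distances to the nearest point in the other set
    return dist1.mean() + dist2.mean()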
|
CT2Hair-main
|
CT2Hair/libs/chamfer_distance/chamfer_distance.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import copy
import math
import torch
import numpy as np
from tqdm import tqdm
from sklearn.cluster import MeanShift
from scipy.spatial import KDTree
from utils.meshutils import strands_world2tbn, make_closest_uv_barys
from datautils.dataloaders import TbnStrandsBinDataset
from modules.strands_codec import StrandCodec
class NeuralStrands():
def __init__(self, is_resampled=True):
self.is_resampled = is_resampled
self.texture_height = 1024
self.texture_width = 1024
self.feature_channels = 128 # 64 for old, 128 for new
self.num_strds_points = 256 # 100 for old, 256 for new
self.neural_texture = np.zeros((self.texture_width, self.texture_height, self.feature_channels))
self.neural_texture_pca_rgb = np.zeros((self.texture_width, self.texture_height, 3))
self.strds_idx_map = np.zeros((self.texture_width, self.texture_height, 1), dtype=np.uint32)
train_param = {"num_pts": self.num_strds_points, "code_channels": self.feature_channels}
self.ckpt_path = 'CT2Hair/ckpt/neuralstrands_model.pt'
self.model = StrandCodec(do_vae=True, decode_direct_xyz=False, decode_random_verts=False, train_params=train_param, is_train=False).to("cuda")
checkpoint = torch.load(self.ckpt_path)
self.model.load_state_dict(checkpoint['model_state_dict'], strict=False)
self.model.eval()
def prep_strands_data(self, strands, head_mesh, scalp_mesh, scalp_faces_idx):
self.original_strands = strands
self.head_mesh = head_mesh
self.tbn_strands, barys, interp_idxs, face_idxs, self.valid_strds_idxs, \
self.tangents, self.bitangents, self.normals = strands_world2tbn(strands, head_mesh, scalp_mesh, scalp_faces_idx)
self.head_index_map, self.head_bary_map = make_closest_uv_barys(torch.tensor(head_mesh.visual.uv), torch.tensor(head_mesh.faces),
[self.texture_height, self.texture_width]) # type: ignore for avoiding pylance error
# get uv coords for hair strands roots
head_interp_idxs = head_mesh.faces[scalp_faces_idx][face_idxs]
head_uv_coords = head_mesh.visual.uv # num_vertices x 2
v0 = head_uv_coords[head_interp_idxs[:, 0]]
v1 = head_uv_coords[head_interp_idxs[:, 1]]
v2 = head_uv_coords[head_interp_idxs[:, 2]]
b0, b1, b2 = np.split(barys, 3, axis=1)
self.strds_uv_coords = b0 * v0 + b1 * v1 + b2 * v2
# try to save a texture map for demonstration
self.strds_texel_coords = self.strds_uv_coords * [self.texture_height, self.texture_width]
self.strds_texel_coords = np.around(self.strds_texel_coords).astype(np.int32)
tbn_strds_dataset = TbnStrandsBinDataset(self.tbn_strands, is_resampled=self.is_resampled, num_strds_points=self.num_strds_points)
self.tbn_strds_dataloader = tbn_strds_dataset.get_dataloader()
def decode(self, strds_code):
strds_code_dict = {}
strds_code_dict['s_shape'] = strds_code
pred_dict = self.model.decode(strds_code_dict)
pred_points = pred_dict["pred_points"]
return pred_points
def get_neural_representations(self, iter_opt=0, lr=1e-4):
# loss_writer = SummaryWriter('log/neural_rep/')
self.regular_strands = torch.zeros((0, self.num_strds_points, 3)).cuda() # valid strands in TBN space with the unified number of points
self.strds_features = torch.zeros((0, self.feature_channels)).cuda()
hair_loss_l2 = []
hair_loss_dir = []
loop = tqdm(enumerate(self.tbn_strds_dataloader, 0))
for i_data, input_data in loop:
self.model.diff_spline(input_data)
encoded_dict = self.model.encode()
strds_code = encoded_dict['s_shape'].clone().detach()
# setup optimization
strds_code = strds_code.requires_grad_(True)
strds_code_dict = {}
strds_code_dict['s_shape'] = strds_code
code_optimizer = torch.optim.Adam([strds_code_dict['s_shape']], lr=lr)
if iter_opt == 0:
prediction_dict = self.model.decode(strds_code_dict)
loss_l2 = self.model.compute_loss_l2(prediction_dict)
loss_dir = self.model.compute_loss_dir(prediction_dict)
loss = loss_l2 + loss_dir * 1e-4
hair_loss_l2.append(loss_l2.item())
hair_loss_dir.append(loss_dir.item())
else:
for i_iter in range(iter_opt):
self.model.train()
prediction_dict = self.model.decode(strds_code_dict)
loss_l2 = self.model.compute_loss_l2(prediction_dict)
loss_dir = self.model.compute_loss_dir(prediction_dict)
loss = loss_l2 + loss_dir * 0.001
code_optimizer.zero_grad()
loss.backward()
code_optimizer.step()
hair_loss_l2.append(loss_l2.item())
hair_loss_dir.append(loss_dir.item())
loop.set_description("Getting neural representations, batch loss: l2: %f, dir: %f"%(loss_l2.item(), loss_dir.item()))
self.regular_strands = torch.concat((self.regular_strands, self.model.splined_points), dim=0)
self.strds_features = torch.concat((self.strds_features, strds_code_dict['s_shape']), dim=0)
hair_loss_l2 = np.array(hair_loss_l2)
hair_loss_dir = np.array(hair_loss_dir)
print('Average reconstruction errors: l2: %f, dir: %f'%(np.mean(hair_loss_l2), np.mean(hair_loss_dir)))
self.regular_strands = self.regular_strands.reshape(-1, self.num_strds_points, 3).detach().cpu().numpy()
self.strds_features = self.strds_features.reshape(-1, self.feature_channels).detach().cpu().numpy()
self.neural_texture[np.clip(self.texture_height - self.strds_texel_coords[:, 1], 0, self.texture_height - 1),
np.clip(self.strds_texel_coords[:, 0], 0, self.texture_width - 1), :] = self.strds_features[:, :]
strds_idxs = np.arange(self.strds_features.shape[0]) + 1
self.strds_idx_map[np.clip(self.texture_height - self.strds_texel_coords[:, 1], 0, self.texture_height - 1),
np.clip(self.strds_texel_coords[:, 0], 0, self.texture_width - 1), 0] = strds_idxs
valid_texel_in_map = np.where(self.strds_idx_map > 0)
self.texel_strds_idxs = self.strds_idx_map[valid_texel_in_map[0], valid_texel_in_map[1], 0] - 1
self.used_strands = self.original_strands[self.valid_strds_idxs][self.texel_strds_idxs]
def denoise_on_strds(self, num_init_clusters=16, num_iters=64, max_cls_thres=2., num_strds_thres=64):
        '''
        Denoise the regular TBN-space strands with an adaptive K-means clustering over whole strands.
        Returns the indices of the kept (denoised) strands relative to the valid TBN strands,
        together with the per-strand cluster assignments.
        '''
# Try classic K-means on points
valid_texel_in_map = np.where(self.strds_idx_map > 0)
texel_strds_idxs = self.strds_idx_map[valid_texel_in_map[0], valid_texel_in_map[1], 0] - 1
num_texel_strds = texel_strds_idxs.shape[0]
init_centers_idxs = np.arange(num_init_clusters) * (num_texel_strds // num_init_clusters)
init_strds_centriods = self.regular_strands[init_centers_idxs]
num_clusters = num_init_clusters
strds_centriods = init_strds_centriods
adaptive_iter = 0
while(True):
repeated_strds = self.regular_strands[:, None, :, :].repeat(num_clusters, axis=1) # type: ignore for avoiding pylance error
for i_iter in range(num_iters):
pts_dis_centriods = np.sqrt(np.sum((repeated_strds - strds_centriods) ** 2, axis=-1, keepdims=False))
strd_dis_centriods = np.sum(pts_dis_centriods, axis=-1, keepdims=False) # naive sum without weights
strd_clusters = np.argmin(strd_dis_centriods, axis=-1)
# update means
pre_strds_centroids = copy.deepcopy(strds_centriods)
for j_cls in range(num_clusters):
cluster_strds = self.regular_strands[np.where(strd_clusters == j_cls)[0]]
strds_centriods[j_cls] = np.sum(cluster_strds, axis=0, keepdims=False) / cluster_strds.shape[0]
# centroid_dis = np.sum(np.sqrt(np.sum((strds_centriods - pre_strds_centroids) ** 2, axis=-1, keepdims=False)))
# print(centroid_dis)
# recalculate strands cluster use the final center
pts_dis_centriods = np.sqrt(np.sum((repeated_strds - strds_centriods) ** 2, axis=-1, keepdims=False))
strd_dis_centriods = np.sum(pts_dis_centriods, axis=-1, keepdims=False) # naive sum without weights
strd_clusters = np.argmin(strd_dis_centriods, axis=-1)
# calculate the max distances in clusters
strd_clusters_dis = np.min(strd_dis_centriods, axis=-1)
num_currt_clusters = num_clusters
for i_cls in range(num_currt_clusters):
strd_cluster_idx = np.where(strd_clusters == i_cls)[0]
cluster_dis = strd_clusters_dis[strd_cluster_idx]
max_cls_dis = np.max(cluster_dis)
max_strd_idx = np.argmax(cluster_dis)
if max_cls_dis > max_cls_thres:
num_clusters += 1
strds_centriods = np.concatenate((strds_centriods, self.regular_strands[strd_cluster_idx][max_strd_idx:max_strd_idx+1]), axis=0)
if num_clusters == num_currt_clusters:
break
num_iters = num_iters // 2
if num_iters < 1:
break
adaptive_iter += 1
print('Adaptive K-means iter %d...'%(adaptive_iter))
denoised_strds_idxs = [] # for valid tbn_strands
for i_cls in range(num_clusters):
cluster_idxs = np.where(strd_clusters == i_cls)[0].tolist()
if len(cluster_idxs) >= num_strds_thres: # type: ignore for avoiding pylance error
denoised_strds_idxs.extend(cluster_idxs)
# # temp visualization
# cluster_strds = world_strands[cluster_idxs]
# cluster_rgb = strd_clusters_rgb[cluster_idxs]
# save_color_strands('../temp/KMeans/kmeans_strands_cls_%d.cin'%(i_cls), cluster_strds, cluster_rgb)
print('Final number of clusters: %d, remove noise strands: %d.'%(num_clusters, self.regular_strands.shape[0] - len(denoised_strds_idxs)))
self.denoised_regular_strds = self.regular_strands[denoised_strds_idxs]
self.denoised_strds_features = self.strds_features[denoised_strds_idxs]
self.denoised_strds_texel_coords = self.strds_texel_coords[denoised_strds_idxs]
self.denoised_neural_texture = np.zeros((self.texture_height, self.texture_width, self.feature_channels))
self.denoised_strds_idx_map = np.zeros((self.texture_height, self.texture_width, 1), dtype=np.uint32)
self.denoised_neural_texture[np.clip(self.texture_height - self.denoised_strds_texel_coords[:, 1], 0, self.texture_height - 1),
np.clip(self.denoised_strds_texel_coords[:, 0], 0, self.texture_width - 1), :] = self.denoised_strds_features[:, :]
strds_idxs = np.arange(self.denoised_strds_features.shape[0]) + 1
self.denoised_strds_idx_map[np.clip(self.texture_height - self.denoised_strds_texel_coords[:, 1], 0, self.texture_height - 1),
np.clip(self.denoised_strds_texel_coords[:, 0], 0, self.texture_width - 1), 0] = strds_idxs
return denoised_strds_idxs, strd_clusters
def interpolation_on_strds(self, texel_roots_map, interp_kernel_size=5, interp_neig_pts=3, max_dis_thres=16):
steps_height = self.texture_height // interp_kernel_size
steps_width = self.texture_width // interp_kernel_size
# build kd-tree for points
texel_strds_pts = np.where(self.denoised_strds_idx_map > 0)
texel_strds_pts = np.concatenate((texel_strds_pts[0][:, None], texel_strds_pts[1][:, None]), axis=1)
texel_pts_kdtree = KDTree(texel_strds_pts)
texel_strds_mask = self.denoised_strds_idx_map > 0
texel_roots_mask = texel_roots_map > 0.5
interped_strands = []
interped_strds_face_idxs = []
interped_strds_face_barys = []
self.interp_count = 0
for i_h in range(steps_height):
for i_w in range(steps_width):
cen_h = i_h * interp_kernel_size + (interp_kernel_size // 2)
cen_w = i_w * interp_kernel_size + (interp_kernel_size // 2)
if texel_roots_mask[cen_h, cen_w] == False or texel_strds_mask[cen_h, cen_w] == True:
continue
num_existing_features = np.sum(texel_strds_mask[cen_h - (interp_kernel_size // 2) : cen_h + (interp_kernel_size // 2) + 1,
cen_w - (interp_kernel_size // 2) : cen_w + (interp_kernel_size // 2) + 1].astype(np.int16))
if num_existing_features > 0:
continue
dis, idx = texel_pts_kdtree.query(np.array([cen_h, cen_w]), interp_neig_pts)
dis = np.array(dis)
if np.sum(dis) > max_dis_thres * 3:
continue
dis = 1. / dis
                normalized_dis = dis / np.linalg.norm(dis)  # normalize the inverse distances into interpolation weights
knn_strds_idxs = self.denoised_strds_idx_map[texel_strds_pts[idx, 0], texel_strds_pts[idx, 1], 0] # type: ignore for avoiding pylance error # for valid strands in TBN space
knn_strands = self.regular_strands[knn_strds_idxs]
if interp_neig_pts == 1:
interped_strand = knn_strands
else:
interped_strand = np.average(knn_strands, axis=0, weights=normalized_dis)
interped_strands.append(interped_strand)
interped_strds_face_idxs.append(self.head_index_map[cen_h, cen_w].detach().numpy())
interped_strds_face_barys.append(self.head_bary_map[cen_h, cen_w].detach().numpy())
self.interp_count += 1
interped_strands = np.array(interped_strands)
interped_strds_face_idxs = np.array(interped_strds_face_idxs)
interped_strds_face_barys = np.array(interped_strds_face_barys)
return interped_strands, interped_strds_face_idxs, interped_strds_face_barys
def denoise_neural_texture(self, num_del_cls=4, do_denoise=True):
if do_denoise:
clustering = MeanShift().fit(self.strds_features)
num_cls = np.max(clustering.labels_) + 1
strds_cls = clustering.labels_
cls_amount = np.zeros(num_cls)
for i_cls in range(num_cls):
cls_idx = np.where(strds_cls == i_cls)[0]
cls_amount[i_cls] = cls_idx.shape[0]
argsort_cls_idx = np.argsort(cls_amount)
if num_del_cls == 0:
num_del_cls = num_cls - 1
denoised_cls_idx = argsort_cls_idx[num_del_cls:]
num_denoised_cls = denoised_cls_idx.shape[0]
denoised_strds_idxs = []
for i_cls in range(num_denoised_cls):
strds_idx = np.where(strds_cls == denoised_cls_idx[i_cls])[0].tolist()
denoised_strds_idxs.extend(strds_idx)
else:
denoised_strds_idxs = np.arange(self.strds_features.shape[0]).tolist()
self.denoised_strds_features = self.strds_features[denoised_strds_idxs]
self.denoised_strds_texel_coords = self.strds_texel_coords[denoised_strds_idxs]
self.denoised_neural_texture = np.zeros((self.texture_height, self.texture_width, self.feature_channels))
self.denoised_strds_idx_map = np.zeros((self.texture_height, self.texture_width, 1), dtype=np.uint32)
self.denoised_neural_texture[np.clip(self.texture_height - self.denoised_strds_texel_coords[:, 1], 0, self.texture_height - 1),
np.clip(self.denoised_strds_texel_coords[:, 0], 0, self.texture_width - 1), :] = self.denoised_strds_features[:, :]
strds_idxs = np.arange(self.denoised_strds_features.shape[0]) + 1
self.denoised_strds_idx_map[np.clip(self.texture_height - self.denoised_strds_texel_coords[:, 1], 0, self.texture_height - 1),
np.clip(self.denoised_strds_texel_coords[:, 0], 0, self.texture_width - 1), 0] = strds_idxs
return denoised_strds_idxs
def interpolation_local_average(self, texel_roots_map, interp_kernel_size=5, interp_neig_radius=16):
steps_height = self.texture_height // interp_kernel_size
steps_width = self.texture_width // interp_kernel_size
texel_strds_mask = self.denoised_strds_idx_map > 0
texel_roots_mask = texel_roots_map > 0.5
self.interp_neural_texture = np.zeros_like(self.denoised_neural_texture)
self.interp_strds_idx_map = np.zeros_like(self.denoised_strds_idx_map, dtype=np.uint32)
self.interp_count = 0
for i_h in range(steps_height):
for i_w in range(steps_width):
cen_h = i_h * interp_kernel_size + (interp_kernel_size // 2)
cen_w = i_w * interp_kernel_size + (interp_kernel_size // 2)
if texel_roots_mask[cen_h, cen_w] == False or texel_strds_mask[cen_h, cen_w] == True:
continue
num_existing_features = np.sum(texel_strds_mask[cen_h - (interp_kernel_size // 2) : cen_h + (interp_kernel_size // 2) + 1,
cen_w - (interp_kernel_size // 2) : cen_w + (interp_kernel_size // 2) + 1].astype(np.int16))
if num_existing_features > 0:
continue
# get the neighbor for centroid using neig_radius
neig_ul = np.clip(np.array([cen_h, cen_w]) - interp_neig_radius, 0, [self.texture_height, self.texture_width])
neig_br = np.clip(np.array([cen_h, cen_w]) + interp_neig_radius, 0, [self.texture_height, self.texture_width])
neig = self.neural_texture[neig_ul[0]:neig_br[0], neig_ul[1]:neig_br[1]]
num_features = np.sum(texel_strds_mask[neig_ul[0]:neig_br[0], neig_ul[1]:neig_br[1]].astype(np.int16))
if num_features == 0:
continue
self.interp_neural_texture[cen_h, cen_w] = np.sum(np.sum(neig, axis=1, keepdims=False), axis=0, keepdims=False) / num_features
self.interp_strds_idx_map[cen_h, cen_w] = self.interp_count + 1
self.interp_count += 1
def interpolation_knn(self, texel_roots_map, interp_kernel_size=5, interp_neig_pts=3, is_bilateral=True, max_dis_thres=16):
steps_height = self.texture_height // interp_kernel_size
steps_width = self.texture_width // interp_kernel_size
# build kd-tree for points
texel_strds_pts = np.where(self.denoised_strds_idx_map > 0)
texel_strds_pts = np.concatenate((texel_strds_pts[0][:, None], texel_strds_pts[1][:, None]), axis=1)
texel_pts_kdtree = KDTree(texel_strds_pts)
texel_strds_mask = self.denoised_strds_idx_map > 0
texel_roots_mask = texel_roots_map > 0.5
self.interp_neural_texture = np.zeros_like(self.denoised_neural_texture)
self.interp_strds_idx_map = np.zeros_like(self.denoised_strds_idx_map, dtype=np.uint32)
self.interp_count = 0
for i_h in range(steps_height):
for i_w in range(steps_width):
cen_h = i_h * interp_kernel_size + (interp_kernel_size // 2)
cen_w = i_w * interp_kernel_size + (interp_kernel_size // 2)
if texel_roots_mask[cen_h, cen_w] == False or texel_strds_mask[cen_h, cen_w] == True:
continue
num_existing_features = np.sum(texel_strds_mask[cen_h - (interp_kernel_size // 2) : cen_h + (interp_kernel_size // 2) + 1,
cen_w - (interp_kernel_size // 2) : cen_w + (interp_kernel_size // 2) + 1].astype(np.int16))
if num_existing_features > 0:
continue
dis, idx = texel_pts_kdtree.query(np.array([cen_h, cen_w]), interp_neig_pts)
dis = np.array(dis)
if np.sum(dis) > max_dis_thres * 3:
continue
dis = 1. / dis
normalized_dis = dis / np.linalg.norm(dis)
knn_strds_codes = self.denoised_neural_texture[texel_strds_pts[idx, 0], texel_strds_pts[idx, 1]] # for valid strands in TBN space
nn_strds_code = knn_strds_codes[0]
similarities = np.abs(np.dot(knn_strds_codes, nn_strds_code.T)
/ (np.linalg.norm(knn_strds_codes, axis=-1) * np.linalg.norm(nn_strds_code, axis=-1)))
if is_bilateral:
interp_weigths = similarities * normalized_dis
interp_weigths = interp_weigths / np.linalg.norm(interp_weigths)
else:
interp_weigths = normalized_dis
if interp_neig_pts == 1:
self.interp_neural_texture[cen_h, cen_w] = knn_strds_codes
else:
self.interp_neural_texture[cen_h, cen_w] = np.average(knn_strds_codes, axis=0, weights=interp_weigths)
self.interp_strds_idx_map[cen_h, cen_w] = self.interp_count + 1
self.interp_count += 1
print('Interpolation done!')
def world_strands_from_tbn(self, strands, face_idxs, face_barys):
if not torch.is_tensor(strands):
strands = torch.tensor(strands, dtype=torch.float32).cuda()
if not torch.is_tensor(face_barys):
face_barys = torch.tensor(face_barys, dtype=torch.float32)
tbn_basis = torch.stack((self.tangents[0], self.bitangents[0], self.normals[0]), dim=2)[face_idxs]
# basis change
orig_points = torch.matmul(tbn_basis.float().cuda(), strands.permute(0, 2, 1)).permute(0, 2, 1)
# scale
orig_points = orig_points * 1000. # m -> mm
        # translate to world space using the root barycentric weights and triangle vertices
triangled_vertices = torch.tensor(self.head_mesh.vertices[self.head_mesh.faces, :])
roots_triangles = triangled_vertices[face_idxs]
roots_positions = roots_triangles[:, 0] * face_barys[:, 0:1] + \
roots_triangles[:, 1] * face_barys[:, 1:2] + \
roots_triangles[:, 2] * face_barys[:, 2:3]
strds_points = orig_points + roots_positions[:, None, :].cuda()
return strds_points
def world_strands_from_texels(self, neural_texture, strds_idx_map, batch_size=300):
texel_idx = np.where(strds_idx_map > 0)
strds_codes = neural_texture[texel_idx[0], texel_idx[1], :]
num_interped = strds_codes.shape[0]
if not torch.is_tensor(strds_codes):
strds_codes = torch.tensor(strds_codes, dtype=torch.float32).cuda()
pred_points = torch.zeros((num_interped, self.num_strds_points, 3)).cuda()
num_batches = math.ceil(num_interped / batch_size)
loop = tqdm(range(num_batches))
loop.set_description('Decoding strands')
for i_b in loop:
i_start = i_b * batch_size
i_end = min((i_b + 1) * batch_size, num_interped)
pred_points[i_start:i_end] = self.decode(strds_codes[i_start:i_end])
face_idxs = self.head_index_map[texel_idx[0], texel_idx[1]]
tbn_basis = torch.stack((self.tangents[0], self.bitangents[0], self.normals[0]), dim=2)[face_idxs]
# basis change
orig_points = torch.matmul(tbn_basis.float().cuda(), pred_points.permute(0, 2, 1)).permute(0, 2, 1)
# scale
orig_points = orig_points * 1000. # m -> mm
        # translate to world space using the root barycentric weights and triangle vertices
triangled_vertices = torch.tensor(self.head_mesh.vertices[self.head_mesh.faces, :])
roots_triangles = triangled_vertices[face_idxs]
face_barys = self.head_bary_map[texel_idx[0], texel_idx[1]]
roots_positions = roots_triangles[:, 0] * face_barys[:, 0:1] + \
roots_triangles[:, 1] * face_barys[:, 1:2] + \
roots_triangles[:, 2] * face_barys[:, 2:3]
strds_points = orig_points + roots_positions[:, None, :].cuda()
return strds_points
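# Illustrative end-to-end sketch (added for clarity; not part of the original CT2Hair code): one
# plausible ordering of the methods above for densifying guide strands. It assumes the pretrained
# codec checkpoint, a CUDA device, and a scalp-root mask `texel_roots_map` are available; the
# actual driver script may differ.
def _demo_neural_strands_pipeline(strands, head_mesh, scalp_mesh, scalp_faces_idx, texel_roots_map):
    ns = NeuralStrands()
    ns.prep_strands_data(strands, head_mesh, scalp_mesh, scalp_faces_idx)
    ns.get_neural_representations(iter_opt=0)
    ns.denoise_neural_texture(num_del_cls=4)
    ns.interpolation_knn(texel_roots_map, interp_kernel_size=5, interp_neig_pts=3)
    # Decode only the newly interpolated texels back to world-space strands.
    return ns.world_strands_from_texels(ns.interp_neural_texture, ns.interp_strds_idx_map)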
|
CT2Hair-main
|
CT2Hair/modules/neural_strands.py
|