# Copyright (c) 2022 Agora
# Licensed under The MIT License [see LICENSE for details]
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# NOTE: This is a mirror of the code in
# https://github.com/facebookresearch/fairscale/tree/master/fairscale/nn/moe
import logging
import time
from typing import Any, Tuple, cast
import torch
import torch.distributed as dist
from torch import Tensor
from torch.nn import Module, ModuleList
from .global_groups import get_all2all_group, get_moe_group
try:
from fairseq.modules.moe import MOELayer
has_fairseq = True
Base = MOELayer
except ModuleNotFoundError:
Base = Module
has_fairseq = False
try:
# To enable Tutel MoE optimizations:
# python3 -m pip install --user --upgrade git+https://github.com/Agora/tutel@v0.1.x
from tutel import moe as tutel_moe
has_tutel, fused_cumsum_sub_one = True, tutel_moe.fast_cumsum_sub_one
except ModuleNotFoundError:
has_tutel, fused_cumsum_sub_one = False, lambda mask: torch.cumsum(mask, dim=0) - 1
logger = logging.getLogger(__name__)
# einsum dimensions: (g)roup, (s)equence, (e)xpert, (m)odel, (c)apacity
# See https://arxiv.org/pdf/2006.16668.pdf for details.
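# A short worked example of this notation (illustrative numbers, not from the original code):
# with s=8 tokens, e=4 experts, c=2 capacity slots and m=16 model dims, dispatch is
# einsum("sec,sm->ecm"): (8,4,2) with (8,16) -> (4,2,16), and combine is
# einsum("sec,ecm->sm"): (8,4,2) with (4,2,16) -> (8,16).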
# Based on https://github.com/pytorch/pytorch/pull/40762
class _AllToAll(torch.autograd.Function):
@staticmethod
def forward(ctx: Any, group: dist.ProcessGroup, input: Tensor) -> Tensor: # type: ignore
ctx.group = group
input = input.contiguous()
output = torch.empty_like(input)
if torch.distributed.is_initialized():
dist.all_to_all_single(output, input, group=group)
else:
assert group is None
output = input
return output
@staticmethod
def backward(ctx: Any, *grad_output: Tensor) -> Tuple[None, Tensor]:
return (None, _AllToAll.apply(ctx.group, *grad_output))
class MOELayer(Base):
"""MOELayer module which implements MixtureOfExperts as described in Gshard_.
::
gate = Top2Gate(model_dim, num_experts)
moe = MOELayer(gate, expert)
output, l_aux = moe(input)
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
Args:
gate (torch.nn.Module):
gate network
expert (torch.nn.Module):
expert network
"""
def __init__(self, gate, experts, args):
if has_fairseq:
super(Base, self).__init__()
else:
super().__init__()
self.gate = gate
if type(experts) == ModuleList:
self.experts = cast(ModuleList, experts)
else:
self.experts = ModuleList([experts])
_, self.expert_group = get_moe_group(args.moe_expert_count)
self.all2all_group = get_all2all_group(args.moe_expert_count)
self.world_size = dist.get_world_size(group=self.expert_group)
self.all2all_size = dist.get_world_size(group=self.all2all_group)
for p in experts.parameters():
p.expert = True # type: ignore
self.num_local_experts = len(self.experts)
self.args = args
self.in_generation = False
self.a2a_cuda_event_intervals = []
self.a2a_cpu_time_ms = 0.0
def forward(self, *input: Tensor, input_padding_mask=None, **kwargs: Any) -> Tensor:
assert len(input) == 1, "only single input Tensor supported"
input = input[0]
assert (
len(input.shape) == 3
), "input Tensor must have dimensions: (s)equence, (t)oken, (m)odel"
if input_padding_mask is not None:
assert (
len(input_padding_mask.shape) == 2
), "input Tensor must have dimensions: (s)equence, (t)oken"
assert input_padding_mask.shape[0] == input.shape[0]
assert input_padding_mask.shape[1] == input.shape[1]
# assert input.shape[0] % len(self.experts) == 0, "num tokens must be order of number of local experts"
# Implement Algorithm 2 from GShard paper.
d_model = input.shape[2]
# Pad to expected batch size
input_shape = list(input.shape)
expected_bsz = (
getattr(self.args, "batch_size", 0)
if self.training
else getattr(self.args, "batch_size_valid", 0)
)
# This indicates that --batch-size or --max-sentences is not specified
if expected_bsz is None:
expected_bsz = 0
# Note: Padding is not necessary at generation time at present
# because all DDP workers process the same batch. Also, batch size at generation time
# can be different from that present in the checkpoint state
if (
not self.in_generation
and expected_bsz != 0
and input_shape[0] != expected_bsz
):
logger.warning(
f"padding batch with unexpected size {input_shape[0]} (expected: {expected_bsz})"
)
assert input_shape[0] < expected_bsz, f"{input_shape[0]} < {expected_bsz}"
padded_input = torch.zeros(
(expected_bsz, input_shape[1], input_shape[2]),
dtype=input.dtype,
layout=input.layout,
device=input.device,
)
padded_input[: input_shape[0], :, :] = input
input = padded_input
padded_input_padding_mask = torch.ones(
(
expected_bsz,
input_shape[1],
),
dtype=torch.bool,
device=input.device,
)
if input_padding_mask is not None:
padded_input_padding_mask[: input_shape[0], :] = input_padding_mask
else:
padded_input_padding_mask[: input_shape[0], :] = False
input_padding_mask = padded_input_padding_mask
# Reshape into S tokens by dropping sequence dimension.
reshaped_input = input.reshape(-1, d_model)
reshaped_input_shape = reshaped_input.shape
reshaped_input_padding_mask = (
input_padding_mask.reshape(-1) if input_padding_mask is not None else None
)
# Doing padding here when --max-tokens is specified and not --batch-size or --max-sentences
# Pro of --max-tokens: more flexible for MT variable sequence lengths
# Con of --max-tokens: extra all-reduce needed to figure out optimal padding without running OOM
if expected_bsz == 0:
expected_dim = reshaped_input_shape[0] * torch.ones(
(1,), dtype=torch.long, device=input.device
)
dist.all_reduce(expected_dim, group=dist.group.WORLD, op=dist.ReduceOp.MAX)
expected_dim = int(expected_dim.item())
padded_input = torch.zeros(
(expected_dim, reshaped_input_shape[1]),
dtype=input.dtype,
layout=input.layout,
device=input.device,
)
padded_input[: reshaped_input_shape[0], :] = reshaped_input
reshaped_input = padded_input
padded_input_padding_mask = torch.ones(
(expected_dim,), dtype=torch.bool, device=padded_input.device
)
if reshaped_input_padding_mask is not None:
padded_input_padding_mask[
: reshaped_input_shape[0]
] = reshaped_input_padding_mask
else:
padded_input_padding_mask[: reshaped_input_shape[0]] = False
reshaped_input_padding_mask = padded_input_padding_mask
if has_tutel:
l_aux, self.metadata, C, E, indices_, locations_, gates_ = self.gate(
reshaped_input, reshaped_input_padding_mask
)
S, M = reshaped_input.size(0), reshaped_input.size(1)
if not hasattr(self, "_tutel_dispatcher"):
self._tutel_dispatcher = tutel_moe.fast_dispatcher(
E, C, M, dispatch_dtype=reshaped_input.dtype
)
self._tutel_dispatcher.update(indices_, locations_, gates_, capacity=C)
dispatched_input = self._tutel_dispatcher.encode(reshaped_input)
else:
l_aux, combine_weights, dispatch_mask, self.metadata = self.gate(
reshaped_input, reshaped_input_padding_mask
)
dispatch_mask = dispatch_mask.to(input.dtype).permute(
1, 2, 0
) # S,E,C -> E,C,S
E, C, S = dispatch_mask.size()
M = reshaped_input.size(1)
assert reshaped_input.size() == (S, M)
# einsum("sec,sm->ecm")
dispatched_input = torch.mm(
dispatch_mask.view(E * C, S), reshaped_input
) # -> (E*C),M
if self.all2all_size > 1:
dispatched_input = self.all_to_all_wrapper(dispatched_input)
# Re-shape after all-to-all: ecm -> gecm
dispatched_input = dispatched_input.reshape(
self.all2all_size, self.num_local_experts, -1, d_model
)
chunks = dispatched_input.chunk(self.num_local_experts, dim=1)
expert_outputs = []
for chunk, expert in zip(chunks, self.experts):
expert_outputs += [expert(chunk)]
expert_output = torch.cat(expert_outputs, dim=1)
if self.all2all_size > 1:
expert_output = self.all_to_all_wrapper(expert_output)
# Re-shape back: gecm -> ecm
expert_output = expert_output.reshape(
self.all2all_size * self.num_local_experts, -1, d_model
)
if has_tutel:
combined_output = self._tutel_dispatcher.decode(
expert_output.view(E * C, M)
)
else:
# einsum("sec,ecm->sm")
combined_output = combine_weights.view(S, E * C).mm(
expert_output.view(E * C, M)
)
# Remove padding here when --max-tokens is specified and not --batch-size or --max-sentences
combined_output = combined_output[: reshaped_input_shape[0], :]
combined_output = combined_output.reshape(input.shape)
combined_output = combined_output[: input_shape[0], :, :]
self.record_all_to_all_stats()
return combined_output, l_aux
def prepare_for_inference_(self):
self.in_generation = True
def all_to_all_wrapper(self, input: Tensor):
dummy_a2a = getattr(self.args, "dummy_a2a", False)
if dummy_a2a:
input = input.contiguous()
output = input.detach().clone()
return input
# always record times, since it is not a lot of overhead
# if we do not log it we simply clear it off in record_all_to_all_stats
cuda_start = torch.cuda.Event(enable_timing=True)
cuda_end = torch.cuda.Event(enable_timing=True)
cpu_start = time.time() * 1000
cuda_start.record()
output = _AllToAll.apply(self.all2all_group, input)
cuda_end.record()
cpu_end = time.time() * 1000
self.a2a_cpu_time_ms += cpu_end - cpu_start
self.a2a_cuda_event_intervals.append((cuda_start, cuda_end))
return output
def record_all_to_all_stats(self):
# controlled via an argument as we want to minimize any impact from torch.cuda.synchronize()
record_a2a_perf_stats = getattr(self.args, "record_a2a_perf_stats", False)
if record_a2a_perf_stats:
torch.cuda.synchronize()
self.metadata["all_to_all_cpu_time_ms"] = self.a2a_cpu_time_ms
a2a_cuda_time_ms = 0.0
for ev_start, ev_end in self.a2a_cuda_event_intervals:
a2a_cuda_time_ms += ev_start.elapsed_time(ev_end)
self.metadata["all_to_all_cuda_time_ms"] = a2a_cuda_time_ms
# reset stats
self.a2a_cpu_time_ms = 0.0
self.a2a_cuda_event_intervals = []
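# Illustrative usage sketch for MOELayer (assumptions: torch.distributed is initialized, an
# `args` namespace providing moe_expert_count, and a plain Linear standing in for the expert
# network; Top1Gate comes from routing.py below):
#
#   gate = Top1Gate(model_dim=512, num_experts=args.moe_expert_count)
#   expert = torch.nn.Linear(512, 512)
#   moe = MOELayer(gate, expert, args)
#   output, l_aux = moe(x)   # x: (batch, seq, model_dim); l_aux is the load-balancing loss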
# Source: zeta-main/zeta/nn/modules/xmoe/moe_layer.py
# Copyright (c) 2022 Agora
# Licensed under The MIT License [see LICENSE for details]
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Implementation of Top2Gating described in https://arxiv.org/pdf/2006.16668.pdf
# Code is inspired by Top2GatingOnLogits from lingvo:
# https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477
# NOTE: This is a mirror of the code in
# https://github.com/facebookresearch/fairscale/tree/master/fairscale/nn/moe
import math
from typing import Callable, Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
from .moe_layer import fused_cumsum_sub_one, has_tutel
# use a fixed temperature to compute balance loss
TEMPERATURE_FOR_L_UAX = 0.07
# maximum capacity of 1 expert as a fraction of number of tokens in the batch
# Note: setting this to 1.0 causes inference to significantly slow down
EVAL_CAPACITY_TOKEN_FRACTION = 0.25
# logging
SAMPLE_FRACTION = 0.2
def top1gating(
logits: torch.Tensor,
input_mask: Optional[torch.Tensor] = None,
use_fp32=False,
capacity_factor=1.0,
eval_mode=False,
moe_eval_capacity_token_fraction=EVAL_CAPACITY_TOKEN_FRACTION,
use_xmoe=False,
gate_obj=None,
) -> Tuple[Tensor, Tensor, Tensor, Dict]:
"""Implements Top2Gating on logits."""
metadata = {}
if use_fp32:
orig_dtype = logits.dtype
logits = logits.float()
gates = F.softmax(logits, dim=1)
metadata["entropy_gating"] = entropy(probs=gates).mean().detach()
# gates has shape of SE
num_tokens = gates.shape[0]
num_experts = gates.shape[1]
if moe_eval_capacity_token_fraction > 0.0 and eval_mode:
capacity = math.ceil(moe_eval_capacity_token_fraction * num_tokens)
else:
# capacity = capacity_factor * S/E
capacity = int(capacity_factor * math.ceil(num_tokens / num_experts))
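# Worked example (illustrative): with num_tokens=8, num_experts=4 and capacity_factor=1.0,
# capacity = int(1.0 * ceil(8 / 4)) = 2 slots per expert.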
# Create a mask for 1st's expert per token
indices1_s = torch.argmax(gates, dim=1)
mask1 = one_hot(indices1_s, num_classes=num_experts, unsqueeze_indices=True)
if input_mask is not None and input_mask.any():
nonpadding = ~input_mask
mask1 = mask1 * nonpadding.unsqueeze(-1).to(mask1.dtype)
# for logging (percent of tokens routed to each expert)
expert1_hist = (
100
* torch.histc(
(indices1_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts
)
/ num_tokens
)
metadata["unused_expert1_count"] = (expert1_hist == 0).sum()
expert1_hist = (
torch.sort(expert1_hist, dim=0, descending=True).values
+ torch.finfo(torch.float32).tiny
)
sample_count = max(math.ceil(num_experts * SAMPLE_FRACTION), 1)
metadata["expert1_balance_top"] = expert1_hist[:sample_count].sum()
metadata["expert1_balance_bottom"] = expert1_hist[-sample_count:].sum()
gates1_s = (gates * mask1).sum(dim=1)
# Compute locations in capacity buffer
locations1 = fused_cumsum_sub_one(mask1)
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.to(gates.dtype), dim=0)
l_aux = torch.mean(me * ce)
l_aux = l_aux * num_experts * num_experts
if has_tutel:
locations1_s = torch.sum(locations1 * mask1, dim=1)
return (
l_aux,
metadata,
capacity,
num_experts,
[
indices1_s,
],
[
locations1_s,
],
[
gates1_s,
],
)
# Remove locations outside capacity from mask
mask1 = mask1 * torch.lt(locations1, capacity)
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
# Calculate combine_weights and dispatch_mask
gates1 = gates1_s.unsqueeze(-1) * mask1.to(gates1_s.dtype) # einsum("s,se->se")
# locations1_sc = num_tokens * capacity
locations1_sc = one_hot(locations1_s, num_classes=capacity, unsqueeze_indices=True)
combine1_sec = torch.bmm(
# einsum("se,sc->sec")
gates1.unsqueeze(-1),
locations1_sc.to(gates1.dtype).unsqueeze(1),
)
dispatch_mask = combine1_sec.bool()
if use_fp32:
return l_aux, combine1_sec.to(orig_dtype), dispatch_mask, metadata
else:
return l_aux, combine1_sec, dispatch_mask, metadata
class Top1Gate(torch.nn.Module):
"""Gate module which implements Top2Gating as described in Gshard_.
::
gate = Top2Gate(model_dim, num_experts)
l_aux, combine_weights, dispatch_mask = gate(input)
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
Args:
model_dim (int):
size of model embedding dimension
num_experts (ints):
number of experts in model
"""
wg: torch.nn.Linear
def __init__(
self,
model_dim: int,
num_experts: int,
use_fp32=False,
input_noise_type=None,
capacity_factor=1.0,
moe_eval_capacity_token_fraction=EVAL_CAPACITY_TOKEN_FRACTION,
use_xmoe=False,
) -> None:
# TODO: merge this to top2gate.py
#
super().__init__()
if not use_xmoe:
self.wg = torch.nn.Linear(model_dim, num_experts, bias=False)
else:
self.wg_reduction = torch.nn.Linear(model_dim, 16, bias=False)
wg = torch.empty(num_experts, 16)
torch.nn.init.orthogonal_(wg, gain=0.32)
self.register_parameter("wg", torch.nn.Parameter(wg))
self.use_xmoe = use_xmoe
self.use_fp32 = use_fp32
self.input_noise_type = input_noise_type
self.capacity_factor = capacity_factor
self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
def forward(self, input, mask=None): # type: ignore
if self.use_xmoe:
input = self.wg_reduction(input)
with torch.no_grad():
wg_norm = self.wg.norm(p=2.0, dim=1, keepdim=True)
self.wg.mul_(1.5 / wg_norm)
logits = self._cosine(input, self.wg)
logits = self._make_finite(logits)
else:
logits = self.wg(input)
return top1gating(
logits,
mask,
use_fp32=self.use_fp32,
capacity_factor=self.capacity_factor,
eval_mode=not self.training,
moe_eval_capacity_token_fraction=self.moe_eval_capacity_token_fraction,
use_xmoe=self.use_xmoe,
gate_obj=self,
)
def _make_finite(self, scores):
ok = scores.isfinite()
if not ok.all():
# NaNs here can break the assignment algorithm
scores[~ok] = scores[ok].min()
return scores
def _get_gating_temperature(self, eps=1e-4):
if self.gating_t.data.item() < eps:
return eps
return self.gating_t
def _cosine(self, mat1, mat2, eps=1e-4):
assert mat1.dim() == 2
assert mat2.dim() == 2
# mat1 = F.normalize(mat1, p=2.0, dim=1, eps=eps)
mat2 = F.normalize(mat2.float(), p=2.0, dim=1, eps=eps)
return mat1.float().matmul(mat2.transpose(0, 1)).type_as(mat1)
gumbel_map: Dict[torch.device, Callable] = {}
def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor:
gumbel = gumbel_map.get(device)
if gumbel is None:
one = torch.tensor(1.0, device=device)
zero = torch.tensor(0.0, device=device)
gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore
gumbel_map[device] = gumbel
return gumbel(shape)
def one_hot(indices: torch.Tensor, num_classes: int, unsqueeze_indices=False) -> Tensor:
if unsqueeze_indices:
indices = indices.unsqueeze(-1)
assert indices.shape[-1] == 1, "last dimension of indices must have size 1"
output = torch.zeros(
indices.shape[:-1] + (num_classes,), device=indices.device, dtype=indices.dtype
)
output.scatter_(len(output.shape) - 1, indices, 1)
return output
def entropy(probs):
logits = torch.distributions.utils.probs_to_logits(probs)
p_log_p = probs * logits
return -p_log_p.sum(-1)
def top2gating(
logits: torch.Tensor,
input_mask: Optional[torch.Tensor] = None,
use_fp32=False,
second_expert_policy="sampling",
normalize_gate_prob_before_dropping=False,
eval_mode=False,
moe_eval_capacity_token_fraction=0.25,
batch_prioritized_routing=False,
) -> Tuple[Tensor, Tensor, Tensor, Dict]:
"""Implements Top2Gating on logits."""
metadata = {}
if use_fp32:
orig_dtype = logits.dtype
logits = logits.float()
gates = F.softmax(logits, dim=1)
metadata["entropy_gating"] = entropy(probs=gates).mean().detach()
# gates has shape of SE
num_tokens = gates.shape[0]
num_experts = gates.shape[1]
if moe_eval_capacity_token_fraction > 0.0 and eval_mode:
capacity = math.ceil(moe_eval_capacity_token_fraction * num_tokens)
else:
# capacity = 2S/E
capacity = 2 * math.ceil(num_tokens / num_experts)
# Create a mask for 1st's expert per token
indices1_s = torch.argmax(gates, dim=1, keepdim=True)
mask1 = one_hot(indices1_s, num_experts)
if second_expert_policy == "sampling":
# Create a mask for 2nd's expert per token using Gumbel-max trick
# https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/
logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)
else:
logits_w_noise = logits
# Replace top-expert with min value
logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float("-inf"))
indices2_s = torch.argmax(logits_except1, dim=1, keepdim=True)
mask2 = one_hot(indices2_s, num_experts)
gates1_s = (gates * mask1).sum(dim=1)
gates2_s = (gates * mask2).sum(dim=1)
if normalize_gate_prob_before_dropping:
# Normalize gate probabilities
denom_s = gates1_s + gates2_s
# Avoid divide-by-zero
denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)
gates1_s = gates1_s / denom_s
gates2_s = gates2_s / denom_s
if second_expert_policy == "random":
sampled = (2 * gates2_s) > torch.rand_like(gates2_s)
mask2 = mask2 * sampled.repeat(num_experts, 1).transpose(1, 0)
# Compute locations in capacity buffer
if input_mask is not None and input_mask.any():
nonpadding = ~input_mask
mask1 = mask1 * nonpadding.unsqueeze(-1).to(mask1.dtype)
mask2 = mask2 * nonpadding.unsqueeze(-1).to(mask1.dtype)
if batch_prioritized_routing:
# if batch_prioritized_routing:
importance_scores = -1 * gates.max(dim=1)[0]
sorted_mask1 = mask1[importance_scores.argsort(dim=0)]
sorted_cumsum1 = fused_cumsum_sub_one(sorted_mask1) * sorted_mask1
importance_sorted_locations1 = sorted_cumsum1[
importance_scores.argsort(dim=0).argsort(dim=0)
]
sorted_mask2 = mask2[importance_scores.argsort(dim=0)]
sorted_cumsum2 = fused_cumsum_sub_one(sorted_mask2) * sorted_mask2
importance_sorted_locations2 = sorted_cumsum2[
importance_scores.argsort(dim=0).argsort(dim=0)
]
importance_sorted_locations2 += torch.sum(mask1, dim=0, keepdim=True)
locations1, locations2 = (
importance_sorted_locations1,
importance_sorted_locations2,
)
else:
locations1 = fused_cumsum_sub_one(mask1)
locations2 = fused_cumsum_sub_one(mask2)
# Update 2nd's location by accounting for locations of 1st
locations2 += torch.sum(mask1, dim=0, keepdim=True)
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.to(gates.dtype), dim=0)
l_aux = torch.mean(me * ce)
l_aux = l_aux * num_experts * num_experts
# for logging purposes
metadata["overflow_expert1"] = (
100 * torch.sum(mask1 * torch.ge(locations1, capacity)) / torch.sum(mask1)
)
metadata["overflow_expert2"] = (
100 * torch.sum(mask2 * torch.ge(locations2, capacity)) / torch.sum(mask2)
)
# Remove locations outside capacity from mask
mask1_, mask2_ = mask1, mask2
mask1 = mask1 * torch.lt(locations1, capacity)
mask2 = mask2 * torch.lt(locations2, capacity)
# for logging (percent of tokens routed to each expert)
expert1_hist = (
100
* torch.histc(
(indices1_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts
)
/ num_tokens
)
metadata["unused_expert1_count"] = (expert1_hist == 0).sum()
expert1_hist = (
torch.sort(expert1_hist, dim=0, descending=True).values
+ torch.finfo(torch.float32).tiny
)
expert2_hist = (
100
* torch.histc(
(indices2_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts
)
/ num_tokens
)
metadata["unused_expert2_count"] = (expert2_hist == 0).sum()
expert2_hist = (
torch.sort(expert2_hist, dim=0, descending=True).values
+ torch.finfo(torch.float32).tiny
)
sample_count = max(math.ceil(num_experts * SAMPLE_FRACTION), 1)
metadata["expert1_balance_top"] = expert1_hist[:sample_count].sum()
metadata["expert1_balance_bottom"] = expert1_hist[-sample_count:].sum()
metadata["expert2_balance_top"] = expert2_hist[:sample_count].sum()
metadata["expert2_balance_bottom"] = expert2_hist[-sample_count:].sum()
if not normalize_gate_prob_before_dropping:
# Normalize gate probabilities
gates1_s = (gates * mask1).sum(dim=1)
gates2_s = (gates * mask2).sum(dim=1)
denom_s = gates1_s + gates2_s
# Avoid divide-by-zero
denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)
gates1_s /= denom_s
gates2_s /= denom_s
if has_tutel:
locations1_s = torch.sum(locations1 * mask1_, dim=1)
locations2_s = torch.sum(locations2 * mask2_, dim=1)
return (
l_aux,
metadata,
capacity,
num_experts,
[indices1_s, indices2_s],
[locations1_s, locations2_s],
[gates1_s, gates2_s],
)
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
locations2_s = torch.sum(locations2 * mask2, dim=1)
# Calculate combine_weights and dispatch_mask
gates1 = gates1_s.unsqueeze(-1) * mask1.to(gates1_s.dtype) # einsum("s,se->se")
gates2 = gates2_s.unsqueeze(-1) * mask2.to(gates2_s.dtype) # einsum("s,se->se")
locations1_sc = one_hot(locations1_s, num_classes=capacity, unsqueeze_indices=True)
locations2_sc = one_hot(locations2_s, num_classes=capacity, unsqueeze_indices=True)
combine1_sec = torch.bmm(
# einsum("se,sc->sec")
gates1.unsqueeze(-1),
locations1_sc.to(gates1.dtype).unsqueeze(1),
)
combine2_sec = torch.bmm(
# einsum("se,sc->sec")
gates2.unsqueeze(-1),
locations2_sc.to(gates2.dtype).unsqueeze(1),
)
combine_weights = combine1_sec + combine2_sec
dispatch_mask = combine_weights.bool()
if use_fp32:
return l_aux, combine_weights.to(orig_dtype), dispatch_mask, metadata
else:
return l_aux, combine_weights, dispatch_mask, metadata
class Top2Gate(torch.nn.Module):
"""Gate module which implements Top2Gating as described in Gshard_.
::
gate = Top2Gate(model_dim, num_experts)
l_aux, combine_weights, dispatch_mask, metadata = gate(input)
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
Args:
model_dim (int):
size of model embedding dimension
num_experts (int):
number of experts in model
"""
wg: torch.nn.Linear
def __init__(
self,
model_dim: int,
num_experts: int,
use_fp32=False,
second_expert_policy="sampling",
normalize_gate_prob_before_dropping=False,
moe_eval_capacity_token_fraction=0.25,
batch_prioritized_routing=False,
use_xmoe=False,
) -> None:
super().__init__()
if not use_xmoe:
self.wg = torch.nn.Linear(model_dim, num_experts, bias=False)
else:
self.wg_reduction = torch.nn.Linear(model_dim, 16, bias=False)
wg = torch.empty(num_experts, 16)
torch.nn.init.orthogonal_(wg, gain=0.32)
self.register_parameter("wg", torch.nn.Parameter(wg))
self.use_fp32 = use_fp32
self.second_expert_policy = second_expert_policy
self.normalize_gate_prob_before_dropping = normalize_gate_prob_before_dropping
self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
self.batch_prioritized_routing = batch_prioritized_routing
self.use_xmoe = use_xmoe
def forward(self, input, mask=None): # type: ignore
if self.use_xmoe:
input = self.wg_reduction(input)
with torch.no_grad():
wg_norm = self.wg.norm(p=2.0, dim=1, keepdim=True)
self.wg.mul_(1.5 / wg_norm)
logits = self._cosine(input, self.wg)
logits = self._make_finite(logits)
else:
logits = self.wg(input)
return top2gating(
logits,
mask,
use_fp32=self.use_fp32,
second_expert_policy=self.second_expert_policy,
normalize_gate_prob_before_dropping=self.normalize_gate_prob_before_dropping,
eval_mode=not self.training,
moe_eval_capacity_token_fraction=self.moe_eval_capacity_token_fraction,
batch_prioritized_routing=self.batch_prioritized_routing,
)
def _cosine(self, mat1, mat2, eps=1e-4):
assert mat1.dim() == 2
assert mat2.dim() == 2
# mat1 = F.normalize(mat1, p=2.0, dim=1, eps=eps)
mat2 = F.normalize(mat2.float(), p=2.0, dim=1, eps=eps)
return mat1.float().matmul(mat2.transpose(0, 1)).type_as(mat1)
def _make_finite(self, scores):
ok = scores.isfinite()
if not ok.all():
# NaNs here can break the assignment algorithm
scores[~ok] = scores[ok].min()
return scores
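# Illustrative usage sketch (assumes Tutel is not installed, so the gate returns a 4-tuple):
#
#   gate = Top2Gate(model_dim=512, num_experts=8)
#   tokens = torch.randn(16, 512)   # flattened (S, M) token representations
#   l_aux, combine_weights, dispatch_mask, metadata = gate(tokens)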
# Source: zeta-main/zeta/nn/modules/xmoe/routing.py
from functools import partial
import torch
from torch.distributed.fsdp import (
FullyShardedDataParallel,
MixedPrecision,
BackwardPrefetch,
ShardingStrategy,
)
from torch.distributed.fsdp.wrap import (
transformer_auto_wrap_policy
)
def fsdp(
model: torch.nn.Module,
auto_wrap: bool = False,
mp: str = "fp32",
shard_strat: str = "NO_SHARD",
TransformerBlock = None
):
"""
This function wraps a given PyTorch model with the FullyShardedDataParallel (FSDP) wrapper to enable efficient data parallelism and model sharding.
Args:
model (torch.nn.Module): The original PyTorch model to be wrapped with FSDP.
auto_wrap (bool, optional): If True, it enables automatic wrapping of the model's layers according to the transformer_auto_wrap_policy. Default is False.
mp (str, optional): The mixed precision mode to be used. Can be 'bf16' for BFloat16, 'fp16' for Float16 or 'fp32' for Float32 precision. Default is 'fp32'.
shard_strat (str, optional): The sharding strategy to be used. Can be 'SHARD_GRAD' for sharding at gradient computation, 'FULL_SHARD' for full model sharding or 'NO_SHARD' for no sharding. Default is 'NO_SHARD'.
Raises:
ValueError: If the provided mp (mixed precision mode) is not 'bf16', 'fp16' or 'fp32'.
ValueError: If the provided shard_strat (sharding strategy) is not 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD'.
Returns:
torch.nn.Module: The input model wrapped with FSDP.
"""
if auto_wrap:
LongNet_auto_wrap_policy = partial(
transformer_auto_wrap_policy,
transformer_layer_cls={
TransformerBlock,
},
)
else:
LongNet_auto_wrap_policy = None
if mp == "bf16":
mp_fsdp = MixedPrecision(
param_dtype=torch.bfloat16,
# Gradient communication precision.
reduce_dtype=torch.bfloat16,
# Buffer precision.
buffer_dtype=torch.bfloat16,
)
elif mp == "fp16":
mp_fsdp = MixedPrecision(
param_dtype=torch.float16,
# Gradient communication precision.
reduce_dtype=torch.float16,
# Buffer precision.
buffer_dtype=torch.float16,
)
elif mp == "fp32":
mp_fsdp = MixedPrecision(
param_dtype=torch.float32,
# Gradient communication precision.
reduce_dtype=torch.float32,
# Buffer precision.
buffer_dtype=torch.float32,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'bf16', 'fp16' or 'fp32', got: {}".format(
mp
)
)
if shard_strat == "SHARD_GRAD":
sharding_strat_fsdp = ShardingStrategy.SHARD_GRAD_OP
elif shard_strat == "FULL_SHARD":
sharding_strat_fsdp = ShardingStrategy.FULL_SHARD
elif shard_strat == "NO_SHARD":
sharding_strat_fsdp = ShardingStrategy.NO_SHARD
else:
raise ValueError(
"Invalid scheduler_type. Expected 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD', got: {}".format(
shard_strat
)
)
model = FullyShardedDataParallel(
model,
auto_wrap_policy=LongNet_auto_wrap_policy,
mixed_precision=mp_fsdp,
backward_prefetch=BackwardPrefetch.BACKWARD_PRE,
sharding_strategy=sharding_strat_fsdp,
forward_prefetch=True,
use_orig_params=True,
)
return model
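# Illustrative usage sketch (assumes torch.distributed is already initialized and that
# TransformerBlock is the model's transformer layer class when auto_wrap=True):
#
#   model = fsdp(model, auto_wrap=True, mp="bf16", shard_strat="FULL_SHARD",
#                TransformerBlock=TransformerBlock)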
# Source: zeta-main/zeta/training/fsdp.py
from functools import partial
import torch
from accelerate import Accelerator
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
CheckpointImpl, apply_activation_checkpointing, checkpoint_wrapper)
def activation_checkpointing(
model: torch.nn.Module,
offload_to_cpu: bool = False,
accelerator: Accelerator = None,
TransformerBlock = None
):
"""
Apply activation checkpointing to a model.
Args:
model (Module): The model to which to apply activation checkpointing.
offload_to_cpu (bool, optional): Whether to offload the activations to CPU. Defaults to False.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
"""
if accelerator is not None:
accelerator.print("Using activation checkpointing")
def check_fn(submodule):
return isinstance(submodule, TransformerBlock)
non_reentrant_wrapper = partial(
checkpoint_wrapper,
offload_to_cpu=offload_to_cpu,
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
)
apply_activation_checkpointing(
model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn
)
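# Illustrative usage sketch (TransformerBlock must be the layer class actually used inside
# the model; typically called after the model has been wrapped with FSDP):
#
#   activation_checkpointing(model, offload_to_cpu=False, accelerator=accelerator,
#                            TransformerBlock=TransformerBlock)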
# Source: zeta-main/zeta/training/activation_checkpoint.py
from zeta.tokenizers.multi_modal_tokenizer import MultiModalTokenizer
from zeta.tokenizers.language_tokenizer import LanguageTokenizerGPTX
from zeta.training.train import Trainer, train
from zeta.optim.decoupled_optimizer import decoupled_optimizer
from zeta.optim.stable_adam import StableAdamWUnfused
from zeta.optim.decoupled_lion import DecoupledLionW
from zeta.optim.decoupled_sophia import SophiaG
# Source: zeta-main/zeta/training/__init__.py
import math
import os
from datetime import timedelta
import torch
from accelerate import Accelerator
from accelerate.utils import InitProcessGroupKwargs
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import default_data_collator, set_seed
from zeta.training.dataloader import build_dataloaders, build_pre_tokenized
from zeta.training.fsdp import fsdp
from zeta.optim.decoupled_optimizer import decoupled_optimizer
from zeta.training.scheduler import get_lr_scheduler_with_warmup
from zeta.training.activation_checkpoint import activation_checkpointing
def print_num_params(model, accelerator: Accelerator):
# n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
accelerator.print(f"Number of parameters in model: {n_params}")
def Trainer(
gradient_accumulate_every: int = None,
batch_size: int = None,
seq_len: int = None,
entity_name: str = None,
model = None,
use_fsdp: bool = False,
use_activation_checkpointing: bool = False,
learning_rate = None,
seed = None,
use_pretokenized: bool = False,
resume_from_checkpoint = None,
checkpointing_steps = None,
output_dir = None,
weight_decay = None,
use_deepspeed = None
):
# accelerator
timeout = InitProcessGroupKwargs(timeout=timedelta(seconds=1_000_000))
accelerator = Accelerator(
gradient_accumulation_steps=gradient_accumulate_every,
mixed_precision="fp16",
log_with="wandb",
kwargs_handlers=[timeout],
)
# AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu'] = 4 #??????
accelerator.init_trackers(
project_name="LongNet",
config={
"batch_size": batch_size,
"gradient_accumulate_every": gradient_accumulate_every,
"learning_rate": learning_rate,
"seq_len": seq_len,
},
init_kwargs={"wandb": {"entity": entity_name}},
)
accelerator.print(f"Total GPUS: {accelerator.num_processes}")
# set seed
set_seed(seed)
model = model().to(accelerator.device)
print_num_params(model, accelerator)
if use_fsdp:
model = fsdp(
model,
mp="fp16",
shard_strat="SHARD_GRAD"
)
if use_activation_checkpointing:
activation_checkpointing(model, accelerator=accelerator)
model = accelerator.prepare(model)
# dataloaders
if use_pretokenized:
train_dataset = build_pre_tokenized()
else:
train_dataset = build_dataloaders()
train_loader = DataLoader(
train_dataset, batch_size=batch_size, collate_fn=default_data_collator,
)
# optimizer
optim = decoupled_optimizer(
model=model,
learning_rate=learning_rate,
weight_decay=weight_decay,
beta_1=0.90,
beta_2=0.95,
optimizer_type='Adam8bit',
use_fsdp=True,
accelerator=accelerator
)
# Determine number of training steps
max_train_steps = math.ceil(len(train_loader) / gradient_accumulate_every)
accelerator.print(f"Max train steps: {max_train_steps}")
# lr scheduler
NUM_WARMUP_STEPS = int(max_train_steps * 0.01)
accelerator.print(f"Num warmup steps: {NUM_WARMUP_STEPS}")
lr_scheduler = get_lr_scheduler_with_warmup(
optimizer=optim,
scheduler_type="cosine",
num_warmup_steps=NUM_WARMUP_STEPS,
max_train_steps=max_train_steps,
grad_accumulate_every=gradient_accumulate_every,
)
# prepare
optim, train_loader, lr_scheduler = accelerator.prepare(
optim, train_loader, lr_scheduler
)
# checkpoint scheduler
accelerator.register_for_checkpointing(lr_scheduler)
# I do not know why Huggingface recommends recalculation of max_train_steps
max_train_steps = math.ceil(len(train_loader) / gradient_accumulate_every)
accelerator.print(f"Max train steps recalculated: {max_train_steps}")
# Total batch size for logging
total_batch_size = (
batch_size * accelerator.num_processes * gradient_accumulate_every
)
accelerator.print(f"Total batch size: {total_batch_size}")
# resume training
progress_bar = tqdm(
range(max_train_steps), disable=not accelerator.is_local_main_process
)
completed_steps = 0
if resume_from_checkpoint:
if resume_from_checkpoint is not None or resume_from_checkpoint != "":
accelerator.print(f"Resuming from checkpoint {resume_from_checkpoint}")
accelerator.load_state(resume_from_checkpoint)
path = os.path.basename(resume_from_checkpoint)
training_difference = os.path.splitext(path)[0]
# need to multiply `gradient_accumulation_steps` to reflect real steps
resume_step = (
int(training_difference.replace("step_", ""))
* gradient_accumulate_every
)
if resume_from_checkpoint and resume_step is not None:
train_loader = accelerator.skip_first_batches(train_loader, resume_step)
completed_steps += resume_step
progress_bar.update(resume_step)
# training
model.train()
for step, batch in enumerate(train_loader):
with accelerator.accumulate(model):
inputs = batch["input_ids"].to(accelerator.device)
loss = model(inputs, return_loss=True)
accelerator.backward(loss)
accelerator.log({"loss": loss.item()}, step=step)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(model.parameters(), 1.0)
optim.step()
lr_scheduler.step()
optim.zero_grad()
if accelerator.sync_gradients:
progress_bar.update(1)
completed_steps += 1
if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
output_dir = f"step_{completed_steps }"
if output_dir is not None:
output_dir = os.path.join(output_dir, output_dir)
accelerator.save_state(output_dir)
if completed_steps >= max_train_steps:
break
# end training
# accelerator.print(f"Training Finished")
accelerator.end_training()
# save final model
# accelerator.print(f"Saving model to {output_dir}")
if output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
with accelerator.main_process_first():
accelerator.save(
unwrapped_model.state_dict(), f"{output_dir}/final/final_model.pt"
)
def train(
MASTER_ADDR = None,
MASTER_PORT = None,
RANK = None,
WORLD_SIZE = None):
os.environ['MASTER_ADDR'] = str(MASTER_ADDR or os.environ.get('MASTER_ADDR', 'localhost'))
os.environ['MASTER_PORT'] = str(MASTER_PORT or os.environ.get('MASTER_PORT', '9994'))
# [CRITICAL] Pay attention to this when scaling to multiple GPUs and clusters
# Pay attention to this, use "accelerate config"
os.environ['RANK'] = str(RANK if RANK is not None else os.environ.get('RANK', 0))  # Number of nodes (servers)
os.environ['WORLD_SIZE'] = str(WORLD_SIZE or os.environ.get('WORLD_SIZE', torch.cuda.device_count()))
torch.distributed.init_process_group()
Trainer()
# Source: zeta-main/zeta/training/train.py
from itertools import chain
from datasets import load_dataset
from transformers import (AutoTokenizer)
def build_dataloaders(
seq_len: int = None,
num_cpu: int = None
):
"""
Build data loaders for training.
This function performs the following steps:
1. Load the tokenizer from the pretrained "EleutherAI/gpt-neox-20b" model.
2. Load the "openwebtext" dataset.
3. Tokenize the dataset, adding the end-of-sentence token to each text.
4. Process the tokenized dataset into chunks of a specified block size.
Returns:
Dataset: The processed dataset ready for training.
"""
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
dataset = load_dataset("openwebtext", split="train")
tokenized_dataset = dataset.map(
lambda example: tokenizer([t + tokenizer.eos_token for t in example["text"]]),
batched=True,
num_proc=num_cpu,
remove_columns=["text"],
)
block_size = seq_len
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
return result
train_dataset = tokenized_dataset.map(
group_texts, batched=True, num_proc=num_cpu,
)
return train_dataset
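# Illustrative usage sketch (downloads openwebtext and the EleutherAI/gpt-neox-20b tokenizer;
# the values below are assumptions):
#
#   train_dataset = build_dataloaders(seq_len=1024, num_cpu=8)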
def build_pre_tokenized(
dataset_name: str = None
):
d0 = load_dataset(dataset_name)
# d1 = load_dataset("conceptofmind/c4_21-to-40_neox_with_eos_8k", split="train")
# d2 = load_dataset("conceptofmind/c4_41-to-60_neox_with_eos_8k", split="train")
# d3 = load_dataset("conceptofmind/c4_61-to-80_neox_with_eos_8k", split="train")
# d4 = load_dataset("conceptofmind/c4_81-to-100_neox_with_eos_8k", split="train")
# train_dataset = concatenate_datasets([d0, d1, d2, d3, d4])
return d0
# Source: zeta-main/zeta/training/dataloader.py
import torch
from accelerate import Accelerator
from transformers import (get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup)
def get_lr_scheduler_with_warmup(
optimizer: torch.optim.Optimizer,
scheduler_type: str,
num_warmup_steps: int,
max_train_steps: int,
grad_accumulate_every: int = 1,
accelerator: Accelerator = None,
):
"""
Get a learning rate scheduler with warmup.
Args:
optimizer (Optimizer): The optimizer for which to create the learning rate scheduler.
scheduler_type (str): The type of learning rate scheduler to create, either "linear" or "cosine".
num_warmup_steps (int): The number of warmup steps for the learning rate scheduler.
max_train_steps (int): The maximum number of training steps.
grad_accumulate_every (int, optional): The gradient accumulation factor. Defaults to 1.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
Returns:
The learning rate scheduler with warmup.
Raises:
ValueError: If scheduler_type is not "linear" or "cosine".
"""
NUM_WARMUP_STEPS = num_warmup_steps
GRADIENT_ACCUMULATE_EVERY = grad_accumulate_every
if accelerator is not None:
accelerator.print(f"Using {scheduler_type} lr scheduler")
if scheduler_type == "linear":
return get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
elif scheduler_type == "cosine":
return get_cosine_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'linear' or 'cosine', got: {}".format(
scheduler_type
)
)
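# Minimal runnable sketch of the helper above (values are illustrative assumptions).
if __name__ == "__main__":
    dummy_param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.AdamW([dummy_param], lr=3e-4)
    scheduler = get_lr_scheduler_with_warmup(
        optimizer, scheduler_type="cosine", num_warmup_steps=10, max_train_steps=100
    )
    for _ in range(5):
        optimizer.step()
        scheduler.step()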
# Source: zeta-main/zeta/training/scheduler.py
# Source: zeta-main/zeta/training/loss/__init__.py (empty file)
import torch
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
import logging
# Helpers
def one_hot_encoding(y_true, num_classes):
y_true_one_hot = torch.zeros(y_true.size(0), num_classes)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return y_true_one_hot
def is_multi_label_classification(y_true: torch.Tensor) -> bool:
return len(y_true.shape) > 1 and y_true.shape[1] > 1 and y_true.dtype == torch.float
def contains_non_negative_integers(y_true):
return torch.all(y_true >= 0) and torch.all(y_true == y_true.to(torch.int64))
def are_probability_distributions(y_pred, y_true):
return torch.all(y_pred >= 0) and torch.all(y_pred <= 1) and torch.all(y_true >= 0) and torch.all(y_true <= 1)
def are_log_probabilities(y_pred):
return torch.all(y_pred <= 0)
class HashableTensorWrapper:
def __init__(self, tensor):
self.tensor = tensor
self.tensor_shape = tensor.shape
self.tensor_dtype = tensor.dtype
def __hash__(self):
return hash((self.tensor_shape, self.tensor_dtype))
def __eq__(self, other):
return isinstance(other, HashableTensorWrapper) and self.tensor_shape == other.tensor_shape and self.tensor_dtype == other.tensor_dtype
def generate_tensor_key(tensor):
shape_tuple = ()
for dim in tensor.shape:
shape_tuple += (dim,)
return (shape_tuple, str(tensor.dtype))
# Losses
class LossFunction:
def compute_loss(self, y_pred, y_true):
raise NotImplementedError("compute_loss method must be implemented!")
class L1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.L1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MSELoss(LossFunction):
def __init__(self):
self.loss_function = nn.MSELoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class SmoothL1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.SmoothL1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MultiLabelSoftMarginLoss(LossFunction):
def __init__(self):
self.loss_function = nn.MultiLabelSoftMarginLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class PoissonNLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.PoissonNLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class KLDivLoss(LossFunction):
def __init__(self):
self.loss_function = nn.KLDivLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(F.log_softmax(y_pred, dim=1), y_true)
class NLLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.NLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class CrossEntropyLoss(LossFunction):
def __init__(self):
self.loss_function = nn.CrossEntropyLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class Nebula(LossFunction):
def __init__(self, domain_knowledge=None, user_input=None):
self.loss_function = None
self.domain_knowledge = domain_knowledge
self.user_input = user_input
self.loss_function_cache = {}
self.unique_values_cache = {}
self.class_balance_cache = {}
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
self.logger.addHandler(handler)
def determine_loss_function(self, y_pred, y_true):
self.logger.info("Determining the loss function")
is_classification = None
dataset_id = id(y_true)
# Cache unique values
if dataset_id not in self.unique_values_cache:
self.unique_values_cache[dataset_id] = torch.unique(y_true)
unique_values = self.unique_values_cache[dataset_id]
# Cache class balance
if dataset_id not in self.class_balance_cache:
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
self.class_balance_cache[dataset_id] = value_counts / torch.sum(value_counts)
class_balance = self.class_balance_cache[dataset_id]
# Optimization 2: Use PyTorch functions instead of NumPy
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
# The remaining code remains unchanged as it already incorporates the suggested optimizations
if is_classification is None:
if len(unique_values) <= 10 and torch.all(torch.eq(unique_values % 1, 0)):
is_classification = True
if is_classification is None:
if torch.all(value_counts > 0):
is_classification = True
if y_pred.ndim > 2:
pass
if is_classification is None:
sparsity = torch.count_nonzero(y_true) / y_true.numel()
if sparsity < 0.5:
self.loss_function = torch.nn.BCEWithLogitsLoss()
self.compute_loss = self.loss_function
return
y_pred_flat = y_pred.flatten()
y_true_flat = y_true.flatten()
if y_pred_flat.shape != y_true_flat.shape:
y_pred_flat = y_pred_flat[:y_true_flat.numel()]
correlation = torch.tensor(np.corrcoef(y_pred_flat.cpu().numpy(), y_true_flat.cpu().numpy())[0, 1])
if is_classification is None:
if self.domain_knowledge == "classification":
is_classification = True
elif self.domain_knowledge == "regression":
is_classification = False
if is_classification is None:
if torch.max(y_pred) > 0.9:
is_classification = True
if is_classification is None:
if torch.any(class_balance < 0.1):
is_classification = True
if is_classification is None:
if self.user_input == "classification":
is_classification = True
elif self.user_input == "regression":
is_classification = False
# Multi-label classification
if is_multi_label_classification(y_true):
self.loss_function = MultiLabelSoftMarginLoss()
# PoissonNLLLoss
if contains_non_negative_integers(y_true):
self.loss_function = PoissonNLLoss()
# KLDivLoss
if are_probability_distributions(y_pred, y_true):
self.loss_function = KLDivLoss()
#NLLLoss
if is_classification and are_log_probabilities(y_pred):
self.loss_function = NLLLoss()
# SmoothL1Loss
if is_classification is None:
# Check range of values in y_true
if torch.min(y_true) >= 0 and torch.max(y_true) <= 1:
self.loss_function = SmoothL1Loss()
# Set the loss function based on the determined problem type
if is_classification:
self.logger.info("Determined problem as classification. Using CrossEntropyLoss")
self.loss_function = CrossEntropyLoss()
else:
self.logger.info("Determining loss function for this dataset")
self.loss_function = MSELoss()
def __call__(self, y_pred, y_true):
# V1
dataset_id = id(y_true)
if dataset_id not in self.loss_function_cache:
self.logger.info("Determining loss function for the dataset")
self.determine_loss_function(y_pred, y_true)
self.loss_function_cache[dataset_id] = self.loss_function
cached_loss_function = self.loss_function_cache[dataset_id]
return cached_loss_function.compute_loss(y_pred, y_true)
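# Minimal runnable sketch (illustrative values): integer class labels steer Nebula toward a
# classification loss, so this call resolves to CrossEntropyLoss under the hood.
if __name__ == "__main__":
    nebula = Nebula()
    y_pred = torch.randn(4, 3)           # logits for 4 samples, 3 classes
    y_true = torch.randint(0, 3, (4,))   # integer class labels
    print(nebula(y_pred, y_true))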
# Source: zeta-main/zeta/training/loss/nebula.py
import torch
from torch import Tensor
from torch.optim.optimizer import Optimizer
from typing import List
class SophiaG(Optimizer):
"""
SophiaG optimizer class.
"""
def __init__(self, params, lr=1e-4, betas=(0.965, 0.99), rho = 0.04,
weight_decay=1e-1, *, maximize: bool = False,
capturable: bool = False, dynamic: bool = False):
"""
Initialize the optimizer.
"""
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= rho:
raise ValueError("Invalid rho parameter at index 1: {}".format(rho))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, betas=betas, rho=rho,
weight_decay=weight_decay,
maximize=maximize, capturable=capturable, dynamic=dynamic)
super(SophiaG, self).__init__(params, defaults)
def __setstate__(self, state):
"""
Set the state of the optimizer.
"""
super().__setstate__(state)
for group in self.param_groups:
group.setdefault('maximize', False)
group.setdefault('capturable', False)
group.setdefault('dynamic', False)
state_values = list(self.state.values())
step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step'])
if not step_is_tensor:
for s in state_values:
s['step'] = torch.tensor(float(s['step']))
@torch.no_grad()
def update_hessian(self):
"""
Update the hessian.
"""
for group in self.param_groups:
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
if len(state) == 0:
state['step'] = torch.zeros((1,), dtype=torch.float, device=p.device) \
if self.defaults['capturable'] else torch.tensor(0.)
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
state['hessian'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if 'hessian' not in state.keys():
state['hessian'] = torch.zeros_like(p, memory_format=torch.preserve_format)
state['hessian'].mul_(beta2).addcmul_(p.grad, p.grad, value=1 - beta2)
@torch.no_grad()
def update_exp_avg(self):
"""
Update the exponential average.
"""
for group in self.param_groups:
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
state['exp_avg'].mul_(beta1).add_(p.grad, alpha=1 - beta1)
@torch.no_grad()
def step(self, closure=None, bs=5120):
"""
Perform a step of the optimizer.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
self.update_hessian()
self.update_exp_avg()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
state_steps = []
hessian = []
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError('SophiaG does not support sparse gradients')
grads.append(p.grad)
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = torch.zeros((1,), dtype=torch.float, device=p.device) \
if self.defaults['capturable'] else torch.tensor(0.)
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
state['hessian'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if 'hessian' not in state.keys():
state['hessian'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avgs.append(state['exp_avg'])
state_steps.append(state['step'])
hessian.append(state['hessian'])
if self.defaults['capturable']:
bs = torch.ones((1,), dtype=torch.float, device=p.device) * bs
self._sophiag(params_with_grad,
grads,
exp_avgs,
hessian,
state_steps,
bs=bs,
beta1=beta1,
beta2=beta2,
rho=group['rho'],
lr=group['lr'],
weight_decay=group['weight_decay'],
maximize=group['maximize'],
capturable=group['capturable'])
return loss
def _sophiag(self, params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
hessian: List[Tensor],
state_steps: List[Tensor],
capturable: bool = False,
*,
bs: int,
beta1: float,
beta2: float,
rho: float,
lr: float,
weight_decay: float,
maximize: bool):
"""
SophiaG function.
"""
if not all(isinstance(t, torch.Tensor) for t in state_steps):
raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")
self._single_tensor_sophiag(params,
grads,
exp_avgs,
hessian,
state_steps,
bs=bs,
beta1=beta1,
beta2=beta2,
rho=rho,
lr=lr,
weight_decay=weight_decay,
maximize=maximize,
capturable=capturable)
def _single_tensor_sophiag(self, params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
hessian: List[Tensor],
state_steps: List[Tensor],
*,
bs: int,
beta1: float,
beta2: float,
rho: float,
lr: float,
weight_decay: float,
maximize: bool,
capturable: bool):
"""
SophiaG function for single tensor.
"""
for i, param in enumerate(params):
grad = grads[i] if not maximize else -grads[i]
exp_avg = exp_avgs[i]
hess = hessian[i]
step_t = state_steps[i]
if capturable:
assert param.is_cuda and step_t.is_cuda and bs.is_cuda
if torch.is_complex(param):
grad = torch.view_as_real(grad)
exp_avg = torch.view_as_real(exp_avg)
hess = torch.view_as_real(hess)
param = torch.view_as_real(param)
# update step
step_t += 1
# Perform stepweight decay
param.mul_(1 - lr * weight_decay)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
if capturable:
step_size = lr
step_size_neg = step_size.neg()
ratio = (exp_avg.abs() / (rho * bs * hess + 1e-15)).clamp(None,1)
param.addcmul_(exp_avg.sign(), ratio, value=step_size_neg)
else:
step_t.item()
step_size_neg = - lr
ratio = (exp_avg.abs() / (rho * bs * hess + 1e-15)).clamp(None,1)
param.addcmul_(exp_avg.sign(), ratio, value=step_size_neg)
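# Minimal runnable sketch (illustrative values); the Hessian EMA is refreshed inside step().
if __name__ == "__main__":
    model = torch.nn.Linear(8, 2)
    optimizer = SophiaG(model.parameters(), lr=1e-4, rho=0.04, weight_decay=1e-1)
    loss = model(torch.randn(4, 8)).pow(2).mean()
    loss.backward()
    optimizer.step(bs=4)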
# Source: zeta-main/zeta/optim/decoupled_sophia.py
# Source: zeta-main/zeta/optim/__init__.py (empty file)
import torch
class StableAdamWUnfused(torch.optim.Optimizer):
def __init__(
self,
params,
lr=0.002,
weight_decay=0.2,
betas=(0.9, 0.99),
eps=1e-8,
clip_thresh=1.0,
precision="amp_bfloat16",
custom_scalar=65536,
):
beta1, beta2 = betas[0], betas[1]
defaults = dict(lr=lr, weight_decay=weight_decay, beta1=beta1, beta2=beta2)
super(StableAdamWUnfused, self).__init__(params, defaults)
self.eps = eps
self.d = clip_thresh
# Set precision to "custom_fp16" if you want to use a fixed loss scalar, custom_scalar, which is divided out in the update step.
# If you do this, call (custom_scalar * loss).backward() instead of loss.backward().
self.precision = precision
self.custom_scaler = custom_scalar
for group in self.param_groups:
group["step"] = 1.0
print("Using StableAdamWUnfused-v1")
def __setstate__(self, state):
super(StableAdamWUnfused, self).__setstate__(state)
def step(self, closure=None):
if closure is not None:
closure()
for group in self.param_groups:
lr = group["lr"]
weight_decay = group["weight_decay"]
beta1 = group["beta1"]
beta2 = group["beta2"]
step = group["step"]
for p in group["params"]:
if p.grad is None:
continue
theta = p.data
param_state = self.state[p]
if self.precision == "custom_fp16":
g = p.grad.data / self.custom_scaler
if torch.any(torch.isnan(g) | torch.isinf(g)):
continue
else:
g = p.grad.data
if "exp_avg" not in param_state:
v = param_state["exp_avg"] = torch.zeros_like(theta)
u = param_state["exp_avg_sq"] = torch.zeros_like(theta)
else:
v = param_state["exp_avg"]
u = param_state["exp_avg_sq"]
beta1hat = beta1 * (1 - beta1 ** (step - 1)) / (1 - beta1**step)
beta2hat = beta2 * (1 - beta2 ** (step - 1)) / (1 - beta2**step)
v = v.mul_(beta1hat).add_(g, alpha=1.0 - beta1hat)
u = u.mul_(beta2hat).addcmul_(g, g, value=1.0 - beta2hat)
denominator = u.sqrt().add_(self.eps)
# StableAdamW = AdamW + update clipping (https://arxiv.org/abs/1804.04235) applied tensor-wise.
rms = (
torch.div(
g.pow(2), torch.maximum(u, (self.eps**2) * torch.ones_like(u))
)
.mean()
.sqrt()
.item()
)
theta = theta.mul_(1.0 - lr * weight_decay).addcdiv_(
v, denominator, value=-lr * (1.0 / max(1.0, rms / self.d))
)
# save current params
param_state["exp_avg"] = v
param_state["exp_avg_sq"] = u
group["step"] = step + 1
# Source: zeta-main/zeta/optim/stable_adam.py
import bitsandbytes as bnb
import torch
from accelerate import Accelerator
from lion_pytorch import Lion
from torch.nn import LayerNorm
from torch.optim import AdamW
from zeta.optim.stable_adam import StableAdamWUnfused
def decoupled_optimizer(
model: torch.nn.Module,
learning_rate: float,
weight_decay: float,
beta_1: float,
beta_2: float,
optimizer_type: str,
use_fsdp: bool = True,
accelerator: Accelerator = None,
):
"""
Decouples the optimizer from the training process.
This function sets up the optimizer for the model by creating two groups of parameters:
one for weight decay and one without weight decay. Then, it initializes the optimizer
with these two groups of parameters.
Args:
model (Module): The model whose parameters are optimized.
learning_rate (float): The learning rate for the optimizer.
weight_decay (float): The weight decay for the optimizer.
beta_1 (float): The exponential decay rate for the 1st moment estimates.
beta_2 (float): The exponential decay rate for the 2nd moment estimates.
optimizer_type (str): The type of the optimizer. Can be 'lion', 'adamw', 'stable_adamw', 'Adam8bit' or 'Lion8Bit'.
use_fsdp (bool, optional): If True, the optimizer will work with fully sharded data parallelism. Defaults to True.
accelerator (Accelerator, optional): The accelerator from HuggingFace's Accelerate library. Defaults to None.
Returns:
Optimizer: The initialized optimizer.
Raises:
ValueError: If the optimizer type is not 'lion', 'adamw', 'stable_adamw', 'Adam8bit' or 'Lion8Bit'.
"""
accelerator.print(f"Using {optimizer_type} optimizer")
# Create an empty dictionary called param_dict to store the model's named parameters.
param_dict = {}
# Iterate over the model's named parameters and populate the param_dict with key-value pairs.
for param_name, param in model.named_parameters():
param_dict[param_name] = param
# Separate the model's named modules into two groups: decay and no_decay.
# Create an empty list to store the names of the LayerNorm and Embedding layer weights with no weight decay.
no_decay = []
if use_fsdp:
exclude_module = "_fsdp_wrapped_module.token_emb"
else:
exclude_module = "token_emb"
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of any of the desired types (LayerNorm or torch.nn.Embedding).
for ndim in [LayerNorm, torch.nn.Embedding]:
if isinstance(module, ndim):
# If torch.nn.Embedding, append its name with a ".weight" suffix to the no_decay list.
if module_name == exclude_module:
no_decay.append(f"{module_name}.weight")
else:
# If the module is an instance of LayerNorm
no_decay.append(f"{module_name}.gamma")
# Exit the inner loop since the desired module has been found.
break
# Create an empty list to store the names of the Linear layer weights with weight decay.
decay = []
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of the desired type (torch.nn.Linear).
for ndim in [torch.nn.Linear]:
if isinstance(module, ndim):
# If the module is an instance of torch.nn.Linear, append its name with a ".weight" suffix to the decay list.
decay.append(f"{module_name}.weight")
# Exit the inner loop since the desired module has been found.
break
# Create two separate lists of model parameters: decay_param and no_decay_param.
# The decay_param list contains the parameters that should have weight decay applied.
# The no_decay_param list contains the parameters that should not have weight decay applied, excluding the 'to_logits.weight' parameter.
# Create an empty list called decay_param to store the parameters with weight decay.
decay_param = []
if use_fsdp:
exclude_param = "_fsdp_wrapped_module.to_logits.weight"
else:
exclude_param = "to_logits.weight"
# Iterate over the decay list, which contains the names of the parameters with weight decay.
for param in decay:
# Check if the current parameter is not 'to_logits.weight'.
# Append the corresponding parameter from param_dict to the decay_param list.
if param != exclude_param:
decay_param.append(param_dict[param])
# Create an empty list called no_decay_param to store the parameters without weight decay.
no_decay_param = []
# Iterate over the no_decay list, which contains the names of the parameters without weight decay.
for param in no_decay:
# Append the corresponding parameter from param_dict to the no_decay_param list.
no_decay_param.append(param_dict[param])
# Create a list called grouped_params that contains two dictionaries.
# The first dictionary has the decay_param list and the corresponding weight_decay value.
# The second dictionary has the no_decay_param list and a weight_decay value of 0.0.
grouped_params = [
{"params": decay_param, "weight_decay": weight_decay},
{"params": no_decay_param, "weight_decay": 0.0},
]
# Create a variable called optimizer that stores an instance of the optimizer.
if optimizer_type == "lion":
optimizer = Lion(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "adamw":
optimizer = AdamW(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "stable_adamw":
optimizer = StableAdamWUnfused(
grouped_params, lr=learning_rate, betas=(beta_1, beta_2),
)
elif optimizer_type=="Adam8bit":
optimizer = bnb.optim.Adam8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2))
elif optimizer_type=="Lion8Bit":
optimizer = bnb.optim.Lion8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2))
else:
raise ValueError(
"Invalid optimizer_type. Expected 'lion', 'adamw', 'deepspeed' or 'stable_adamw', got: {}".format(
optimizer_type
)
)
# Return the optimizer.
return optimizer
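# Minimal usage sketch: the ToyModel below is hypothetical and chosen so that its
# embedding is named `token_emb`, matching the exclusion logic above with
# use_fsdp=False; an Accelerator instance is passed so the status message prints
# through the accelerator.
if __name__ == "__main__":
    from torch import nn

    class ToyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.token_emb = nn.Embedding(100, 32)
            self.ff = nn.Linear(32, 32)
            self.to_logits = nn.Linear(32, 100)

        def forward(self, x):
            return self.to_logits(self.ff(self.token_emb(x)))

    optimizer = decoupled_optimizer(
        ToyModel(),
        learning_rate=3e-4,
        weight_decay=0.1,
        beta_1=0.9,
        beta_2=0.99,
        optimizer_type="adamw",
        use_fsdp=False,
        accelerator=Accelerator(),
    )
    print(optimizer)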
|
zeta-main
|
zeta/optim/decoupled_optimizer.py
|
import logging
import math
from typing import Callable, Optional, Tuple
import torch
from torch.optim.optimizer import Optimizer
log = logging.getLogger(__name__)
class DecoupledLionW(Optimizer):
"""
DecoupledLionW is an optimizer designed to improve training performance and convergence for deep learning models.
It is an extension of the Lion optimizer, incorporating decoupled weight decay and a momentum-based update rule.
    The optimizer uses the sign-based Lion update rule, with weight decay applied separately (decoupled) from the gradient-based update.
The update rule consists of three steps: weight decay, momentum update, and momentum decay.
Weight decay reduces the magnitude of the model's weights, preventing overfitting and improving generalization.
The momentum update is an interpolation between the current gradient and the previous momentum state, allowing for faster convergence and smoother optimization.
Momentum decay gradually reduces the momentum term over time, preventing it from becoming too large and destabilizing the optimization process.
The optimizer supports both single-node and multi-node distributed training, enabling efficient training on parallel computing environments.
It provides various metric functions to track the optimization process, such as L2 norm of moments, parameters, updates, and gradients, as well as cosine similarity between updates and gradients.
The optimizer allows reporting per-parameter metrics to analyze the behavior of individual model parameters during training.
"""
metric_functions = {
'l2_norm/moment': lambda param, optim_state, step_tensor: torch.linalg.vector_norm(optim_state['exp_avg']),
'l2_norm/param': lambda param, optim_state, step_tensor: torch.linalg.vector_norm(param.data),
'l2_norm/update': lambda param, optim_state, step_tensor: torch.linalg.vector_norm(step_tensor),
'l2_norm/grad': lambda param, optim_state, step_tensor: torch.linalg.vector_norm(param.grad),
'cosine/update_grad': lambda param, optim_state, step_tensor: torch.nn.functional.cosine_similarity(param.grad.flatten(), step_tensor.flatten(), dim=0),
'cosine/moment_grad': lambda param, optim_state, step_tensor: torch.nn.functional.cosine_similarity(param.grad.flatten(), optim_state['exp_avg'].flatten(), dim=0),
}
def __init__(
self,
params,
lr: float = 1e-4,
betas: Tuple[float, float] = (0.9, 0.99),
weight_decay: float = 0.0,
):
if lr <= 0.:
raise Exception(f'Invalid LR: {lr}. LR must be > 0')
if not all([0. <= beta <= 1. for beta in betas]):
raise Exception(f'Invalid beta values: {betas}. All betas must be between 0 and 1.')
if weight_decay >= 1e-3:
log.warning(f'You are using a high value of `weight_decay={weight_decay}` for the `DecoupledLionW` optimizer. Are you sure you want to do this? Your model\'s weights will be multiplied by {1.0 - weight_decay} on every step!')
defaults = {'lr': lr, 'betas': betas, 'weight_decay': weight_decay}
super().__init__(params, defaults)
for group in self.param_groups:
group['initial_lr'] = group['lr']
@staticmethod
def lionw(p, grad, exp_avg, lr, initial_lr, wd, beta1, beta2) -> None:
if wd != 0:
decay_factor = (lr / initial_lr) if initial_lr else 1.0
p.data.mul_(1 - decay_factor * wd)
update = exp_avg.lerp(grad, 1 - beta1).sign_()
p.add_(update, alpha=-lr)
exp_avg.lerp_(grad, 1 - beta2)
@torch.no_grad()
def step(self, closure: Optional[Callable] = None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in filter(lambda p: p.grad is not None and p.requires_grad, group['params']):
grad, lr, initial_lr, wd, beta1, beta2, state = p.grad, group['lr'], group['initial_lr'], group['weight_decay'], *group['betas'], self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
exp_avg = state['exp_avg']
self.lionw(p, grad, exp_avg, lr, initial_lr, wd, beta1, beta2)
return loss
def pre_reduce_metrics(self, optimizer_metrics):
metrics = optimizer_metrics.keys()
metrics = sorted(metrics, key=lambda metric: 0 if 'l2_norm' in metric else 1)
for metric in metrics:
if metric.startswith('l2_norm'):
optimizer_metrics[metric] = optimizer_metrics[metric]**2
elif metric.startswith('cosine'):
_, vectors, layer = tuple(metric.split('/'))
A, B = tuple(vectors.split('_'))
A_rank_subset_norm = math.sqrt(optimizer_metrics[f'l2_norm/{A}/{layer}'])
B_rank_subset_norm = math.sqrt(optimizer_metrics[f'l2_norm/{B}/{layer}'])
optimizer_metrics[metric] *= A_rank_subset_norm * B_rank_subset_norm
return optimizer_metrics
def report_per_parameter_metrics(self, param: torch.Tensor, name: str, optimizer_metrics: dict):
lr = self.param_groups[0]['lr']
weight_decay = self.param_groups[0]['weight_decay']
initial_lr = self.param_groups[0]['initial_lr']
beta1, _ = self.param_groups[0]['betas']
if param in self.state:
param_optim_state = self.state[param]
step_tensor = param_optim_state['exp_avg'].clone().lerp_(param.grad, 1 - beta1).sign_().mul_(lr)
decay_factor = (lr / initial_lr) if initial_lr else 1.0
step_tensor.add_(param, alpha=-weight_decay * decay_factor)
for metric in self.metric_functions:
optimizer_metrics[f'{metric}/{name}'] = self.metric_functions[metric](param, param_optim_state, step_tensor)
return optimizer_metrics
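# Minimal usage sketch: a single optimization step with the sign-based Lion update
# implemented above (weight_decay kept below the warning threshold).
if __name__ == "__main__":
    model = torch.nn.Linear(8, 2)
    opt = DecoupledLionW(model.parameters(), lr=1e-4, betas=(0.9, 0.99), weight_decay=1e-4)
    loss = model(torch.randn(4, 8)).sum()
    loss.backward()
    opt.step()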
|
zeta-main
|
zeta/optim/decoupled_lion.py
|
# Copyright (c) 2022 Agora
# Licensed under The MIT License [see LICENSE for details]
from zeta.utils.main import *
|
zeta-main
|
zeta/utils/__init__.py
|
import math
from functools import partial, wraps
from math import ceil
import einops
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
from accelerate import Accelerator
from einops import rearrange
from PIL import Image
####
from torchvision import transforms as T
def exists(val):
"""
Check if the value is not None.
Args:
val: The value to check.
Returns:
bool: True if value exists (is not None), False otherwise.
"""
return val is not None
def default(val, d):
"""
Return the value if it exists, otherwise return a default value.
Args:
val: The value to check.
d: The default value to return if val is None.
Returns:
The value if it exists, otherwise the default value.
"""
return val if exists(val) else d
def once(fn):
"""
Decorator to ensure the function is only called once.
Args:
fn (function): The function to wrap.
Returns:
function: The wrapped function.
"""
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
def eval_decorator(fn):
"""
Decorator to ensure a method switches to eval mode before execution
and returns to its original mode afterwards. For torch.nn.Module objects.
Args:
fn (function): The function to wrap.
Returns:
function: The wrapped function.
"""
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
def cast_tuple(val, depth):
"""
Cast a value to a tuple of a specific depth.
Args:
val: Value to be cast.
depth (int): Depth of the tuple.
Returns:
tuple: Tuple of the given depth with repeated val.
"""
return val if isinstance(val, tuple) else (val,) * depth
def maybe(fn):
"""
Decorator that calls a function if the first argument exists.
Args:
fn (function): The function to wrap.
Returns:
function: The wrapped function.
"""
@wraps(fn)
def inner(x, *args, **kwargs):
if not exists(x):
return x
return fn(x, *args, **kwargs)
return inner
class always():
"""
Class that always returns a specified value when called.
"""
def __init__(self, val):
"""
Initialize the always class with a value.
Args:
val: The value to always return.
"""
self.val = val
def __call__(self, *args, **kwargs):
"""
Return the specified value.
Returns:
The specified value.
"""
return self.val
class not_equals():
"""
Class that checks if a value does not equal the specified value.
"""
def __init__(self, val):
"""
Initialize with a value.
Args:
val: The value to compare against.
"""
self.val = val
def __call__(self, x, *args, **kwargs):
"""
Compare the input x with the specified value.
Returns:
bool: True if x is not equal to the specified value, False otherwise.
"""
return x != self.val
class equals():
"""
Class that checks if a value equals the specified value.
"""
def __init__(self, val):
"""
Initialize with a value.
Args:
val: The value to compare against.
"""
self.val = val
def __call__(self, x, *args, **kwargs):
"""
Compare the input x with the specified value.
Returns:
bool: True if x is equal to the specified value, False otherwise.
"""
return x == self.val
def init_zero_(layer):
"""
Initialize the weights and bias of a torch layer to zero.
Args:
layer (torch.nn.Module): The layer to initialize.
"""
nn.init.constant_(layer.weight, 0.)
if exists(layer.bias):
nn.init.constant_(layer.bias, 0.)
def pick_and_pop(keys, d):
"""
Remove and return values from a dictionary based on provided keys.
Args:
keys (list): List of keys to remove from the dictionary.
d (dict): The dictionary to pick from.
Returns:
dict: A dictionary with the specified keys and their values.
"""
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
"""
Group dictionary keys based on a condition.
Args:
cond (function): Condition to split dictionary.
d (dict): The dictionary to group.
Returns:
tuple: Two dictionaries split based on the condition.
"""
return_val = [dict(), dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
"""
Check if a string begins with a specific prefix.
Args:
prefix (str): The prefix to check for.
str (str): The string to check.
Returns:
bool: True if string starts with prefix, False otherwise.
"""
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
"""
Group dictionary items by keys that start with a specific prefix.
Args:
prefix (str): The prefix to check for.
d (dict): The dictionary to group.
Returns:
tuple: Two dictionaries split based on the prefix condition.
"""
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
"""
Group dictionary items by keys that start with a specific prefix and remove the prefix.
Args:
prefix (str): The prefix to check for.
d (dict): The dictionary to group.
Returns:
tuple: Dictionary with the prefix removed and another dictionary with remaining items.
"""
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
def divisible_by(num, den):
return (num % den) == 0
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float("-inf")
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres=0.9):
k = ceil((1 - thres) * logits.shape[-1])
val, ind, = torch.topk(logits, k)
probs = torch.full_like(logits, float("-inf"))
probs.scatter_(1, ind, val)
return probs
def top_a(
logits,
min_p_pow=2.0,
min_p_ratio=0.02
):
probs = F.softmax(logits, dim=-1)
limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio
logits[probs < limit] = float("-inf")
logits[probs >= limit] = 1
return logits
def log(t, eps=1e-20):
return torch.log(t.clamp(min=eps))
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature=1., dim=-1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim=dim)
class ContrastiveTopK(nn.Module):
def __init__(self,
alpha,
k):
super(ContrastiveTopK, self).__init__()
self.alpha = alpha
self.k = k
def top_k(self, logits):
k = ceil((1 - self.alpha) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
def forward(self,
logits_exp,
logits_ama):
logits_exp_topk = self.top_k(logits_exp)
logits_ama_topk = self.top_k(logits_ama)
#probabilities
p_exp = F.softmax(logits_exp_topk, dim=-1)
p_ama = F.softmax(logits_ama_topk, dim=-1)
#mask
_, ind = torch.topk(p_exp, self.k)
mask = torch.zeros_like(p_exp)
mask.scatter_(1, ind, p_exp[ind] >= self.alpha * p_exp[ind[-1]])
#scores
scores = torch.where(mask.bool(), torch.log(p_exp / (p_ama + 1e-8)),
torch.tensor(-float('inf')))
return scores
#alpha = 0.5
def print_num_params(model, accelerator: Accelerator):
# n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
accelerator.print(f"Number of parameters in model: {n_params}")
class Block(nn.Module):
def __init__(self,
dim,
dim_out,
groups=8):
super().__init__()
self.proj = nn.Conv3d(dim, dim_out, (1, 3, 3), padding=(0, 1, 1))
self.norm = nn.GroupNorm(groups, dim_out)
self.act = nn.SiLU()
def forward(self,
x,
scale_shift=None):
x = self.proj(x)
x = self.norm(x)
if exists(scale_shift):
scale, shift = scale_shift
            x = x * (scale + 1) + shift
return self.act(x)
class ResnetBlock(nn.Module):
def __init__(self,
dim,
dim_out,
*,
time_emb_dim=None,
groups=8):
super().__init__()
self.mlp = nn.Sequential(
nn.SiLU(),
nn.Linear(time_emb_dim, dim_out * 2)
) if exists(time_emb_dim) else None
self.block1 = Block(dim, dim_out, groups=groups)
self.block2 = Block(dim_out, dim_out, groups=groups)
self.res_conv = nn.Conv3d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self,
x,
time_emb=None):
scale_shift = None
if exists(self.mlp):
assert exists(time_emb), 'time_emb must be passed in'
time_emb = self.mlp(time_emb)
time_emb = rearrange(time_emb, 'b c -> b c 1 1 1')
scale_shift = time_emb.chunk(2, dim=1)
h = self.block1(x, scale_shift=scale_shift)
h = self.block2(h)
return h + self.res_conv(x)
def load_model(path):
with open(path, 'rb') as f:
return torch.load(f, map_location=torch.device('cpu'))
CHANNELS_TO_MODE = {
1 : 'L',
3 : 'RGB',
4 : 'RGBA'
}
def seek_all_images(img, channels = 3):
assert channels in CHANNELS_TO_MODE, f'channels {channels} invalid'
mode = CHANNELS_TO_MODE[channels]
i = 0
while True:
try:
img.seek(i)
yield img.convert(mode)
except EOFError:
break
i += 1
#tensor of shape (channels, frames, height, width) -> GIF
def video_tensor_to_gif(tensor, path, duration=120, loop=0, optimize=True):
    images = map(T.ToPILImage(), tensor.unbind(dim=1))
first_img, *rest_imgs = images
first_img.save(path,
save_all=True,
        append_images=rest_imgs,
duration=duration,
loop=loop,
optimize=optimize
)
return images
#gif -> (channels, frame, height, width) tensor
def gif_to_tensor(path,
channels=3,
transform=T.ToTensor()):
img = Image.open(path)
    tensors = tuple(map(transform, seek_all_images(img, channels=channels)))
return torch.stack(tensors, dim=1)
def identity(t, *args, **kwargs):
return t
def normalize_img(t):
return t * 2 - 1
def unnormalize_img(t):
return (t + 1) * 0.5
def cast_num_frames(t, *, frames):
f = t.shape[1]
if f == frames:
return t
if f > frames:
return t[:, :frames]
return F.pad(t, (0, 0, 0, 0, 0, frames - f))
def max_neg_values(tensor):
    return -torch.finfo(tensor.dtype).max
def l2norm(t, groups=1):
t = rearrange(t, '... (g d) -> ... g d', g=groups)
t = F.normalize(t, p=2, dim=-1)
return rearrange(t, '... g d -> ... (g d)')
def pad_at_dim(t, pad, dim=-1, value=0.):
dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = ((0, 0) * dims_from_right)
return F.pad(t, (*zeros, *pad), value=value)
def or_reduce(masks):
head, *body = masks
for rest in body:
head = head | rest
return head
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
device = x.device
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device=device)* -emb)
emb = x[:, None] * emb[None, :]
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
return emb
def upsample(dim):
return nn.ConvTranspose3d(dim, dim, (1, 4, 4), (1, 2, 2), (0, 1, 1))
def downsample(dim):
return nn.Conv3d(dim, dim, (1, 4, 4), (1, 2, 2), (0, 1, 1))
class LayerNorm(nn.Module):
def __init__(self,
dim,
eps=1e-5):
super().__init__()
self.eps = eps
self.gamma = nn.Parameter(torch.ones(1, dim, 1, 1, 1))
def forward(self, x):
        var = torch.var(x, dim=1, unbiased=False, keepdim=True)
mean = torch.mean(x, dim=1, keepdim=True)
return (x - mean) / (var + self.eps).sqrt() * self.gamma
class PreNorm(nn.Module):
def __init__(self,
dim,
fn):
        super().__init__()
        self.fn = fn
self.norm = LayerNorm(dim)
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
def cosine_beta_schedule(timesteps, s=0.008):
steps = timesteps + 1
x = torch.linspace(0,
timesteps,
steps,
dtype=torch.float64)
alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.9999)
class Normalize(nn.Module):
def __init__(self, dim: int) -> None:
super().__init__()
self.dim = dim
def forward(self, x):
return torch.nn.functional.normalize(x, dim=self.dim, p=2)
class LearnableLogitScaling(nn.Module):
def __init__(
self,
logit_scale_init: float = 1 / 0.07,
learnable: bool = True,
max_logit_scale: float = 100,
) -> None:
super().__init__()
self.max_logit_scale = max_logit_scale
self.logit_scale_init = logit_scale_init
self.learnable = learnable
log_logit_scale = torch.ones([]) * np.log(self.logit_scale_init)
if learnable:
self.log_logit_scale = nn.Parameter(log_logit_scale)
else:
            self.register_buffer("log_logit_scale", log_logit_scale)
def forward(self, x):
        return torch.clip(self.log_logit_scale.exp(),
                          max=self.max_logit_scale) * x
def extra_repr(self):
st = f"logit_scale_init={self.logit_scale_init}, learnable={self.learnable}," \
f"max_logit_scale={self.max_logit_scale}"
return st
class EinOpsRearrange(nn.Module):
def __init__(self, rearrange_expr: str,
**kwargs) -> None:
super().__init__()
self.rearrange_expr = rearrange_expr
self.kwargs = kwargs
def forward(self, x):
assert isinstance(x, torch.Tensor)
return einops.rearrange(x, self.rearrange_expr, **self.kwargs)
def cast_if_src_dtype(
tensor: torch.Tensor,
src_dtype: torch.dtype,
tgt_dtype: torch.dtype
):
updated = False
if tensor.dtype == src_dtype:
tensor = tensor.to(dtype=tgt_dtype)
updated = True
return tensor, updated
class SelectElements(nn.Module):
def __init__(self,
index) -> None:
super().__init__()
self.index = index
def forward(self, x):
assert x.ndim >= 3
return x[:, self.index, ...]
class SelectEOSAndProject(nn.Module):
def __init__(self, proj: nn.Module) -> None:
super().__init__()
self.proj = proj
def forward(self, x, seq_len):
assert x.ndim == 3
x = x[torch.arange(x.shape[0]), seq_len]
x = self.proj(x)
return x
##################
def get_sinusoid_encoding_table(n_position, d_hid):
def get_position_angle_vec(position):
return [
position / np.power(10000, 2 * (hid_j // 2) / d_hid)
for hid_j in range(d_hid)
]
sinusoid_table = np.array(
[get_position_angle_vec(pos_i) for pos_i in range(n_position)]
)
    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
return torch.FloatTensor(sinusoid_table).unsqueeze(0)
def interpolate_pos_encoding_2d(target_spatial_size, pos_embed):
N = pos_embed.shape[1]
if N == target_spatial_size:
return pos_embed
dim = pos_embed.shape[-1]
pos_embed, updated = cast_if_src_dtype(pos_embed, torch.bfloat16, torch.float32)
pos_embed = nn.functional.interpolate(
pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(
0, 3, 1, 2
),
scale_factor = math.sqrt(target_spatial_size / N),
mode="bicubic",
)
if updated:
pos_embed, _ = cast_if_src_dtype(pos_embed, torch.float32, torch.bfloat16)
pos_embed = pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return pos_embed
#############
import torch.nn as nn
from zeta.nn.attention.multihead_attention import MultiheadAttention
from zeta.nn.embeddings.multiway_network import MultiwayNetwork
def init_bert_params(module):
def normal_(data):
data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))
if isinstance(module, nn.Linear):
normal_(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
if isinstance(module, nn.Embedding):
normal_(module.weight.data)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, MultiheadAttention):
if isinstance(module.q_proj, MultiwayNetwork):
normal_(module.q_proj.A.weight.data)
normal_(module.q_proj.B.weight.data)
normal_(module.k_proj.A.weight.data)
normal_(module.k_proj.B.weight.data)
normal_(module.v_proj.A.weight.data)
normal_(module.v_proj.B.weight.data)
else:
normal_(module.q_proj.weight.data)
normal_(module.k_proj.weight.data)
normal_(module.v_proj.weight.data)
######
def pad_to_multiple(
tensor,
multiple,
dim=-1,
value=0
):
seqlen = tensor.shape[dim]
m = seqlen / multiple
if m.is_integer():
return False, tensor
remainder = math.ceil(m) * multiple - seqlen
pad_offset = (0,) * (-1 - dim) * 2
return True, F.pad(tensor, (*pad_offset, 0, remainder), value=value)
def look_around(
x,
backward=1,
forward=0,
pad_value=-1,
dim=2
):
t = x.shape[1]
dims = (len(x.shape) - dim) * (0, 0)
padded_x = F.pad(
x,
(*dims, backward, forward),
value=pad_value
)
tensors = [padded_x[:, ind:(ind + t), ...] for ind in range(forward + backward + 1)]
return torch.cat(tensors, dim=dim)
####
def is_power_of_two(n):
return math.log2(n).is_integer()
def all_unique(arr):
return len(set(arr)) == len(arr)
def apply_fns(fns, tensors):
    return [fn(tensor) for fn, tensor in zip(fns, tensors)]
def cast_tuple(t, length=1):
return t if isinstance(t, tuple) else((t,) * length)
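# Minimal usage sketch exercising a few of the helpers defined above; all values
# here are illustrative.
if __name__ == "__main__":
    assert default(None, 3) == 3 and exists("x")
    assert cast_tuple(5, length=3) == (5, 5, 5)
    with_prefix, rest = group_by_key_prefix("attn_", {"attn_dropout": 0.1, "ff_mult": 4})
    assert with_prefix == {"attn_dropout": 0.1} and rest == {"ff_mult": 4}
    logits = torch.randn(2, 10)
    print(top_k(logits, thres=0.9).shape)  # (2, 10), all but the top-k entries set to -inf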
|
zeta-main
|
zeta/utils/main.py
|
from typing import Callable, Optional, Tuple, List
from beartype import beartype
from einops.layers.torch import Rearrange, Reduce
from torch import nn
from zeta.nn.architecture.transformer import FeedForward
from zeta.nn.attention.attend import Attend
from zeta.nn.modules.mbconv import MBConv, Residual
from zeta.utils.main import default, exists
from zeta.utils.tensor_helpers import LayerNorm
class MaxVit(nn.Module):
def __init__(
self,
*,
num_classes,
dim,
depth,
dim_head: int = 32,
dim_conv_stem = None,
window_size: int = 7,
mbconv_expansion_rate: int = 4,
mbconv_shrinkage_rate = 0.25,
dropout = 0.01,
channels = 3
):
super().__init__()
assert isinstance(depth, tuple), "depth needs to be tuple of integers indicating number of transformer blocks at that stage"
#conv stem
dim_conv_stem = default(dim_conv_stem, dim)
self.conv_stem = nn.Sequential(
nn.Conv2d(
channels,
dim_conv_stem,
3,
stride=2,
padding=1,
),
nn.Conv2d(
dim_conv_stem,
dim_conv_stem,
3,
padding=1
)
)
#vars
num_stages = len(depth)
dims = tuple(
map(
lambda i: (2 ** i) * dim, range(num_stages)
)
)
dims = (dim_conv_stem, *dims)
dim_pairs = tuple(zip(dims[:-1], dims[1:]))
self.layers = nn.ModuleList([])
        # shorthand for window size for efficient block - grid like attention
w = window_size
#iterate through stages
cond_hidden_dims = []
for ind, ((layer_dim_in, layer_dim), layer_depth) in enumerate(zip(
dim_pairs, depth
)):
for stage_ind in range(layer_depth):
is_first = stage_ind == 0
stage_dim_in = layer_dim_in if is_first else layer_dim
cond_hidden_dims.append(stage_dim_in)
block = nn.Sequential(
MBConv(
stage_dim_in,
layer_dim,
downsample=is_first,
expansion_rate=mbconv_expansion_rate,
shrinkage_rate=mbconv_shrinkage_rate
),
Rearrange('b d (x w1) (y w2) -> b x y w1 w2 d', w1=w, w2=w),
Residual(
Attend(
dim=layer_dim,
dim_head=dim_head,
dropout=dropout
)
),
Residual(
FeedForward(
dim=layer_dim,
dropout=dropout
)
),
Rearrange('b x y w1 w2 d -> b d (w1 x) (w2 y)')
)
self.layers.append(block)
embed_dim = dims[-1]
self.embed_dim = dims[-1]
self.mlp_head = nn.Sequential(
Reduce('b d h w -> b d', 'mean'),
            LayerNorm(embed_dim),
nn.Linear(embed_dim, num_classes)
)
@beartype
def forward(
self,
x,
texts: Optional[List[str]] = None,
cond_fns: Optional[Tuple[Callable, ...]] = None,
cond_drop_prob = 0.,
return_embeddings=False
):
x = self.conv_stem(x)
if not exists(cond_fns):
cond_fns = (None,) * len(self.layers)
for stage, cond_fn in zip(self.layers, cond_fns):
if exists(cond_fn):
x = cond_fn(x)
x = stage(x)
if return_embeddings:
return x
return self.mlp_head(x)
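# Minimal usage sketch: assumes the MBConv / Attend / FeedForward blocks imported
# above behave like standard MaxViT stages; 224 = 7 * 32, so the default 7x7
# windows tile evenly after the stride-2 stem and per-stage downsampling.
if __name__ == "__main__":
    import torch

    model = MaxVit(num_classes=10, dim=64, depth=(1, 1), window_size=7)
    images = torch.randn(1, 3, 224, 224)
    logits = model(images)
    print(logits.shape)  # expected: (1, 10)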
|
zeta-main
|
zeta/models/max_vit.py
|
#gene splice
|
zeta-main
|
zeta/models/GeneSplice.py
|
# modularize the decoder to accept any attention, dilated or multihead
import torch
from torch.nn import Module
import bitsandbytes
from zeta import DecoderConfig, Decoder
from zeta.utils.embedding import PositionalEmbedding
from transformers import AutoTokenizer
class LongNetTokenizer:
def __init__(self):
self.tokenizer = AutoTokenizer.from_pretrained(
"EleutherAI/gpt-neox-20b",
eos_token="<eos>",
pad_token="<pad>",
extra_ids=0,
model_max_length=8192
)
def tokenize_texts(self, texts):
return self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
class LongNet(Module):
def __init__(self):
super().__init__()
self.embed = bitsandbytes.nn.modules.Embedding(
320002,
2048,
padding_idx=1
)
self.embed_positions = PositionalEmbedding(
2048,
2048,
1
)
self.output_projection = torch.nn.Linear(
2048, 32002, bias=False
)
self.config = DecoderConfig(
decoder_layers=24,
decoder_embed_dim=2048,
decoder_ffn_embed_dim=8192,
decoder_attention_heads=32,
dropout=0.1,
activation_fn="gelu",
attention_dropout=0.1,
decoder_dilation_rate=4,
decoder_segment_size=2,
vocab_size=64007,
)
self.decoder = Decoder(
self.config,
embed_tokens=self.embed,
embed_positions=self.embed_positions,
output_projection=self.output_projection
)
def forward(self, text_tokens, **kwargs):
model_input = self.decoder.forward_embedding(text_tokens)[0]
return self.decoder(model_input, passed_x=model_input)[0]
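# Minimal usage sketch: assumes the zeta Decoder/DecoderConfig and the bitsandbytes
# embedding behave as configured above; the tokenizer produces GPT-NeoX token ids.
if __name__ == "__main__":
    tokenizer = LongNetTokenizer()
    model = LongNet()
    tokens = tokenizer.tokenize_texts(["LongNet scales attention with dilation."])
    logits = model(tokens)
    print(logits.shape)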
|
zeta-main
|
zeta/models/LongNet.py
|
#the best llm ever made
from torch.nn import Module
from zeta.nn.architecture.auto_regressive_wrapper import AutoregressiveWrapper
from zeta.nn.architecture.transformer import (
Decoder,
Transformer,
)
class Andromeda(Module):
"""
Andromeda is a transformer-based model architecture. It initializes with
a Transformer and AutoregressiveWrapper with default or user-specified parameters.
"""
def __init__(self,
num_tokens=50432,
max_seq_len=8192,
dim=2560,
depth=32,
dim_head=128,
heads=24,
use_abs_pos_emb=False,
alibi_pos_bias=True,
alibi_num_heads=12,
rotary_xpos=True,
attn_flash=True,
attn_kv_heads = 2,
qk_norm=True,
attn_qk_norm=True,
attn_qk_norm_dim_scale=True, ):
"""
Initialize the model with specified or default parameters.
Args:
        - num_tokens: Number of tokens in the vocabulary
        - max_seq_len: Maximum sequence length
        - dim: Dimension of the model
        - depth: Depth of the model
        - dim_head: Dimension of the model head
        - heads: Number of heads
        - use_abs_pos_emb: Whether to use absolute position embedding
        - alibi_pos_bias: Alibi position bias
        - alibi_num_heads: Number of alibi heads
        - rotary_xpos: Rotary position
        - attn_flash: Attention flash
        - attn_kv_heads: Number of key/value heads (multi-query / grouped-query attention)
        - qk_norm: Query-key normalization
        - attn_qk_norm: Attention query-key normalization
        - attn_qk_norm_dim_scale: Attention query-key normalization dimension scale
"""
super().__init__()
try:
self.Andromeda = Transformer(
num_tokens=num_tokens,
max_seq_len=max_seq_len,
use_abs_pos_emb=use_abs_pos_emb,
attn_layers=Decoder(
dim=dim,
depth=depth,
dim_head=dim_head,
heads=heads,
alibi_pos_bias=alibi_pos_bias,
alibi_num_heads=alibi_num_heads,
rotary_xpos=rotary_xpos,
attn_flash=attn_flash,
attn_kv_heads=attn_kv_heads,
qk_norm=qk_norm,
attn_qk_norm=attn_qk_norm,
attn_qk_norm_dim_scale=attn_qk_norm_dim_scale
)
)
self.decoder = AutoregressiveWrapper(self.Andromeda)
except Exception as e:
print("Failed to initialize Andromeda: ", e)
raise
def forward(self, text_tokens, **kwargs):
"""
Forward pass through the model. It expects the input text_tokens.
Args:
- text_tokens: Input tokens
- kwargs: Other arguments
Returns:
- output from the decoder
"""
try:
            return self.decoder(text_tokens, **kwargs)
except Exception as e:
print("Failed in forward method: ", e)
raise
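# Minimal usage sketch: a small configuration with random token ids; the return
# value is whatever the AutoregressiveWrapper produces (typically a training loss).
if __name__ == "__main__":
    import torch

    model = Andromeda(
        num_tokens=1000,
        max_seq_len=256,
        dim=512,
        depth=2,
        dim_head=64,
        heads=8,
        alibi_num_heads=4,
    )
    tokens = torch.randint(0, 1000, (1, 128))
    out = model(tokens)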
|
zeta-main
|
zeta/models/andromeda.py
|
# Copyright (c) 2022 Agora
# Licensed under The MIT License [see LICENSE for details]
import torch
import torch.nn as nn
from zeta.nn.architecture.encoder import Encoder
from zeta.utils.embedding import (
PositionalEmbedding,
TextEmbedding,
VisionEmbedding,
)
from zeta.utils.module.multiway_network import MutliwayEmbedding
class BEiT3(nn.Module):
def __init__(self, args, **kwargs):
super().__init__()
self.args = args
assert args.multiway
assert args.vocab_size > 0
assert not args.share_encoder_input_output_embed
self.text_embed = TextEmbedding(args.vocab_size, args.encoder_embed_dim)
self.vision_embed = VisionEmbedding(
args.img_size,
args.patch_size,
args.in_chans,
args.encoder_embed_dim,
contain_mask_token=True,
prepend_cls_token=True,
)
# being consistent with Fairseq, which starts from 2 for position embedding
embed_positions = MutliwayEmbedding(
modules=[
PositionalEmbedding(self.vision_embed.num_position_embeddings() + 2, args.encoder_embed_dim),
PositionalEmbedding(args.max_source_positions, args.encoder_embed_dim),
],
dim=1,
)
self.encoder = Encoder(
args,
embed_tokens=None,
embed_positions=embed_positions,
output_projection=None,
is_encoder_decoder=False,
)
def forward(
self,
textual_tokens=None,
visual_tokens=None,
text_padding_position=None,
attn_mask=None,
vision_masked_position=None,
incremental_state=None,
positions=None,
):
assert textual_tokens is not None or visual_tokens is not None
if textual_tokens is None:
x = self.vision_embed(visual_tokens, vision_masked_position)
encoder_padding_mask = None
multiway_split_position = -1
elif visual_tokens is None:
x = self.text_embed(textual_tokens)
encoder_padding_mask = text_padding_position
multiway_split_position = 0
else:
x1 = self.vision_embed(visual_tokens, vision_masked_position)
multiway_split_position = x1.size(1)
x2 = self.text_embed(textual_tokens)
x = torch.cat([x1, x2], dim=1)
if text_padding_position is not None:
encoder_padding_mask = torch.cat(
[
torch.zeros(x1.shape[:-1]).to(x1.device).bool(),
text_padding_position,
],
dim=1,
)
else:
encoder_padding_mask = None
encoder_out = self.encoder(
src_tokens=None,
encoder_padding_mask=encoder_padding_mask,
attn_mask=attn_mask,
token_embeddings=x,
multiway_split_position=multiway_split_position,
incremental_state=incremental_state,
positions=positions,
)
encoder_out["multiway_split_position"] = multiway_split_position
return encoder_out
|
zeta-main
|
zeta/models/BEiT3.py
|
# Copyright (c) 2022 Agora
# Licensed under The MIT License [see LICENSE for details]
from zeta.models.gpt4 import GPT4, GPT4MultiModal
from zeta.models.andromeda import Andromeda
from zeta.models.palme import PalmE
from zeta.models.base import BaseModel
|
zeta-main
|
zeta/models/__init__.py
|
zeta-main
|
zeta/models/Magneto.py
|
|
import torch
from zeta.nn.architecture.auto_regressive_wrapper import AutoregressiveWrapper
from zeta.nn.architecture.transformer import (
Decoder,
Encoder,
Transformer,
ViTransformerWrapper,
)
class PalmE(torch.nn.Module):
def __init__(self,
image_size=256,
patch_size=32,
encoder_dim=512,
encoder_depth=6,
encoder_heads=8,
num_tokens=20000,
max_seq_len=1024,
decoder_dim=512,
decoder_depth=6,
decoder_heads=8,
alibi_num_heads=4,
use_abs_pos_emb=False,
cross_attend=True,
alibi_pos_bias=True,
rotary_xpos=True,
attn_flash=True,
qk_norm=True):
super(PalmE, self).__init__()
self.encoder = ViTransformerWrapper(
image_size=image_size,
patch_size=patch_size,
attn_layers=Encoder(
dim=encoder_dim,
depth=encoder_depth,
heads=encoder_heads
)
)
self.decoder = Transformer(
num_tokens=num_tokens,
max_seq_len=max_seq_len,
use_abs_pos_emb=use_abs_pos_emb,
attn_layers=Decoder(
dim=decoder_dim,
depth=decoder_depth,
heads=decoder_heads,
cross_attend=cross_attend,
alibi_pos_bias=alibi_pos_bias,
alibi_num_heads=alibi_num_heads,
rotary_xpos=rotary_xpos,
attn_flash=attn_flash,
qk_norm=qk_norm,
)
)
self.decoder = AutoregressiveWrapper(self.decoder)
def forward(self, img, text):
try:
encoded = self.encoder(img, return_embeddings=True)
return self.decoder(text, context=encoded)
except Exception as error:
print(f"Failed in forward method: {error}")
raise
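# Minimal usage sketch: shapes follow the defaults above (256x256 images, a
# 20000-token vocabulary, sequence length 1024); the decoder is wrapped in an
# AutoregressiveWrapper, so the output is whatever that wrapper returns.
if __name__ == "__main__":
    img = torch.randn(1, 3, 256, 256)
    text = torch.randint(0, 20000, (1, 1024))
    model = PalmE()
    out = model(img, text)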
|
zeta-main
|
zeta/models/palme.py
|
from zeta.nn.architecture.transformer import Transformer, Decoder
from zeta.nn.architecture.auto_regressive_wrapper import AutoregressiveWrapper
class LLama2:
def __init__(
self,
num_tokens=50432,
max_seq_len=8192,
dim=2560,
depth=32,
dim_head=128,
heads=24,
rotary_xpos=True,
attn_flash=True,
):
super().__init__()
self.llama2 = Transformer(
            num_tokens=num_tokens,
            max_seq_len=max_seq_len,
attn_layers=Decoder(
dim=dim,
depth=depth,
dim_head=dim_head,
heads=heads,
attn_flash=attn_flash,
rotary_xpos=rotary_xpos
)
)
        self.decoder = AutoregressiveWrapper(self.llama2)
def forward(self, text):
        return self.decoder(text)
|
zeta-main
|
zeta/models/llama.py
|
import torch
from einops import rearrange
from torch import nn
from zeta.nn.architecture.transformer import Encoder
def exists(val):
return val is not None
def divisible_by(num, den):
return (num % den) == 0
class ViT(nn.Module):
def __init__(self,
*,
image_size,
patch_size,
attn_layers,
channels=3,
num_classes=None,
post_emb_norm=False,
emb_dropout=0.
):
super().__init__()
        assert isinstance(attn_layers, Encoder), "attention layers must be an Encoder"
        assert divisible_by(image_size, patch_size), "image dimensions must be divisible by the patch size"
dim = attn_layers.dim
num_patches = (image_size // patch_size) ** 2
patch_dim = channels * patch_size ** 2
self.patch_size = patch_size
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim))
self.patch_to_embedding = nn.Sequential(
nn.LayerNorm(patch_dim),
nn.Linear(patch_dim, dim),
nn.LayerNorm(dim)
)
self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
self.dropout = nn.Dropout(emb_dropout)
self.attn_layers = attn_layers
self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else nn.Identity()
def forward(
self,
img,
return_embeddings=False
):
p = self.patch_size
        x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=p, p2=p)
x = self.patch_to_embedding(x)
n = x.shape[1]
x = x + self.pos_embedding[:, :n]
        x = self.post_emb_norm(x)
x = self.dropout(x)
x = self.attn_layers(x)
if not exists(self.mlp_head) or return_embeddings:
return x
x = x.mean(dim=-2)
        return self.mlp_head(x)
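# Minimal usage sketch: pairs the wrapper with the zeta Encoder imported above;
# the Encoder(dim=..., depth=..., heads=...) signature is assumed to follow the
# usual x-transformers style.
if __name__ == "__main__":
    vit = ViT(
        image_size=64,
        patch_size=8,
        num_classes=10,
        attn_layers=Encoder(dim=128, depth=2, heads=4),
    )
    imgs = torch.randn(2, 3, 64, 64)
    print(vit(imgs).shape)  # expected: (2, 10)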
|
zeta-main
|
zeta/models/vit.py
|
from abc import ABC, abstractmethod
class BaseModel(ABC):
def __init__(self, *args, **kwargs):
pass
def forward(self):
pass
|
zeta-main
|
zeta/models/base.py
|
import torch
from torch import nn
from zeta.nn.architecture.transformer import (
Decoder,
Encoder,
Transformer,
ViTransformerWrapper,
)
from zeta.nn.architecture.auto_regressive_wrapper import AutoregressiveWrapper
class GPT4(nn.Module):
"""
GPT4 is a transformer-based model architecture. It initializes with
a Transformer and AutoregressiveWrapper with default or user-specified parameters.
Initialize the model with specified or default parameters.
Args:
- num_tokens: Number of tokens in the vocabulary
- max_seq_len: Maximum sequence length
- dim: Dimension of the model
- depth: Depth of the model
- dim_head: Dimension of the model head
- heads: Number of heads
- use_abs_pos_emb: Whether to use absolute position embedding
- alibi_pos_bias: Alibi position bias
- alibi_num_heads: Number of alibi heads
- rotary_xpos: Rotary position
- attn_flash: Attention flash
- attn_one_kv_head: Attention one key/value head
- qk_norm: Query-key normalization
- attn_qk_norm: Attention query-key normalization
- attn_qk_norm_dim_scale: Attention query-key normalization dimension scale
"""
def __init__(self,
num_tokens=50432,
max_seq_len=8192,
dim=2560,
depth=32,
dim_head=128,
heads=24,
use_abs_pos_emb=False,
alibi_pos_bias=True,
alibi_num_heads=12,
rotary_xpos=True,
attn_flash=True,
attn_one_kv_head=True, # multiquery attention
qk_norm=True,
attn_qk_norm=True,
attn_qk_norm_dim_scale=True,
):
super().__init__()
try:
self.decoder = Transformer(
num_tokens=num_tokens,
max_seq_len=max_seq_len,
use_abs_pos_emb=use_abs_pos_emb,
attn_layers=Decoder(
dim=dim,
depth=depth,
dim_head=dim_head,
heads=heads,
alibi_pos_bias=alibi_pos_bias,
alibi_num_heads=alibi_num_heads,
rotary_xpos=rotary_xpos,
attn_flash=attn_flash,
attn_one_kv_head=attn_one_kv_head,
qk_norm=qk_norm,
attn_qk_norm=attn_qk_norm,
attn_qk_norm_dim_scale=attn_qk_norm_dim_scale
)
)
self.decoder = AutoregressiveWrapper(self.decoder)
except Exception as e:
print("Failed to initialize Andromeda: ", e)
raise
def forward(self, text_tokens, **kwargs):
try:
            return self.decoder(text_tokens, **kwargs)
except Exception as e:
print("Failed in forward method: ", e)
raise
class GPT4MultiModal(torch.nn.Module):
def __init__(self,
image_size=256,
patch_size=32,
encoder_dim=512,
encoder_depth=6,
encoder_heads=8,
num_tokens=20000,
max_seq_len=1024,
decoder_dim=512,
decoder_depth=6,
decoder_heads=8,
alibi_num_heads=4,
use_abs_pos_emb=False,
cross_attend=True,
alibi_pos_bias=True,
rotary_xpos=True,
attn_flash=True,
qk_norm=True):
super(GPT4MultiModal, self).__init__()
self.encoder = ViTransformerWrapper(
image_size=image_size,
patch_size=patch_size,
attn_layers=Encoder(
dim=encoder_dim,
depth=encoder_depth,
heads=encoder_heads
)
)
self.decoder = Transformer(
num_tokens=num_tokens,
max_seq_len=max_seq_len,
use_abs_pos_emb=use_abs_pos_emb,
attn_layers=Decoder(
dim=decoder_dim,
depth=decoder_depth,
heads=decoder_heads,
cross_attend=cross_attend,
alibi_pos_bias=alibi_pos_bias,
alibi_num_heads=alibi_num_heads,
rotary_xpos=rotary_xpos,
attn_flash=attn_flash,
qk_norm=qk_norm,
)
)
def forward(self, img, text):
try:
encoded = self.encoder(img, return_embeddings=True)
return self.decoder(text, context=encoded)
except Exception as error:
print(f"Failed in forward method: {error}")
raise
|
zeta-main
|
zeta/models/gpt4.py
|
import torch
from zeta import DecoderConfig, Decoder
from zeta.utils.embedding import PositionalEmbedding
from transformers import CLIPProcessor, CLIPModel, AutoTokenizer
from flamingo_pytorch import PerceiverResampler
from torch.nn import Module
import bitsandbytes
class KosmosTokenizer:
def __init__(self):
self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
self.tokenizer = AutoTokenizer.from_pretrained(
"EleutherAI/gpt-neox-20b",
additional_special_tokens=["<image>", "</image>"],
eos_token ="<eos>",
pad_token="<pad>",
extra_ids=0,
model_max_length=8192
)
self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids(["<image>", "</image>"])
def tokenize_texts(self, texts):
texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
# Add image tokens to text as "<s> <image> </image> text </s>"
image_tokens = torch.tensor([[self.im_idx, self.im_end_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], image_tokens, texts[:, 1:]], dim=1), texts
def tokenize_images(self, images):
return self.processor(images=images, return_tensors="pt").pixel_values
def tokenize(self, sample):
text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
attention_mask = text_tokens != self.tokenizer.pad_token_id
dummy_image_features = torch.ones((text_tokens.shape[0], 64))
attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1)
return {
"text_tokens": text_tokens,
"images": self.tokenize_images(sample["image"]),
"labels": only_text_tokens,
"attention_mask": attention_mask,
}
class Kosmos(Module):
def __init__(self):
super().__init__()
# Instantiate Clip Vit-l/14
self.clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K").vision_model
self.embed = bitsandbytes.nn.modules.Embedding(
32002,
2048,
padding_idx=1
)
self.embed_positions= PositionalEmbedding(
2048,
2048,
1
)
self.output_projection = torch.nn.Linear(
2048, 32002, bias=False
)
torch.nn.init.normal_(
self.output_projection.weight, mean=0, std=2048**-0.5
)
# Config following KOSMOS-1 paper (https://arxiv.org/pdf/2302.14045.pdf)
self.config = DecoderConfig(
decoder_layers=24,
decoder_embed_dim=2048,
decoder_ffn_embed_dim=8192,
decoder_attention_heads=32,
dropout=0.1,
activation_fn="gelu",
attention_dropout=0.1,
vocab_size=64007,
subln=True,
xpos_rel_pos=True,
multiway=True,
max_rel_pos=2048,
)
self.decoder = Decoder(
self.config,
embed_tokens=self.embed,
embed_positions=self.embed_positions,
output_projection=self.output_projection
)
self.perceive = PerceiverResampler(
dim = 1024,
depth = 2,
dim_head = 64,
heads = 8,
num_latents = 64,
num_media_embeds = 257
)
self.image_proj = torch.nn.Linear(1024, 2048, bias=False)
torch.nn.init.normal_(
self.image_proj.weight, mean=0, std=2048**-0.5
)
def forward(self, text_tokens, images, **kwargs):
images = self.clip_model(pixel_values=images)["last_hidden_state"]
images = self.perceive(images).squeeze(1)
images = self.image_proj(images)
model_input = self.decoder.forward_embedding(text_tokens)[1]
model_input = torch.cat([model_input[:, 0:2], images, model_input[:, 2:]], dim=1)
model_input = self.decoder.forward_embedding(model_input, token_embedding=model_input)[0]
return self.decoder(model_input, passed_x=model_input)[0]
|
zeta-main
|
zeta/models/kosmos.py
|
# implementation of a general ReST (Reinforced Self-Training) algorithm to plug and play with any model on zeta
# need a maintainer for this directory.
|
zeta-main
|
zeta/rl/rest.py
|
zeta-main
|
zeta/rl/__init__.py
|
|
import copy
from pathlib import Path
import torch
import torch.nn.functional as F
from beartype import beartype
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from torch import nn
def log(t, eps=1e-10):
return torch.log(t.clamp(min=eps))
def exists(val):
return val is not None
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim = dim)
def masked_mean(seq, mask=None, dim=1, keepdim=False):
if not exists(mask):
return seq.mean(dim=dim)
if seq.ndim == 3:
mask = rearrange(mask, 'b n -> b n 1')
masked_seq = seq.masked_fill(~mask, 0.)
numer = masked_seq.sum(dim=dim, keepdim=keepdim)
denom = mask.sum(dim=dim, keepdim=keepdim)
masked_mean = numer / denom.clamp(min=1e-3)
masked_mean = masked_mean.masked_fill(denom == 0, 0.)
return masked_mean
@beartype
class RewardModel(nn.Module):
def __init__(
self,
model,
dropout = 0.1,
num_binned_output = 0.,
use_lora=True,
lora_r=8,
reward_lora_scope="reward",
):
super().__init__()
self.model = copy.deepcopy(model)
self.model.set_dropout(dropout)
self.reward_lora_scope = reward_lora_scope if use_lora else None
if exists(self.reward_lora_scope):
self.model.add_finetune_params(reward_lora_scope, lora_r=lora_r)
dim = model.dim
self.binned_output = num_binned_output > 1
self.prompt_embed = nn.Parameter(torch.zeros(1, 1, dim))
self.response_embed = nn.Parameter(torch.zeros(1, 1, dim))
if self.binned_output:
self.to_pred = nn.Linear(dim, num_binned_output)
else:
self.to_pred = nn.Sequential(
nn.Linear(dim, 1, bias=False),
Rearrange('... 1 -> ...')
)
def load(self, path):
path = Path(path)
assert path.exists()
self.load_state_dict(torch.load(path))
def finetune_parameters(self):
return [
*self.to_pred.parameters(),
*(self.model.finetune_parameters(self.reward_lora_scope) \
if exists(self.reward_lora_scope) else self.model.parameters())
]
def forward(
self,
x,
mask=None,
prompt_mask=None,
prompt_lengths=None,
labels=None,
sample=None,
sample_temperature=1.,
disable_lora=False
):
assert not(exists(prompt_mask) and exists(prompt_lengths))
if exists(prompt_lengths):
batch, seq_len = x.shape
arange = torch.arange(seq_len, device=x.device)
prompt_mask = repeat(arange, 'n -> b n', b=batch) < rearrange(prompt_lengths, 'b -> b 1')
#model need to know what is prompt and what is response
extra_embed=None
if exists(prompt_mask):
extra_embed = torch.where(
rearrange(prompt_mask, 'b n -> b n 1'),
self.prompt_embed,
self.response_embed
)
embeds = self.model(
x,
extra_embed=extra_embed,
return_only_embedding=True,
disable_lora=disable_lora,
finetune_scope=self.reward_lora_scope
)
pooled = masked_mean(embeds, mask, dim=1)
pred = self.to_pred(pooled)
if sample and self.binned_output:
assert not exists(labels)
pred = gumbel_sample(pred, temperature = sample_temperature, dim=-1)
if not exists(labels):
return pred
        if not self.binned_output:
            return F.mse_loss(pred, labels)
return F.cross_entropy(pred, labels)
|
zeta-main
|
zeta/rl/reward_model.py
|
import torch
from zeta.nn.attention.cross_attention import CrossAttend
from zeta.nn.architecture.transformer import Encoder
encoder = Encoder(dim=512, depth=6)
model = CrossAttend(dim=512, depth=6)
nodes = torch.randn(1, 1, 512)
node_mask = torch.ones(1, 1).bool()
neighbors = torch.randn(1, 5, 512)
neighbor_mask = torch.ones(1, 5).bool()
encoded_neighbors = encoder(neighbors, mask=neighbor_mask)
model(nodes, context=encoded_neighbors, mask=node_mask, context_mask=neighbor_mask)
|
zeta-main
|
playground/cross_attend.py
|
import torch
from zeta import FlashAttention
q = torch.randn(2, 4, 6, 8)
k = torch.randn(2, 4, 10, 8)
v = torch.randn(2, 4, 10, 8)
attention = FlashAttention(causal=False, dropout=0.1, flash=False)
output = attention(q, k, v)
print(output.shape)
|
zeta-main
|
playground/flash_attention.py
|
import torch
from zeta.models import GPT4MultiModal
image = torch.randn(1, 3, 256, 256)
text = torch.randint(0, 20000, (1, 1024))
model = GPT4MultiModal()
output = model(image, text)
|
zeta-main
|
playground/models/gpt4_multimodal.py
|
import torch
from zeta.models.gpt4 import GPT4
x = torch.randint(0, 256, (1, 1024))
gpt4 = GPT4()
gpt4(x)
|
zeta-main
|
playground/models/gpt4.py
|
# Copyright (c) 2022 Agora
# Licensed under The MIT License [see LICENSE for details]
|
zeta-main
|
tests/__init__.py
|
import time
import unittest
import torch
from zeta import MultiheadAttention
class TestMultiheadAttention(unittest.TestCase):
def test_output_shape(self):
# Setup
input_tensor = torch.randn(2, 128, 512)
dilated_attention = MultiheadAttention(512, 8, 2, 64)
# Action
output = dilated_attention(input_tensor)
# Assert
self.assertEqual(output.shape, (2, 128, 512))
def test_xpos(self):
# Setup
input_tensor = torch.randn(2, 128, 512)
dilated_attention = MultiheadAttention(512, 8, 2, 64, use_xpos=True)
# Action
output = dilated_attention(input_tensor)
# Assert
self.assertEqual(output.shape, (2, 128, 512))
def test_relative_position_bias(self):
# Setup
input_tensor = torch.randn(2, 128, 512)
dilated_attention = MultiheadAttention(512, 8, 2, 64, use_rel_pos_bias=True)
# Action
output = dilated_attention(input_tensor)
# Assert
self.assertEqual(output.shape, (2, 128, 512))
def test_attention_consistency(self):
# Setup
input_tensor = torch.randn(2, 128, 512)
dilated_attention = MultiheadAttention(512, 8, 2, 64)
# Action
output = dilated_attention(input_tensor)
# Assert
self.assertTrue((output.std(dim=-1) > 0).all())
def test_speed(self):
# Setup
input_tensor = torch.randn(2, 1024, 512)
dilated_attention = MultiheadAttention(512, 8, 2, 64)
# Action
start_time = time.time()
dilated_attention(input_tensor)
end_time = time.time()
# Assert
self.assertLess(end_time - start_time, 1)
def test_gradient_flow(self):
# Setup
input_tensor = torch.randn(2, 128, 512, requires_grad=True)
dilated_attention = MultiheadAttention(512, 8, 2, 64)
# Action
output = dilated_attention(input_tensor)
output.sum().backward()
grad_norm = input_tensor.grad.norm().item()
# Assert
self.assertLess(grad_norm, 1e6)
self.assertGreater(grad_norm, 1e-6)
def test_scaling(self):
input_tensor = torch.randn(2, 1024, 512)
dilated_attention = MultiheadAttention(512, 8, 2, 64)
start_time = time.time()
_ = dilated_attention(input_tensor)
time_for_1024 = time.time() - start_time
input_tensor = torch.randn(2, 2048, 512)
start_time = time.time()
_ = dilated_attention(input_tensor)
time_for_2048 = time.time() - start_time
self.assertLessEqual(time_for_2048/time_for_1024, 2)
def test_reproducibility(self):
torch.manual_seed(0)
input_tensor = torch.randn(2, 128, 512)
dilated_attention = MultiheadAttention(512, 8, 2, 64)
output1 = dilated_attention(input_tensor)
torch.manual_seed(0)
input_tensor = torch.randn(2, 128, 512)
dilated_attention = MultiheadAttention(512, 8, 2, 64)
output2 = dilated_attention(input_tensor)
self.assertTrue(torch.allclose(output1, output2))
def test_attention_distribution(self):
input_tensor = torch.randn(2, 128, 512)
dilated_attention = MultiheadAttention(512, 8, 2, 64)
_, attn_weights = dilated_attention(input_tensor)
self.assertTrue(torch.allclose(attn_weights.sum(dim=-1), torch.tensor(1.)))
def setUp(self):
self.d_model = 128
self.num_heads = 4
self.dilation_rate = 2
self.segment_size = 32
self.dropout = 0.1
self.casual = False
self.use_xpos = False
self.use_rel_pos_bias = False
self.batch_size = 10
self.seq_len = 100
self.x = torch.rand(self.batch_size, self.seq_len, self.d_model)
self.sparse_dilated_attention = MultiheadAttention(self.d_model, self.num_heads, self.dilation_rate, self.segment_size, self.dropout, self.casual, self.use_xpos, self.use_rel_pos_bias)
def test_forward_pass(self):
output = self.sparse_dilated_attention(self.x)
self.assertEqual(output.size(), (self.batch_size, self.seq_len, self.d_model))
def test_attention_outputs(self):
output = self.sparse_dilated_attention(self.x)
self.assertTrue(torch.all(output >= 0))
self.assertTrue(torch.all(output <= 1))
def test_dropout(self):
self.sparse_dilated_attention.dropout.p = 1.0
output = self.sparse_dilated_attention(self.x)
self.assertTrue(torch.all(output == 0))
|
zeta-main
|
tests/example.py
|
from zeta.utils.attention.multihead_attention import MultiheadAttention
import torch
import unittest
from zeta import MultiheadAttention
class TestMultiheadAttention(unittest.TestCase):
def setUp(self):
self.args = {
'xpos_rel_pos': True,
'xpos_scale_base': 2,
'layernorm_eps': 1e-5
}
self.embed_dim = 64
self.num_heads = 4
self.multihead_attn = MultiheadAttention(self.args, self.embed_dim, self.num_heads)
def test_forward_shape(self):
query = torch.rand(16, 20, self.embed_dim)
key = torch.rand(16, 20, self.embed_dim)
value = torch.rand(16, 20, self.embed_dim)
attn, attn_weights = self.multihead_attn(query, key, value)
self.assertEqual(attn.shape, (16, 20, self.embed_dim))
self.assertEqual(attn_weights.shape, (self.num_heads, 16, 20, 20))
def test_forward_incremental_state(self):
query = torch.rand(16, 20, self.embed_dim)
key = torch.rand(16, 20, self.embed_dim)
value = torch.rand(16, 20, self.embed_dim)
incremental_state = {
'prev_key': torch.rand(16, self.num_heads, 10, self.embed_dim // self.num_heads),
'prev_value': torch.rand(16, self.num_heads, 10, self.embed_dim // self.num_heads)
}
attn, attn_weights = self.multihead_attn(query, key, value, incremental_state=incremental_state)
self.assertEqual(attn.shape, (16, 20, self.embed_dim))
self.assertEqual(attn_weights.shape, (self.num_heads, 16, 20, 30))
def test_forward_attn_mask(self):
query = torch.rand(16, 20, self.embed_dim)
key = torch.rand(16, 20, self.embed_dim)
value = torch.rand(16, 20, self.embed_dim)
attn_mask = torch.ones(20, 20)
attn, attn_weights = self.multihead_attn(query, key, value, attn_mask=attn_mask)
self.assertEqual(attn.shape, (16, 20, self.embed_dim))
self.assertEqual(attn_weights.shape, (self.num_heads, 16, 20, 20))
def test_forward_key_padding_mask(self):
query = torch.rand(16, 20, self.embed_dim)
key = torch.rand(16, 20, self.embed_dim)
value = torch.rand(16, 20, self.embed_dim)
key_padding_mask = torch.ones(16, 20)
attn, attn_weights = self.multihead_attn(query, key, value, key_padding_mask=key_padding_mask)
self.assertEqual(attn.shape, (16, 20, self.embed_dim))
self.assertEqual(attn_weights.shape, (self.num_heads, 16, 20, 20))
def test_forward_rel_pos(self):
query = torch.rand(16, 20, self.embed_dim)
key = torch.rand(16, 20, self.embed_dim)
value = torch.rand(16, 20, self.embed_dim)
rel_pos = torch.rand(16, self.num_heads, 20, 20)
attn, attn_weights = self.multihead_attn(query, key, value, rel_pos=rel_pos)
self.assertEqual(attn.shape, (16, 20, self.embed_dim))
self.assertEqual(attn_weights.shape, (self.num_heads, 16, 20, 20))
def test_forward_is_first_step(self):
query = torch.rand(16, 20, self.embed_dim)
key = torch.rand(16, 20, self.embed_dim)
value = torch.rand(16, 20, self.embed_dim)
attn, attn_weights = self.multihead_attn(query, key, value, is_first_step=True)
self.assertEqual(attn.shape, (16, 20, self.embed_dim))
self.assertEqual(attn_weights.shape, (self.num_heads, 16, 20, 20))
def test_forward_is_not_first_step(self):
query = torch.rand(16, 20, self.embed_dim)
key = torch.rand(16, 20, self.embed_dim)
value = torch.rand(16, 20, self.embed_dim)
attn, attn_weights = self.multihead_attn(query, key, value, is_first_step=False)
self.assertEqual(attn.shape, (16, 20, self.embed_dim))
self.assertEqual(attn_weights.shape, (self.num_heads, 16, 20, 20))
def test_forward_different_query_key_value_size(self):
query = torch.rand(16, 20, self.embed_dim)
key = torch.rand(16, 30, self.embed_dim)
value = torch.rand(16, 30, self.embed_dim)
with self.assertRaises(AssertionError):
self.multihead_attn(query, key, value)
def test_forward_different_batch_size(self):
query = torch.rand(16, 20, self.embed_dim)
key = torch.rand(32, 20, self.embed_dim)
value = torch.rand(32, 20, self.embed_dim)
with self.assertRaises(AssertionError):
self.multihead_attn(query, key, value)
def test_forward_different_embed_dim(self):
query = torch.rand(16, 20, 128)
key = torch.rand(16, 20, 128)
value = torch.rand(16, 20, 128)
with self.assertRaises(AssertionError):
self.multihead_attn(query, key, value)
def test_forward_no_value(self):
query = torch.rand(16, 20, self.embed_dim)
key = torch.rand(16, 20, self.embed_dim)
with self.assertRaises(AssertionError):
self.multihead_attn(query, key, None)
def test_forward_no_key(self):
query = torch.rand(16, 20, self.embed_dim)
value = torch.rand(16, 20, self.embed_dim)
with self.assertRaises(AssertionError):
self.multihead_attn(query, None, value)
def test_forward_no_query(self):
key = torch.rand(16, 20, self.embed_dim)
value = torch.rand(16, 20, self.embed_dim)
with self.assertRaises(AssertionError):
self.multihead_attn(None, key, value)
def test_forward_no_input(self):
with self.assertRaises(AssertionError):
self.multihead_attn(None, None, None)
def test_forward_zero_length_input(self):
query = torch.rand(16, 0, self.embed_dim)
key = torch.rand(16, 0, self.embed_dim)
value = torch.rand(16, 0, self.embed_dim)
attn, attn_weights = self.multihead_attn(query, key, value)
self.assertEqual(attn.shape, (16, 0, self.embed_dim))
self.assertEqual(attn_weights.shape, (self.num_heads, 16, 0, 0))
def test_forward_one_length_input(self):
query = torch.rand(16, 1, self.embed_dim)
key = torch.rand(16, 1, self.embed_dim)
value = torch.rand(16, 1, self.embed_dim)
attn, attn_weights = self.multihead_attn(query, key, value)
self.assertEqual(attn.shape, (16, 1, self.embed_dim))
self.assertEqual(attn_weights.shape, (self.num_heads, 16, 1, 1))
def test_forward_large_input(self):
query = torch.rand(16, 1000, self.embed_dim)
key = torch.rand(16, 1000, self.embed_dim)
value = torch.rand(16, 1000, self.embed_dim)
attn, attn_weights = self.multihead_attn(query, key, value)
self.assertEqual(attn.shape, (16, 1000, self.embed_dim))
self.assertEqual(attn_weights.shape, (self.num_heads, 16, 1000, 1000))
if __name__ == '__main__':
unittest.main()
|
zeta-main
|
tests/test_mha.py
|
import logging
import os
from functools import partial
from math import ceil
from timeit import Timer
from typing import Callable, List, NamedTuple
import plotly.graph_objects as go
import torch
from zeta.nn.attention.dilated_attention import DilatedAttention
# Generic benchmarking parameters
BATCH_SIZE = 1
TOTAL_TOKENS = 2**25 # 32M
NUM_HEADS = 4
EMBED_DIM = 8
# Vanilla attention only
VANILLA_SEQ_LENGTHS = [2**i for i in range(13, 18)] # 8k - 128k
# Dilated attention only
SEGMENT_LENGTHS = [8192, 16384, 32768] # 8k - 64k
DILATED_SEQ_LENGTHS = [2**i for i in range(13, 26)] # 8k - 32M
class BenchmarkResult(NamedTuple):
mean: float
std: float
def __repr__(self):
return f"BenchmarkResult(mean: {self.mean:.3e}, std: {self.std:.3e})"
def __str__(self):
return f"({self.mean:.3e} \u00B1 {self.std:.3e}) s"
def benchmark(
fn: Callable,
*args,
min_total_seconds: float = 1.0,
min_iterations: int = 2,
**kwargs,
) -> BenchmarkResult:
if min_iterations < 2:
raise ValueError("min_iterations must be >= 2")
timer = Timer(
"fn(*args, **kwargs)",
globals={"fn": fn, "args": args, "kwargs": kwargs},
)
# Run the function once to warm up
_ = timer.repeat(number=1, repeat=1)
times: List[float] = []
total_time = 0.0
num_iterations = min_iterations or 1
while total_time < min_total_seconds:
_times = timer.repeat(number=1, repeat=num_iterations)
times.extend(_times)
_total_time = sum(_times)
total_time += _total_time
        # Estimate how many more iterations are needed to reach min_total_seconds
avg_time = _total_time / num_iterations
num_iterations = ceil((min_total_seconds - total_time) / avg_time)
times_tensor = torch.as_tensor(times)
return BenchmarkResult(
mean=times_tensor.mean().item(),
std=times_tensor.std().item(),
)
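# Illustrative usage sketch for `benchmark` (not called anywhere in this script):
# the callable, its arguments, and the keyword override below are arbitrary
# assumptions, chosen only to show the calling convention and result formatting.
def _example_benchmark_matmul() -> BenchmarkResult:
    a = torch.randn(256, 256)
    b = torch.randn(256, 256)
    return benchmark(torch.matmul, a, b, min_total_seconds=0.25)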
def get_dilated_attention_for_seq_length(seq_length: int) -> DilatedAttention:
segment_lengths: List[int] = []
dilation_rates: List[int] = []
for segment_length in SEGMENT_LENGTHS:
# We can't use segment lengths larger than the sequence length.
segment_length = min(segment_length, seq_length)
exponent = segment_length // SEGMENT_LENGTHS[0] - 1
dilation_rate = 2**exponent
segment_lengths.append(segment_length)
dilation_rates.append(dilation_rate)
return DilatedAttention(
segment_lengths=segment_lengths,
dilation_rates=dilation_rates,
)
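# Worked example of the mapping above: for seq_length = 32768 the helper yields
# segment_lengths = [8192, 16384, 32768] and, via exponent = segment // 8192 - 1,
# dilation_rates = [2**0, 2**1, 2**3] = [1, 2, 8].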
def attention_forward(x: torch.Tensor, attn: Callable):
with torch.no_grad():
_ = attn(x, x, x)
torch.cuda.synchronize()
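# NOTE: assumed baseline for the "vanilla attention" benchmark below; the
# original script never defines the callable it benchmarks. This stand-in uses
# PyTorch's scaled_dot_product_attention and expects (batch, seq, heads, head_dim)
# inputs, matching the tensors built in __main__.
def vanilla_attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
    q, k, v = (t.transpose(1, 2) for t in (q, k, v))
    out = torch.nn.functional.scaled_dot_product_attention(q, k, v)
    return out.transpose(1, 2)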
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
logging.info("Benchmark vanilla attention...")
vanilla_results: List[BenchmarkResult] = []
for seq_length in VANILLA_SEQ_LENGTHS:
torch.cuda.empty_cache()
batch_size = TOTAL_TOKENS // seq_length
x = torch.randn(
(batch_size, seq_length, NUM_HEADS, EMBED_DIM),
dtype=torch.float16,
device="cuda",
)
        fn = partial(attention_forward, attn=vanilla_attention)
result = benchmark(fn, x)
vanilla_results.append(result)
logging.info(f"Sequence length {seq_length}: {result}")
logging.info("Benchmark dilated attention...")
dilated_results: List[BenchmarkResult] = []
for seq_length in DILATED_SEQ_LENGTHS:
torch.cuda.empty_cache()
batch_size = TOTAL_TOKENS // seq_length
x = torch.randn(
(batch_size, seq_length, NUM_HEADS, EMBED_DIM),
dtype=torch.float16,
device="cuda",
)
attn = get_dilated_attention_for_seq_length(seq_length)
fn = partial(attention_forward, attn=attn)
result = benchmark(fn, x)
dilated_results.append(result)
logging.info(f"Sequence length {seq_length}: {result}")
logging.info("Plotting results...")
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=VANILLA_SEQ_LENGTHS,
y=[r.mean for r in vanilla_results],
error_y=dict(
type="data",
array=[r.std for r in vanilla_results],
visible=True,
),
name="Vanilla Attention",
),
)
fig.add_trace(
go.Scatter(
x=DILATED_SEQ_LENGTHS,
y=[r.mean for r in dilated_results],
error_y=dict(
type="data",
array=[r.std for r in dilated_results],
visible=True,
),
name="Dilated Attention",
),
)
fig.update_layout(
title="Attention Benchmark (Total Tokens = 32M)",
xaxis_title="Sequence Length",
yaxis_title="Runtime (s)",
xaxis_type="log",
yaxis_type="log",
)
fig.write_image(os.path.join("doc", "benchmark.png"))
|
zeta-main
|
benchmarks/dilated_bench.py
|
import torch
from gpt3.gpt3 import GPT3
x = torch.randint(0, 256, (1, 1024)).cuda()
model = GPT3()
model(x)
|
GPT3-main
|
example.py
|
from torch import nn
# from gpt3.model import Transformer, Decoder, AutoregressiveWrapper
from zeta import Transformer, Decoder, AutoregressiveWrapper
class GPT3(nn.Module):
def __init__(
self,
num_tokens=50477,
max_seq_len=4096,
dim=12288,
depth=96,
heads=96,
attn_dim_head=128
):
super().__init__()
self.num_tokens = num_tokens
        self.max_seq_len = max_seq_len
self.dim = dim
self.depth = depth
self.heads = heads
self.attn_dim_head = attn_dim_head
self.model = Transformer(
num_tokens=self.num_tokens,
max_seq_len=self.max_seq_len,
attn_layers = Decoder(
dim=self.dim,
depth=self.depth,
heads=self.heads,
)
).cuda()
self.model = AutoregressiveWrapper(self.model)
def forward(self, text, **kwargs):
        return self.model(text, **kwargs)
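# Rough scale check for the defaults above (ignoring embeddings and biases):
# a standard transformer block costs about 12 * dim^2 parameters, so
# 12 * 96 * 12288^2 ≈ 1.74e11, i.e. roughly the 175B parameters of GPT-3.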
|
GPT3-main
|
gpt3/gpt3.py
|
from gpt3.gpt3 import GPT3
from gpt3.train import train
|
GPT3-main
|
gpt3/__init__.py
|
import math
import multiprocessing
import os
from datetime import timedelta
from functools import partial
from itertools import chain
import torch
########### SETUP CONFIG
import torch.distributed as dist
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.state import AcceleratorState
from accelerate.utils import DummyOptim, InitProcessGroupKwargs
from datasets import load_dataset
from lion_pytorch import Lion
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
CheckpointImpl,
apply_activation_checkpointing,
checkpoint_wrapper,
)
from torch.distributed.fsdp import (
BackwardPrefetch,
FullyShardedDataParallel,
MixedPrecision,
ShardingStrategy,
)
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
from torch.nn import LayerNorm
from torch.optim import AdamW
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
AutoTokenizer,
default_data_collator,
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup,
set_seed,
)
from gpt3.gpt3 import GPT3, Transformer
# state = AcceleratorState()
class StableAdamWUnfused(torch.optim.Optimizer):
def __init__(
self,
params,
lr=0.002,
weight_decay=0.2,
betas=(0.9, 0.99),
eps=1e-8,
clip_thresh=1.0,
precision="amp_bfloat16",
custom_scalar=65536,
):
beta1, beta2 = betas[0], betas[1]
defaults = dict(lr=lr, weight_decay=weight_decay, beta1=beta1, beta2=beta2)
super(StableAdamWUnfused, self).__init__(params, defaults)
self.eps = eps
self.d = clip_thresh
# Set precision to "custom_fp16" if you want to use a fixed loss scalar, custom_scalar, which is divided out in the update step.
# If you do this, call (custom_scalar * loss).backward() instead of loss.backward().
self.precision = precision
self.custom_scaler = custom_scalar
for group in self.param_groups:
group["step"] = 1.0
print("Using StableAdamWUnfused-v1")
def __setstate__(self, state):
super(StableAdamWUnfused, self).__setstate__(state)
def step(self, closure=None):
if closure is not None:
closure()
for group in self.param_groups:
lr = group["lr"]
weight_decay = group["weight_decay"]
beta1 = group["beta1"]
beta2 = group["beta2"]
step = group["step"]
for p in group["params"]:
if p.grad is None:
continue
theta = p.data
param_state = self.state[p]
if self.precision == "custom_fp16":
g = p.grad.data / self.custom_scaler
if torch.any(torch.isnan(g) | torch.isinf(g)):
continue
else:
g = p.grad.data
if "exp_avg" not in param_state:
v = param_state["exp_avg"] = torch.zeros_like(theta)
u = param_state["exp_avg_sq"] = torch.zeros_like(theta)
else:
v = param_state["exp_avg"]
u = param_state["exp_avg_sq"]
beta1hat = beta1 * (1 - beta1 ** (step - 1)) / (1 - beta1**step)
beta2hat = beta2 * (1 - beta2 ** (step - 1)) / (1 - beta2**step)
v = v.mul_(beta1hat).add_(g, alpha=1.0 - beta1hat)
u = u.mul_(beta2hat).addcmul_(g, g, value=1.0 - beta2hat)
denominator = u.sqrt().add_(self.eps)
# StableAdamW = AdamW + update clipping (https://arxiv.org/abs/1804.04235) applied tensor-wise.
rms = (
torch.div(
g.pow(2), torch.maximum(u, (self.eps**2) * torch.ones_like(u))
)
.mean()
.sqrt()
.item()
)
theta = theta.mul_(1.0 - lr * weight_decay).addcdiv_(
v, denominator, value=-lr * (1.0 / max(1.0, rms / self.d))
)
# save current params
param_state["exp_avg"] = v
param_state["exp_avg_sq"] = u
group["step"] = step + 1
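# Illustrative sketch (assumed model/loss, not used elsewhere in this file) of the
# "custom_fp16" convention documented in __init__: scale the loss by the fixed
# scalar before backward(), since step() divides the gradients by it again.
def _example_custom_fp16_step(model: torch.nn.Module, loss: torch.Tensor) -> None:
    optimizer = StableAdamWUnfused(
        model.parameters(), precision="custom_fp16", custom_scalar=65536
    )
    (optimizer.custom_scaler * loss).backward()
    optimizer.step()
    optimizer.zero_grad()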
logger = get_logger(__name__, log_level="INFO")
class CFG:
BATCH_SIZE = 1
GRADIENT_ACCUMULATE_EVERY: int = 1
SEED: int = 42
LEARNING_RATE: float = 1e-4 #3e-4 # 1e-4 for lion
WEIGHT_DECAY: float = 0.1
SEQ_LEN: int = 8192
NUM_CPU: int = multiprocessing.cpu_count()
USE_DEEPSPEED: bool = True
USE_FSDP: bool = True
USE_PRETOKENIZED: bool = True
USE_ACTIVATION_CHECKPOINTING: bool = True
    RESUME_FROM_CHECKPOINT: str = ""
CHECKPOINTING_STEPS: int = 1000
OUTPUT_DIR: str = 'checkpoints/' # Folder
ENTITY_NAME: str = "Andromeda"
LOGGING_STEPS: int = 100
# helpers
def print_num_params(model, accelerator: Accelerator):
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
accelerator.print(f"Number of parameters in model: {n_params}")
# activation checkpointing
def activation_checkpointing(
model: torch.nn.Module,
offload_to_cpu: bool = False,
accelerator: Accelerator = None,
):
"""
Apply activation checkpointing to a model.
Args:
model (Module): The model to which to apply activation checkpointing.
offload_to_cpu (bool, optional): Whether to offload the activations to CPU. Defaults to False.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
"""
if accelerator is not None:
accelerator.print("Using activation checkpointing")
def check_fn(submodule):
return isinstance(submodule, Transformer)
non_reentrant_wrapper = partial(
checkpoint_wrapper,
offload_to_cpu=offload_to_cpu,
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
)
apply_activation_checkpointing(
model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn
)
# FSDP
def fsdp(
model: torch.nn.Module,
auto_wrap: bool = False,
mp: str = "fp32",
shard_strat: str = "NO_SHARD",
):
"""
This function wraps a given PyTorch model with the FullyShardedDataParallel (FSDP) wrapper to enable efficient data parallelism and model sharding.
Args:
model (torch.nn.Module): The original PyTorch model to be wrapped with FSDP.
auto_wrap (bool, optional): If True, it enables automatic wrapping of the model's layers according to the transformer_auto_wrap_policy. Default is False.
mp (str, optional): The mixed precision mode to be used. Can be 'bf16' for BFloat16, 'fp16' for Float16 or 'fp32' for Float32 precision. Default is 'fp32'.
shard_strat (str, optional): The sharding strategy to be used. Can be 'SHARD_GRAD' for sharding at gradient computation, 'FULL_SHARD' for full model sharding or 'NO_SHARD' for no sharding. Default is 'NO_SHARD'.
Raises:
ValueError: If the provided mp (mixed precision mode) is not 'bf16', 'fp16' or 'fp32'.
ValueError: If the provided shard_strat (sharding strategy) is not 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD'.
Returns:
torch.nn.Module: The input model wrapped with FSDP.
"""
if auto_wrap:
Andromeda_auto_wrap_policy = partial(
transformer_auto_wrap_policy,
transformer_layer_cls={
Transformer,
},
)
else:
Andromeda_auto_wrap_policy = None
if mp == "bf16":
mp_fsdp = MixedPrecision(
param_dtype=torch.bfloat16,
# Gradient communication precision.
reduce_dtype=torch.bfloat16,
# Buffer precision.
buffer_dtype=torch.bfloat16,
)
elif mp == "fp16":
mp_fsdp = MixedPrecision(
param_dtype=torch.float16,
# Gradient communication precision.
reduce_dtype=torch.float16,
# Buffer precision.
buffer_dtype=torch.float16,
)
elif mp == "fp32":
mp_fsdp = MixedPrecision(
param_dtype=torch.float32,
# Gradient communication precision.
reduce_dtype=torch.float32,
# Buffer precision.
buffer_dtype=torch.float32,
)
else:
        raise ValueError(
            "Invalid mp. Expected 'bf16', 'fp16' or 'fp32', got: {}".format(
mp
)
)
if shard_strat == "SHARD_GRAD":
sharding_strat_fsdp = ShardingStrategy.SHARD_GRAD_OP
elif shard_strat == "FULL_SHARD":
sharding_strat_fsdp = ShardingStrategy.FULL_SHARD
elif shard_strat == "NO_SHARD":
sharding_strat_fsdp = ShardingStrategy.NO_SHARD
else:
        raise ValueError(
            "Invalid shard_strat. Expected 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD', got: {}".format(
shard_strat
)
)
model = FullyShardedDataParallel(
model,
auto_wrap_policy=Andromeda_auto_wrap_policy,
mixed_precision=mp_fsdp,
backward_prefetch=BackwardPrefetch.BACKWARD_PRE,
sharding_strategy=sharding_strat_fsdp,
forward_prefetch=True,
use_orig_params=True,
)
return model
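# Illustrative usage sketch for the helper above (not called in this script):
# wraps the GPT3 model with FSDP. Assumes torch.distributed has already been
# initialized (e.g. via accelerate/torchrun); the precision and sharding choices
# are arbitrary examples.
def _example_fsdp_wrap() -> torch.nn.Module:
    return fsdp(GPT3(), auto_wrap=True, mp="bf16", shard_strat="FULL_SHARD")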
# learning rate scheduler
def get_lr_scheduler_with_warmup(
optimizer: torch.optim.Optimizer,
scheduler_type: str,
num_warmup_steps: int,
max_train_steps: int,
grad_accumulate_every: int = 1,
accelerator: Accelerator = None,
):
"""
Get a learning rate scheduler with warmup.
Args:
optimizer (Optimizer): The optimizer for which to create the learning rate scheduler.
scheduler_type (str): The type of learning rate scheduler to create, either "linear" or "cosine".
num_warmup_steps (int): The number of warmup steps for the learning rate scheduler.
max_train_steps (int): The maximum number of training steps.
grad_accumulate_every (int, optional): The gradient accumulation factor. Defaults to 1.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
Returns:
The learning rate scheduler with warmup.
Raises:
ValueError: If scheduler_type is not "linear" or "cosine".
"""
NUM_WARMUP_STEPS = num_warmup_steps
GRADIENT_ACCUMULATE_EVERY = grad_accumulate_every
if accelerator is not None:
accelerator.print(f"Using {scheduler_type} lr scheduler")
if scheduler_type == "linear":
return get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
elif scheduler_type == "cosine":
return get_cosine_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'linear' or 'cosine', got: {}".format(
scheduler_type
)
)
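# Illustrative usage sketch for the helper above; the warmup length and step
# count are arbitrary assumptions, and the optimizer is supplied by the caller.
def _example_cosine_schedule(optimizer: torch.optim.Optimizer):
    return get_lr_scheduler_with_warmup(
        optimizer=optimizer,
        scheduler_type="cosine",
        num_warmup_steps=100,
        max_train_steps=10_000,
    )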
# optimizers
def decoupled_optimizer(
model: torch.nn.Module,
learning_rate: float,
weight_decay: float,
beta_1: float,
beta_2: float,
optimizer_type: str,
use_fsdp: bool = True,
accelerator: Accelerator = None,
):
"""
Decouples the optimizer from the training process.
This function sets up the optimizer for the model by creating two groups of parameters:
one for weight decay and one without weight decay. Then, it initializes the optimizer
with these two groups of parameters.
Args:
model (Module): The model whose parameters are optimized.
learning_rate (float): The learning rate for the optimizer.
weight_decay (float): The weight decay for the optimizer.
beta_1 (float): The exponential decay rate for the 1st moment estimates.
beta_2 (float): The exponential decay rate for the 2nd moment estimates.
optimizer_type (str): The type of the optimizer. Can be 'lion', 'adamw', or 'stable_adamw'.
use_fsdp (bool, optional): If True, the optimizer will work with fully sharded data parallelism. Defaults to True.
accelerator (Accelerator, optional): The accelerator from HuggingFace's Accelerate library. Defaults to None.
Returns:
Optimizer: The initialized optimizer.
Raises:
ValueError: If the optimizer type is not 'lion', 'adamw' or 'stable_adamw'.
"""
accelerator.print(f"Using {optimizer_type} optimizer")
# Create an empty dictionary called param_dict to store the model's named parameters.
param_dict = {}
# Iterate over the model's named parameters and populate the param_dict with key-value pairs.
for param_name, param in model.named_parameters():
param_dict[param_name] = param
# Separate the model's named modules into two groups: decay and no_decay.
# Create an empty list to store the names of the LayerNorm and Embedding layer weights with no weight decay.
no_decay = []
if use_fsdp:
exclude_module = "_fsdp_wrapped_module.token_emb"
else:
exclude_module = "token_emb"
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of any of the desired types (LayerNorm or torch.nn.Embedding).
for ndim in [LayerNorm, torch.nn.Embedding]:
if isinstance(module, ndim):
# If torch.nn.Embedding, append its name with a ".weight" suffix to the no_decay list.
if module_name == exclude_module:
no_decay.append(f"{module_name}.weight")
else:
# If the module is an instance of LayerNorm
no_decay.append(f"{module_name}.gamma")
# Exit the inner loop since the desired module has been found.
break
# Create an empty list to store the names of the Linear layer weights with weight decay.
decay = []
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of the desired type (torch.nn.Linear).
for ndim in [torch.nn.Linear]:
if isinstance(module, ndim):
# If the module is an instance of torch.nn.Linear, append its name with a ".weight" suffix to the decay list.
decay.append(f"{module_name}.weight")
# Exit the inner loop since the desired module has been found.
break
# Create two separate lists of model parameters: decay_param and no_decay_param.
# The decay_param list contains the parameters that should have weight decay applied.
# The no_decay_param list contains the parameters that should not have weight decay applied, excluding the 'to_logits.weight' parameter.
# Create an empty list called decay_param to store the parameters with weight decay.
decay_param = []
if use_fsdp:
exclude_param = "_fsdp_wrapped_module.to_logits.weight"
else:
exclude_param = "to_logits.weight"
# Iterate over the decay list, which contains the names of the parameters with weight decay.
for param in decay:
# Check if the current parameter is not 'to_logits.weight'.
# Append the corresponding parameter from param_dict to the decay_param list.
if param != exclude_param:
decay_param.append(param_dict[param])
# Create an empty list called no_decay_param to store the parameters without weight decay.
no_decay_param = []
# Iterate over the no_decay list, which contains the names of the parameters without weight decay.
for param in no_decay:
try:
# Append the corresponding parameter from param_dict to the no_decay_param list.
no_decay_param.append(param_dict[param])
except KeyError:
# print(f"Parameter {param_name} does not exist in the model")
pass
# Create a list called grouped_params that contains two dictionaries.
# The first dictionary has the decay_param list and the corresponding weight_decay value.
# The second dictionary has the no_decay_param list and a weight_decay value of 0.0.
grouped_params = [
{"params": decay_param, "weight_decay": weight_decay},
{"params": no_decay_param, "weight_decay": 0.0},
]
# Create a variable called optimizer that stores an instance of the optimizer.
if optimizer_type == "lion":
optimizer = Lion(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "adamw":
optimizer = AdamW(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "deepspeed":
optimizer = DummyOptim(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "stable_adamw":
optimizer = StableAdamWUnfused(
grouped_params, lr=learning_rate, betas=(beta_1, beta_2),
)
# elif optimizer_type=="Adam8bit":
# optimizer = bnb.optim.Adam8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2))
# elif optimizer_type=="Lion8Bit":
# optimizer = bnb.optim.Lion8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2))
else:
raise ValueError(
"Invalid optimizer_type. Expected 'lion', 'adamw', 'deepspeed' or 'stable_adamw', got: {}".format(
optimizer_type
)
)
# Return the optimizer.
return optimizer
# dataloaders
def build_dataloaders():
"""
Build data loaders for training.
This function performs the following steps:
1. Load the tokenizer from the pretrained "EleutherAI/gpt-neox-20b" model.
2. Load the "openwebtext" dataset.
3. Tokenize the dataset, adding the end-of-sentence token to each text.
4. Process the tokenized dataset into chunks of a specified block size.
Returns:
Dataset: The processed dataset ready for training.
"""
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
dataset = load_dataset("openwebtext", split="train")
tokenized_dataset = dataset.map(
lambda example: tokenizer([t + tokenizer.eos_token for t in example["text"]]),
batched=True,
num_proc=CFG.NUM_CPU,
remove_columns=["text"],
)
block_size = CFG.SEQ_LEN
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
return result
train_dataset = tokenized_dataset.map(
group_texts, batched=True, num_proc=CFG.NUM_CPU,
)
return train_dataset
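# Worked example of the chunking in group_texts above: with block_size = 4 and
# 10 concatenated token ids, total_length is floored to 8, producing two chunks
# of 4 tokens each; the 2-token remainder is dropped.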
# TODO: switch to the falcon web dataset
def build_pre_tokenized():
d0 = load_dataset("conceptofmind/c4_0-to-20_neox_with_eos_8k", split="train[:10]")
# d1 = load_dataset("conceptofmind/c4_21-to-40_neox_with_eos_8k", split="train")
# d2 = load_dataset("conceptofmind/c4_41-to-60_neox_with_eos_8k", split="train")
# d3 = load_dataset("conceptofmind/c4_61-to-80_neox_with_eos_8k", split="train")
# d4 = load_dataset("conceptofmind/c4_81-to-100_neox_with_eos_8k", split="train")
# train_dataset = concatenate_datasets([d0, d1, d2, d3, d4])
return d0
def Train():
# accelerator
timeout = InitProcessGroupKwargs(timeout=timedelta(seconds=1_000_000))
accelerator = Accelerator(
gradient_accumulation_steps=CFG.GRADIENT_ACCUMULATE_EVERY,
mixed_precision="fp16",
log_with="wandb",
kwargs_handlers=[timeout],
)
    state = AcceleratorState()
    # Keep DeepSpeed's micro-batch size in sync with CFG.BATCH_SIZE
    # (only applies when a DeepSpeed plugin is configured).
    if state.deepspeed_plugin is not None:
        state.deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu'] = CFG.BATCH_SIZE
accelerator.init_trackers(
project_name="Andromeda",
config={
"batch_size": CFG.BATCH_SIZE,
"gradient_accumulate_every": CFG.GRADIENT_ACCUMULATE_EVERY,
"learning_rate": CFG.LEARNING_RATE,
"seq_len": CFG.SEQ_LEN,
},
# init_kwargs={"wandb": {"entity": CFG.ENTITY_NAME}},
)
accelerator.print(f"Total GPUS: {accelerator.num_processes}")
# set seed
set_seed(CFG.SEED)
# model = Andromeda(
# num_tokens=50432,
# max_seq_len=8192,
# dim=3072,
# depth=24,
# dim_head=128,
# heads=12,
# use_abs_pos_emb=False,
# alibi_pos_bias=True,
# alibi_num_heads=6,
# rotary_xpos=True,
# attn_flash=True,
# shift_tokens=1,
# attn_one_kv_head=True,
# qk_norm=True,
# attn_qk_norm=True,
# attn_qk_norm_dim_scale=True,
# embedding_provider=AndromedaEmbedding()
# )
model = GPT3()
print_num_params(model, accelerator)
if CFG.USE_FSDP:
model = fsdp(
model,
mp="fp16",
shard_strat="SHARD_GRAD"
)
if CFG.USE_ACTIVATION_CHECKPOINTING:
        activation_checkpointing(model, accelerator=accelerator)
model = accelerator.prepare(model)
# dataloaders
if CFG.USE_PRETOKENIZED:
train_dataset = build_pre_tokenized()
else:
train_dataset = build_dataloaders()
train_loader = DataLoader(
train_dataset, batch_size=CFG.BATCH_SIZE, collate_fn=default_data_collator,
)
# optimizer
optim = decoupled_optimizer(
model=model,
learning_rate=CFG.LEARNING_RATE,
weight_decay=CFG.WEIGHT_DECAY,
beta_1=0.90,
beta_2=0.95,
optimizer_type='lion',
use_fsdp=True,
accelerator=accelerator
)
# Determine number of training steps
max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"Max train steps: {max_train_steps}")
# lr scheduler
NUM_WARMUP_STEPS = int(max_train_steps * 0.01)
accelerator.print(f"Num warmup steps: {NUM_WARMUP_STEPS}")
# if False: # if CFG.USE_DEEPSPEED:
# lr_scheduler = DummyScheduler(
# optim,
# total_num_steps=max_train_steps * accelerator.num_processes,
# warmup_num_steps=NUM_WARMUP_STEPS
# )
# else:
lr_scheduler = get_lr_scheduler_with_warmup(
optimizer=optim,
scheduler_type="cosine",
num_warmup_steps=NUM_WARMUP_STEPS,
max_train_steps=max_train_steps,
grad_accumulate_every=CFG.GRADIENT_ACCUMULATE_EVERY,
)
# prepare
optim, train_loader, lr_scheduler = accelerator.prepare(
optim, train_loader, lr_scheduler
)
# checkpoint scheduler
accelerator.register_for_checkpointing(lr_scheduler)
    # Recalculate max_train_steps after accelerator.prepare(), as in the HF examples:
    # prepare() shards the dataloader across processes, which changes its length.
max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"Max train steps recalculated: {max_train_steps}")
# Total batch size for logging
total_batch_size = (
CFG.BATCH_SIZE * accelerator.num_processes * CFG.GRADIENT_ACCUMULATE_EVERY
)
accelerator.print(f"Total batch size: {total_batch_size}")
# resume training
progress_bar = tqdm(
range(max_train_steps), disable=not accelerator.is_local_main_process
)
completed_steps = 0
if CFG.RESUME_FROM_CHECKPOINT:
        if CFG.RESUME_FROM_CHECKPOINT is not None and CFG.RESUME_FROM_CHECKPOINT != "":
accelerator.print(f"Resuming from checkpoint {CFG.RESUME_FROM_CHECKPOINT}")
accelerator.load_state(CFG.RESUME_FROM_CHECKPOINT)
path = os.path.basename(CFG.RESUME_FROM_CHECKPOINT)
training_difference = os.path.splitext(path)[0]
# need to multiply `gradient_accumulation_steps` to reflect real steps
resume_step = (
int(training_difference.replace("step_", ""))
* CFG.GRADIENT_ACCUMULATE_EVERY
)
if CFG.RESUME_FROM_CHECKPOINT and resume_step is not None:
train_loader = accelerator.skip_first_batches(train_loader, resume_step)
completed_steps += resume_step
progress_bar.update(resume_step)
# training
model.train()
for step, batch in enumerate(train_loader):
with accelerator.accumulate(model):
inputs = batch["input_ids"].to(accelerator.device)
loss = model(inputs, return_loss=True)
accelerator.backward(loss)
accelerator.log({"loss": loss.item()}, step=step)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(model.parameters(), 1.0)
optim.step()
lr_scheduler.step()
optim.zero_grad()
if accelerator.sync_gradients:
progress_bar.update(1)
completed_steps += 1
if isinstance(CFG.CHECKPOINTING_STEPS, int):
if completed_steps % CFG.CHECKPOINTING_STEPS == 0:
output_dir = f"step_{completed_steps }"
if CFG.OUTPUT_DIR is not None:
output_dir = os.path.join(CFG.OUTPUT_DIR, output_dir)
accelerator.save_state(output_dir)
if completed_steps >= max_train_steps:
break
        # log every CFG.LOGGING_STEPS steps
if CFG.LOGGING_STEPS > 0 and step % CFG.LOGGING_STEPS == 0:
logger.info(
f"Step: {completed_steps}/{max_train_steps}, Loss: {loss.item():.5f}"
)
# end training
# accelerator.print(f"Training Finished")
accelerator.end_training()
# save final model
# accelerator.print(f"Saving model to {CFG.OUTPUT_DIR}")
if CFG.OUTPUT_DIR is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
with accelerator.main_process_first():
accelerator.save(
unwrapped_model.state_dict(), f"{CFG.OUTPUT_DIR}/final/final_model.pt"
)
def train():
    # [CRITICAL] Defaults for a single-node run; override via the environment or
    # `accelerate config` when scaling to multiple GPUs and clusters.
    os.environ.setdefault('MASTER_ADDR', 'localhost')
    os.environ.setdefault('MASTER_PORT', '9994')
    os.environ.setdefault('RANK', '0')  # rank of this process
    os.environ.setdefault('WORLD_SIZE', str(torch.cuda.device_count()))
    dist.init_process_group(backend='nccl')  # init_method defaults to "env://"
    Train()
if __name__ == '__main__':
train()
|
GPT3-main
|
gpt3/train.py
|
CELESTIAL-1-master
|
cstx/__init__.py
|
|
import copy
from inspect import isfunction
from typing import List
from tqdm import tqdm
import torch
import torch.nn as nn
import numpy as np
from functools import partial
from contextlib import contextmanager
from research.BindDiffusion.ldm.modules.diffusionmodules.util import noise_like
#HELPERS
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
class LitEma(nn.Module):
def __init__(self, model, decay=0.9999, use_num_updates=True):
super().__init__()
if decay < 0.0 or decay > 1.0:
raise ValueError('Decay must be between 0 and 1')
self.m_name2s_name = {}
self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_updates
else torch.tensor(-1,dtype=torch.int))
for name, p in model.named_parameters():
if p.requires_grad:
#remove as '.'-character is not allowed in buffers
s_name = name.replace('.','')
self.m_name2s_name.update({name:s_name})
self.register_buffer(s_name,p.clone().detach().data)
self.collected_params = []
def forward(self, model):
decay = self.decay
if self.num_updates >= 0:
self.num_updates += 1
decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))
one_minus_decay = 1.0 - decay
with torch.no_grad():
m_param = dict(model.named_parameters())
shadow_params = dict(self.named_buffers())
for key in m_param:
if m_param[key].requires_grad:
sname = self.m_name2s_name[key]
shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
else:
assert not key in self.m_name2s_name
def copy_to(self, model):
m_param = dict(model.named_parameters())
shadow_params = dict(self.named_buffers())
for key in m_param:
if m_param[key].requires_grad:
m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
else:
assert not key in self.m_name2s_name
def store(self, parameters):
"""
Save the current parameters for restoring later.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
temporarily stored.
"""
self.collected_params = [param.clone() for param in parameters]
def restore(self, parameters):
"""
Restore the parameters stored with the `store` method.
Useful to validate the model with EMA parameters without affecting the
original optimization process. Store the parameters before the
`copy_to` method. After validation (or model saving), use this to
restore the former parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored parameters.
"""
for c_param, param in zip(self.collected_params, parameters):
param.data.copy_(c_param.data)
def singleton(class_):
instances = {}
def getinstance(*args, **kwargs):
if class_ not in instances:
instances[class_] = class_(*args, **kwargs)
return instances[class_]
return getinstance
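# Illustrative only (the dummy class below is an assumption, not part of the
# registry): @singleton memoizes a single instance per decorated class.
def _example_singleton_usage() -> bool:
    @singleton
    class _Dummy:
        pass
    return _Dummy() is _Dummy()  # True: both calls return the memoized instance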
def preprocess_model_args(args):
# If args has layer_units, get the corresponding
# units.
# If args get backbone, get the backbone model.
args = copy.deepcopy(args)
if 'layer_units' in args:
layer_units = [
get_unit()(i) for i in args.layer_units
]
args.layer_units = layer_units
if 'backbone' in args:
args.backbone = get_model()(args.backbone)
return args
@singleton
class get_model(object):
def __init__(self):
self.model = {}
self.version = {}
def register(self, model, name, version='x'):
self.model[name] = model
self.version[name] = version
def __call__(self, cfg, verbose=True):
"""
Construct model based on the config.
"""
t = cfg.type
# the register is in each file
if t.find('audioldm')==0:
pass
elif t.find('autoencoderkl')==0:
pass
elif t.find('optimus')==0:
pass
elif t.find('clip')==0:
pass
elif t.find('clap')==0:
pass
elif t.find('sd')==0:
pass
elif t.find('codi')==0:
pass
elif t.find('openai_unet')==0:
pass
args = preprocess_model_args(cfg.args)
net = self.model[t](**args)
return net
def get_version(self, name):
return self.version[name]
def register(name, version='x'):
def wrapper(class_):
get_model().register(class_, name, version)
return class_
return wrapper
version = '0'
symbol = 'sd'
def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
if schedule == "linear":
betas = (
torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
)
elif schedule == "cosine":
timesteps = (
torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
)
alphas = timesteps / (1 + cosine_s) * np.pi / 2
alphas = torch.cos(alphas).pow(2)
alphas = alphas / alphas[0]
betas = 1 - alphas[1:] / alphas[:-1]
betas = np.clip(betas, a_min=0, a_max=0.999)
elif schedule == "sqrt_linear":
betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
elif schedule == "sqrt":
betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
else:
raise ValueError(f"schedule '{schedule}' unknown.")
return betas.numpy()
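# Worked example for the "linear" branch above: the interpolation is done in
# sqrt-space and squared back, so with the defaults betas runs from
# linear_start (1e-4) at t=0 to linear_end (2e-2) at t=n_timestep-1.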
def extract_into_tensor(a, t, x_shape):
b, *_ = t.shape
out = a.gather(-1, t)
return out.reshape(b, *((1,) * (len(x_shape) - 1)))
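# Shape example for extract_into_tensor: with a of shape (T,), t of shape (B,)
# and x_shape == (B, C, H, W), the gathered values come back as (B, 1, 1, 1),
# ready to broadcast against an image batch.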
def highlight_print(info):
print('')
print(''.join(['#']*(len(info)+4)))
print('# '+info+' #')
print(''.join(['#']*(len(info)+4)))
print('')
class DDPM(nn.Module):
def __init__(self,
unet_config,
timesteps=1000,
use_ema=True,
beta_schedule="linear",
beta_linear_start=1e-4,
beta_linear_end=2e-2,
loss_type="l2",
clip_denoised=True,
cosine_s=8e-3,
given_betas=None,
l_simple_weight=1.,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
parameterization="eps",
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0, ):
super().__init__()
assert parameterization in ["eps", "x0"], \
'currently only supporting "eps" and "x0"'
self.parameterization = parameterization
highlight_print("Running in {} mode".format(self.parameterization))
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.use_positional_encodings = use_positional_encodings
from collections import OrderedDict
self.model = nn.Sequential(OrderedDict([('diffusion_model', get_model()(unet_config))]))
self.use_ema = use_ema
if self.use_ema:
self.model_ema = LitEma(self.model)
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
self.v_posterior = v_posterior
self.l_simple_weight = l_simple_weight
self.original_elbo_weight = original_elbo_weight
self.register_schedule(
given_betas=given_betas,
beta_schedule=beta_schedule,
timesteps=timesteps,
linear_start=beta_linear_start,
linear_end=beta_linear_end,
cosine_s=cosine_s)
self.loss_type = loss_type
self.learn_logvar = learn_logvar
self.logvar = torch.full(
fill_value=logvar_init, size=(self.num_timesteps,))
if self.learn_logvar:
self.logvar = nn.Parameter(self.logvar, requires_grad=True)
def register_schedule(self,
given_betas=None,
beta_schedule="linear",
timesteps=1000,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3):
if given_betas is not None:
betas = given_betas
else:
betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
cosine_s=cosine_s)
alphas = 1. - betas
alphas_cumprod = np.cumprod(alphas, axis=0)
alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
timesteps, = betas.shape
self.num_timesteps = int(timesteps)
self.linear_start = linear_start
self.linear_end = linear_end
assert alphas_cumprod.shape[0] == self.num_timesteps, \
'alphas have to be defined for each timestep'
to_torch = partial(torch.tensor, dtype=torch.float32)
self.register_buffer('betas', to_torch(betas))
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
1. - alphas_cumprod) + self.v_posterior * betas
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
self.register_buffer('posterior_variance', to_torch(posterior_variance))
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
self.register_buffer('posterior_mean_coef1', to_torch(
betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
self.register_buffer('posterior_mean_coef2', to_torch(
(1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
if self.parameterization == "eps":
lvlb_weights = self.betas ** 2 / (
2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
elif self.parameterization == "x0":
lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
else:
raise NotImplementedError("mu not supported")
# TODO how to choose this term
lvlb_weights[0] = lvlb_weights[1]
self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
assert not torch.isnan(self.lvlb_weights).all()
@contextmanager
def ema_scope(self, context=None):
if self.use_ema:
self.model_ema.store(self.model.parameters())
self.model_ema.copy_to(self.model)
if context is not None:
print(f"{context}: Switched to EMA weights")
try:
yield None
finally:
if self.use_ema:
self.model_ema.restore(self.model.parameters())
if context is not None:
print(f"{context}: Restored training weights")
def q_mean_variance(self, x_start, t):
"""
Get the distribution q(x_t | x_0).
:param x_start: the [N x C x ...] tensor of noiseless inputs.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
"""
mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
return mean, variance, log_variance
def predict_start_from_noise(self, x_t, t, noise):
value1 = extract_into_tensor(
self.sqrt_recip_alphas_cumprod, t, x_t.shape)
value2 = extract_into_tensor(
self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
        return value1 * x_t - value2 * noise
def q_posterior(self, x_start, x_t, t):
posterior_mean = (
extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, x, t, clip_denoised: bool):
model_out = self.model(x, t)
if self.parameterization == "eps":
x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
elif self.parameterization == "x0":
x_recon = model_out
if clip_denoised:
x_recon.clamp_(-1., 1.)
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
return model_mean, posterior_variance, posterior_log_variance
@torch.no_grad()
def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
b, *_, device = *x.shape, x.device
model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
noise = noise_like(x.shape, device, repeat_noise)
# no noise when t == 0
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
@torch.no_grad()
def p_sample_loop(self, shape, return_intermediates=False):
device = self.betas.device
b = shape[0]
img = torch.randn(shape, device=device)
intermediates = [img]
for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
clip_denoised=self.clip_denoised)
if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
intermediates.append(img)
if return_intermediates:
return img, intermediates
return img
@torch.no_grad()
def sample(self, batch_size=16, return_intermediates=False):
image_size = self.image_size
channels = self.channels
return self.p_sample_loop((batch_size, channels, image_size, image_size),
return_intermediates=return_intermediates)
def q_sample(self, x_start, t, noise=None):
noise = torch.randn_like(x_start) if noise is None else noise
return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
def get_loss(self, pred, target, mean=True):
if self.loss_type == 'l1':
loss = (target - pred).abs()
if mean:
loss = loss.mean()
elif self.loss_type == 'l2':
if mean:
loss = torch.nn.functional.mse_loss(target, pred)
else:
loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
else:
            raise NotImplementedError(f"unknown loss type '{self.loss_type}'")
return loss
def p_losses(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
model_out = self.model(x_noisy, t)
loss_dict = {}
if self.parameterization == "eps":
target = noise
elif self.parameterization == "x0":
target = x_start
else:
            raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")
loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
log_prefix = 'train' if self.training else 'val'
loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
loss_simple = loss.mean() * self.l_simple_weight
loss_vlb = (self.lvlb_weights[t] * loss).mean()
loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})
loss = loss_simple + self.original_elbo_weight * loss_vlb
loss_dict.update({f'{log_prefix}/loss': loss})
return loss, loss_dict
def forward(self, x, *args, **kwargs):
# b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
# assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
return self.p_losses(x, t, *args, **kwargs)
def on_train_batch_end(self, *args, **kwargs):
if self.use_ema:
self.model_ema(self.model)
version = '0'
symbol = 'codi'
@register('codi', version)
class Celestial(DDPM):
def __init__(self,
audioldm_cfg,
autokl_cfg,
optimus_cfg,
clip_cfg,
clap_cfg,
vision_scale_factor=0.1812,
text_scale_factor=4.3108,
audio_scale_factor=0.9228,
scale_by_std=False,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.audioldm = get_model()(audioldm_cfg)
self.autokl = get_model()(autokl_cfg)
self.optimus = get_model()(optimus_cfg)
self.clip = get_model()(clip_cfg)
self.clap = get_model()(clap_cfg)
if not scale_by_std:
self.vision_scale_factor = vision_scale_factor
self.text_scale_factor = text_scale_factor
self.audio_scale_factor = audio_scale_factor
else:
self.register_buffer("text_scale_factor", torch.tensor(text_scale_factor))
self.register_buffer("audio_scale_factor", torch.tensor(audio_scale_factor))
self.register_buffer('vision_scale_factor', torch.tensor(vision_scale_factor))
self.freeze()
def freeze(self):
self.eval()
for param in self.parameters():
param.requires_grad = False
@property
def device(self):
return next(self.parameters()).device
@torch.no_grad()
def autokl_encode(self, image):
encoder_posterior = self.autokl.encode(image)
z = encoder_posterior.sample().to(image.dtype)
return self.vision_scale_factor * z
@torch.no_grad()
def autokl_decode(self, z):
z = 1. / self.vision_scale_factor * z
return self.autokl.decode(z)
@torch.no_grad()
def optimus_encode(self, text):
if isinstance(text, List):
tokenizer = self.optimus.tokenizer_encoder
token = [tokenizer.tokenize(sentence.lower()) for sentence in text]
token_id = []
for tokeni in token:
token_sentence = [tokenizer._convert_token_to_id(i) for i in tokeni]
token_sentence = tokenizer.add_special_tokens_single_sentence(token_sentence)
token_id.append(torch.LongTensor(token_sentence))
            token_id = torch.nn.utils.rnn.pad_sequence(token_id, batch_first=True, padding_value=0.0)[:, :512]
else:
token_id = text
z = self.optimus.encoder(token_id, attention_mask=(token_id > 0))[1]
z_mu, z_logvar = self.optimus.encoder.linear(z).chunk(2, -1)
return z_mu.squeeze(1) * self.text_scale_factor
@torch.no_grad()
def optimus_decode(self, z, temperature=1.0):
z = 1.0 / self.text_scale_factor * z
return self.optimus.decode(z, temperature)
@torch.no_grad()
def audioldm_encode(self, audio, time=2.0):
encoder_posterior = self.audioldm.encode(audio, time=time)
z = encoder_posterior.sample().to(audio.dtype)
return z * self.audio_scale_factor
@torch.no_grad()
def audioldm_decode(self, z):
if (torch.max(torch.abs(z)) > 1e2):
z = torch.clip(z, min=-10, max=10)
z = 1.0 / self.audio_scale_factor * z
return self.audioldm.decode(z)
@torch.no_grad()
def mel_spectrogram_to_waveform(self, mel):
# Mel: [bs, 1, t-steps, fbins]
if len(mel.size()) == 4:
mel = mel.squeeze(1)
mel = mel.permute(0, 2, 1)
waveform = self.audioldm.vocoder(mel)
waveform = waveform.cpu().detach().numpy()
return waveform
@torch.no_grad()
def clip_encode_text(self, text, encode_type='encode_text'):
swap_type = self.clip.encode_type
self.clip.encode_type = encode_type
embedding = self.clip.encode(text)
self.clip.encode_type = swap_type
return embedding
@torch.no_grad()
def clip_encode_vision(self, vision, encode_type='encode_vision'):
swap_type = self.clip.encode_type
self.clip.encode_type = encode_type
embedding = self.clip.encode(vision)
self.clip.encode_type = swap_type
return embedding
@torch.no_grad()
def clap_encode_audio(self, audio):
embedding = self.clap(audio)
return embedding
def forward(self, x=None, c=None, noise=None, xtype='image', ctype='prompt', u=None, return_algined_latents=False):
if isinstance(x, list):
t = torch.randint(0, self.num_timesteps, (x[0].shape[0],), device=x[0].device).long()
else:
t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=x.device).long()
return self.p_losses(x, c, t, noise, xtype, ctype, u, return_algined_latents)
def apply_model(self, x_noisy, t, cond, xtype='image', ctype='prompt', u=None, return_algined_latents=False):
return self.model.diffusion_model(x_noisy, t, cond, xtype, ctype, u, return_algined_latents)
def get_pixel_loss(self, pred, target, mean=True):
if self.loss_type == 'l1':
loss = (target - pred).abs()
if mean:
loss = loss.mean()
elif self.loss_type == 'l2':
if mean:
loss = torch.nn.functional.mse_loss(target, pred)
else:
loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
else:
            raise NotImplementedError(f"unknown loss type '{self.loss_type}'")
loss = torch.nan_to_num(loss, nan=0.0, posinf=0.0, neginf=-0.0)
return loss
def get_text_loss(self, pred, target):
if self.loss_type == 'l1':
loss = (target - pred).abs()
elif self.loss_type == 'l2':
loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
loss = torch.nan_to_num(loss, nan=0.0, posinf=0.0, neginf=0.0)
return loss
def p_losses(self, x_start, cond, t, noise=None, xtype='image', ctype='prompt', u=None, return_algined_latents=False):
if isinstance(x_start, list):
noise = [torch.randn_like(x_start_i) for x_start_i in x_start] if noise is None else noise
x_noisy = [self.q_sample(x_start=x_start_i, t=t, noise=noise_i) for x_start_i, noise_i in zip(x_start, noise)]
model_output = self.apply_model(x_noisy, t, cond, xtype, ctype, u, return_algined_latents)
if return_algined_latents:
return model_output
if self.parameterization == "x0":
target = x_start
elif self.parameterization == "eps":
target = noise
else:
raise NotImplementedError()
loss = 0.0
for model_output_i, target_i, xtype_i in zip(model_output, target, xtype):
if xtype_i == 'image':
loss_simple = self.get_pixel_loss(model_output_i, target_i, mean=False).mean([1, 2, 3])
elif xtype_i == 'video':
loss_simple = self.get_pixel_loss(model_output_i, target_i, mean=False).mean([1, 2, 3, 4])
elif xtype_i == 'text':
loss_simple = self.get_text_loss(model_output_i, target_i).mean([1])
elif xtype_i == 'audio':
loss_simple = self.get_pixel_loss(model_output_i, target_i, mean=False).mean([1, 2, 3])
loss += loss_simple.mean()
return loss / len(xtype)
else:
noise = torch.randn_like(x_start) if noise is None else noise
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
model_output = self.apply_model(x_noisy, t, cond, xtype, ctype)
if self.parameterization == "x0":
target = x_start
elif self.parameterization == "eps":
target = noise
else:
raise NotImplementedError()
if xtype == 'image':
loss_simple = self.get_pixel_loss(model_output, target, mean=False).mean([1, 2, 3])
elif xtype == 'video':
loss_simple = self.get_pixel_loss(model_output, target, mean=False).mean([1, 2, 3, 4])
elif xtype == 'text':
loss_simple = self.get_text_loss(model_output, target).mean([1])
elif xtype == 'audio':
loss_simple = self.get_pixel_loss(model_output, target, mean=False).mean([1, 2, 3])
loss = loss_simple.mean()
return loss
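    # Editor's note: in the list-valued branch of p_losses above, x_start, model_output and xtype
    # are parallel per-modality lists (e.g. xtype=['image', 'text']), and the returned scalar is
    # the average of the per-modality simple losses.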
|
CELESTIAL-1-master
|
cstx/model.py
|
from setuptools import setup, find_packages
setup(
name = 'AttentionGrid',
packages = find_packages(exclude=['examples']),
version = '0.0.2',
license='APACHE',
description = 'AttentionGrid - Library',
author = 'Phil Wang',
author_email = 'kye@apac.ai',
url = 'https://github.com/kyegomez/AttentionGrid',
long_description_content_type = 'text/markdown',
keywords = [
'artificial intelligence',
'attention mechanism',
    'transformers',
    'attention',
'large language models'
],
install_requires=[
'torch',
    'einops',
'triton',
'jax'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
AttentionGrid-main
|
setup.py
|
AttentionGrid-main
|
AttentionGrid/__init__.py
|
|
from abc import ABC, abstractmethod
class AbstractAttention(ABC):
@abstractmethod
def forward(self, *args, **kwargs):
""" Perform the forward pass of the attention mechanism. """
pass
class AbstractEncoder(ABC):
@abstractmethod
def forward(self, *args, **kwargs):
""" Perform the forward pass of the encoder. """
pass
class AbstractDecoder(ABC):
@abstractmethod
def forward(self, *args, **kwargs):
""" Perform the forward pass of the decoder. """
pass
class AbstractTransformer(AbstractEncoder, AbstractDecoder):
"""
An abstract transformer class that inherits from the abstract encoder and decoder.
This setup allows the transformer to use the forward method from both the encoder and decoder.
"""
@abstractmethod
def forward(self, *args, **kwargs):
""" Perform the forward pass of the transformer. """
pass
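# Editor's usage sketch (not part of the original library): a toy concrete module built on the
# AbstractAttention interface above. The scaled dot-product body is illustrative only.
import torch
import torch.nn.functional as F

class ToyDotProductAttention(AbstractAttention, torch.nn.Module):
    def forward(self, q, k, v):
        # plain scaled dot-product attention over the last dimension
        scores = q @ k.transpose(-2, -1) / (q.shape[-1] ** 0.5)
        return F.softmax(scores, dim=-1) @ v

# attn = ToyDotProductAttention()
# out = attn(torch.randn(2, 8, 16, 64), torch.randn(2, 8, 16, 64), torch.randn(2, 8, 16, 64))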
|
AttentionGrid-main
|
AttentionGrid/abstract/abstract.py
|
AttentionGrid-main
|
AttentionGrid/abstract/__init__.py
|
|
import math
import numpy as np
import torch
import torch.nn as nn
from fairscale.nn import checkpoint_wrapper, wrap
from torchscale.architecture.utils import init_bert_params
from torchscale.component.droppath import DropPath
from torchscale.component.feedforward_network import FeedForwardNetwork, make_experts
from torchscale.component.multihead_attention import MultiheadAttention
from torchscale.component.relative_position_bias import RelativePositionBias
from torchscale.component.xmoe.moe_layer import MOELayer
from torchscale.component.xmoe.routing import Top1Gate, Top2Gate
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
class DecoderLayer(nn.Module):
def __init__(
self,
args,
depth,
is_moe_layer=False,
is_encoder_decoder=False,
):
super().__init__()
self.args = args
self.embed_dim = args.decoder_embed_dim
self.dropout_module = torch.nn.Dropout(args.dropout)
if args.drop_path_rate > 0:
drop_path_prob = np.linspace(0, args.drop_path_rate, args.decoder_layers)[
depth
]
self.drop_path = DropPath(drop_path_prob)
else:
self.drop_path = None
self.self_attn = self.build_self_attention(self.embed_dim, args)
self.normalize_before = args.decoder_normalize_before
self.self_attn_layer_norm = LayerNorm(self.embed_dim, eps=args.layernorm_eps)
if not is_encoder_decoder:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, eps=args.layernorm_eps)
self.is_moe_layer = is_moe_layer
self.ffn_dim = args.decoder_ffn_embed_dim
if not self.is_moe_layer:
self.ffn = self.build_ffn(
self.embed_dim,
self.args,
)
else:
if args.moe_top1_expert:
gate = Top1Gate(
self.embed_dim,
args.moe_expert_count,
use_fp32=args.moe_gating_use_fp32,
moe_eval_capacity_token_fraction=args.moe_eval_capacity_token_fraction,
use_xmoe=args.use_xmoe,
)
else:
gate = Top2Gate(
self.embed_dim,
args.moe_expert_count,
args.moe_gating_use_fp32,
args.moe_second_expert_policy,
args.moe_normalize_gate_prob_before_dropping,
args.moe_eval_capacity_token_fraction,
use_xmoe=args.use_xmoe,
)
experts = make_experts(args, self.embed_dim, self.ffn_dim)
self.moe_layer = MOELayer(gate, experts, args)
self.final_layer_norm = LayerNorm(self.embed_dim, eps=args.layernorm_eps)
if args.deepnorm:
if is_encoder_decoder:
self.alpha = math.pow(3.0 * args.decoder_layers, 0.25)
else:
self.alpha = math.pow(2.0 * args.decoder_layers, 0.25)
else:
self.alpha = 1.0
def build_ffn(self, embed_dim, args):
return FeedForwardNetwork(
embed_dim,
self.ffn_dim,
args.activation_fn,
args.dropout,
args.activation_dropout,
args.layernorm_eps,
args.subln,
)
def build_self_attention(self, embed_dim, args):
return MultiheadAttention(
args,
embed_dim,
args.decoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
encoder_decoder_attention=False,
subln=args.subln,
)
def build_encoder_attention(self, embed_dim, args):
return MultiheadAttention(
args,
embed_dim,
args.decoder_attention_heads,
dropout=args.attention_dropout,
self_attention=False,
encoder_decoder_attention=True,
subln=args.subln,
)
def residual_connection(self, x, residual):
return residual * self.alpha + x
def forward(
self,
x,
encoder_out=None,
encoder_padding_mask=None,
incremental_state=None,
self_attn_mask=None,
self_attn_padding_mask=None,
self_attn_rel_pos=None,
cross_attn_rel_pos=None,
is_first_step=False,
):
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
attn_mask=self_attn_mask,
rel_pos=self_attn_rel_pos,
is_first_step=is_first_step,
)
x = self.dropout_module(x)
if self.drop_path is not None:
x = self.drop_path(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
if self.encoder_attn is not None and encoder_out is not None:
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=None,
rel_pos=cross_attn_rel_pos,
)
x = self.dropout_module(x)
if self.drop_path is not None:
x = self.drop_path(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
if not self.is_moe_layer:
x = self.ffn(x)
l_aux = None
else:
x, l_aux = self.moe_layer(x)
if self.drop_path is not None:
x = self.drop_path(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
return x, attn, None, l_aux
class Decoder(nn.Module):
def __init__(
self,
args,
embed_tokens=None,
embed_positions=None,
output_projection=None,
is_encoder_decoder=False,
**kwargs
):
super().__init__(**kwargs)
self.args = args
self.dropout_module = torch.nn.Dropout(args.dropout)
embed_dim = args.decoder_embed_dim
self.embed_dim = embed_dim
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
self.embed_tokens = embed_tokens
self.embed_positions = embed_positions
if (
output_projection is None
and not args.no_output_layer
and args.vocab_size > 0
):
self.output_projection = self.build_output_projection(args)
else:
self.output_projection = output_projection
if args.layernorm_embedding:
self.layernorm_embedding = LayerNorm(embed_dim, eps=args.layernorm_eps)
else:
self.layernorm_embedding = None
self.layers = nn.ModuleList([])
moe_freq = args.moe_freq
for i in range(args.decoder_layers):
is_moe_layer = moe_freq != 0 and (i + 1) % moe_freq == 0
self.layers.append(
self.build_decoder_layer(
args,
depth=i,
is_moe_layer=is_moe_layer,
is_encoder_decoder=is_encoder_decoder,
)
)
self.num_layers = len(self.layers)
if args.decoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim, eps=args.layernorm_eps)
else:
self.layer_norm = None
self.self_attn_relative_position = None
self.cross_attn_relative_position = None
if args.rel_pos_buckets > 0 and args.max_rel_pos > 0:
self.self_attn_relative_position = RelativePositionBias(
num_buckets=args.rel_pos_buckets,
max_distance=args.max_rel_pos,
n_heads=args.decoder_attention_heads,
)
if is_encoder_decoder:
self.cross_attn_relative_position = RelativePositionBias(
num_buckets=args.rel_pos_buckets,
max_distance=args.max_rel_pos,
n_heads=args.decoder_attention_heads,
)
if args.bert_init:
self.apply(init_bert_params)
if args.deepnorm:
if is_encoder_decoder:
init_scale = math.pow(12.0 * args.decoder_layers, 0.25)
else:
init_scale = math.pow(8.0 * args.decoder_layers, 0.25)
for name, p in self.named_parameters():
if (
"fc1" in name
or "fc2" in name
or "out_proj" in name
or "v_proj" in name
):
p.data.div_(init_scale)
if args.subln:
if is_encoder_decoder:
init_scale = math.sqrt(math.log(args.decoder_layers * 3))
else:
init_scale = math.sqrt(math.log(args.decoder_layers * 2))
for name, p in self.named_parameters():
if "encoder_attn" in name:
continue
if (
"fc1" in name
or "fc2" in name
or "out_proj" in name
or "v_proj" in name
):
p.data.mul_(init_scale)
def build_output_projection(
self,
args,
):
if args.share_decoder_input_output_embed:
output_projection = torch.nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
)
output_projection.weight = self.embed_tokens.weight
else:
output_projection = torch.nn.Linear(
args.decoder_embed_dim, args.vocab_size, bias=False
)
torch.nn.init.normal_(
output_projection.weight, mean=0, std=args.decoder_embed_dim**-0.5
)
return output_projection
def build_decoder_layer(
self, args, depth, is_moe_layer=False, is_encoder_decoder=False
):
layer = DecoderLayer(
args,
depth,
is_moe_layer=is_moe_layer,
is_encoder_decoder=is_encoder_decoder,
)
if args.checkpoint_activations:
layer = checkpoint_wrapper(layer)
if args.fsdp:
layer = wrap(layer)
return layer
def forward_embedding(
self,
tokens,
token_embedding=None,
incremental_state=None,
):
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
tokens, incremental_state=incremental_state
)
if incremental_state is not None and not self.is_first_step(incremental_state):
tokens = tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
if token_embedding is None:
token_embedding = self.embed_tokens(tokens)
x = embed = self.embed_scale * token_embedding
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
return x, embed
def is_first_step(self, incremental_state):
if incremental_state is None:
return False
return incremental_state.get("is_first_step", False)
def forward(
self,
prev_output_tokens,
self_attn_padding_mask=None,
encoder_out=None,
incremental_state=None,
features_only=False,
return_all_hiddens=False,
token_embeddings=None,
**kwargs
):
# embed tokens and positions
x, _ = self.forward_embedding(
prev_output_tokens, token_embeddings, incremental_state
)
is_first_step = self.is_first_step(incremental_state)
# relative position
self_attn_rel_pos_bias = None
slen = prev_output_tokens.size(1)
if self.self_attn_relative_position is not None:
self_attn_rel_pos_bias = self.self_attn_relative_position(
batch_size=x.size(0), qlen=slen, klen=slen
)
if incremental_state is not None and not is_first_step:
self_attn_rel_pos_bias = self_attn_rel_pos_bias[-1:, :, :]
cross_attn_rel_pos_bias = None
if self.cross_attn_relative_position is not None:
cross_attn_rel_pos_bias = self.cross_attn_relative_position(
batch_size=x.size(0),
qlen=slen,
klen=encoder_out["encoder_out"].size(1),
)
if incremental_state is not None and not is_first_step:
cross_attn_rel_pos_bias = cross_attn_rel_pos_bias[-1:, :, :]
# decoder layers
inner_states = [x]
if encoder_out is None:
l_aux = []
else:
l_aux = encoder_out["l_aux"] if "l_aux" in encoder_out else []
for idx, layer in enumerate(self.layers):
if incremental_state is None or is_first_step:
self_attn_mask = torch.triu(
torch.zeros([x.size(1), x.size(1)])
.float()
.fill_(float("-inf"))
.type_as(x),
1,
)
if is_first_step and incremental_state is not None:
if idx not in incremental_state:
incremental_state[idx] = {}
else:
self_attn_mask = None
if idx not in incremental_state:
incremental_state[idx] = {}
x, layer_attn, _, l_aux_i = layer(
x,
encoder_out["encoder_out"] if encoder_out is not None else None,
encoder_out["encoder_padding_mask"]
if encoder_out is not None
else None,
incremental_state[idx] if incremental_state is not None else None,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
self_attn_rel_pos=self_attn_rel_pos_bias,
cross_attn_rel_pos=cross_attn_rel_pos_bias,
is_first_step=is_first_step,
)
l_aux.append(l_aux_i)
inner_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
if not features_only:
x = self.output_layer(x)
return x, {
"inner_states": inner_states,
"l_aux": l_aux,
"attn": None,
}
def output_layer(self, features):
return self.output_projection(features)
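# Editor's usage sketch: building the Decoder above from a minimal config. It assumes torchscale's
# DecoderConfig (matching the torchscale imports at the top of this file) supplies defaults for the
# remaining hyper-parameters; the exact import path, config fields and shapes are assumptions.
def _decoder_demo():
    from torchscale.architecture.config import DecoderConfig
    args = DecoderConfig(
        decoder_layers=2,
        decoder_embed_dim=64,
        decoder_attention_heads=4,
        decoder_ffn_embed_dim=128,
        vocab_size=100,
    )
    embed_tokens = nn.Embedding(args.vocab_size, args.decoder_embed_dim)
    decoder = Decoder(args, embed_tokens=embed_tokens)
    tokens = torch.randint(0, args.vocab_size, (2, 16))  # (batch, seq_len)
    logits, extra = decoder(tokens)
    return logits.shape  # expected: (2, 16, vocab_size)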
|
AttentionGrid-main
|
AttentionGrid/core/decoder.py
|
class Encoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on encoder'
super().__init__(causal = False, **kwargs)
class Decoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on decoder'
super().__init__(causal = True, **kwargs)
class CrossAttender(AttentionLayers):
def __init__(self, **kwargs):
super().__init__(cross_attend = True, only_cross = True, **kwargs)
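# NOTE (editor's comment): these thin wrappers assume an x-transformers-style `AttentionLayers`
# base class is defined or imported elsewhere in the package; it does not appear in this file.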
|
AttentionGrid-main
|
AttentionGrid/core/transformer_wrapper.py
|
import math
import torch
import torch.nn.functional as F
from torch import nn, Tensor
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
# from .multiway_network import MultiwayWrapper
# from .xpos_relative_position import XPOS
from inspect import isfunction
from einops import rearrange
from dataclasses import dataclass
#helpers
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import copy
def MultiwayWrapper(args, module, dim=1):
if args.multiway:
return MultiwayNetwork(module, dim=dim)
return module
def set_split_position(position):
def apply_fn(module):
if hasattr(module, "split_position"):
module.split_position = position
return apply_fn
class MultiwayNetwork(nn.Module):
def __init__(self, module, dim=1):
super().__init__()
self.dim = dim
self.A = module
self.B = copy.deepcopy(module)
self.B.reset_parameters()
self.split_position = -1
def forward(self, x, **kwargs):
if self.split_position == -1:
return self.A(x, **kwargs)
if self.split_position == 0:
return self.B(x, **kwargs)
x1, x2 = torch.split(
x,
[self.split_position, x.size(self.dim) - self.split_position],
dim=self.dim,
)
# x1, x2 = x[:self.split_position], x[self.split_position:]
y1, y2 = self.A(x1, **kwargs), self.B(x2, **kwargs)
return torch.cat([y1, y2], dim=self.dim)
def fixed_pos_embedding(x):
seq_len, dim = x.shape
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim) / dim))
sinusoid_inp = (
torch.einsum("i , j -> i j", torch.arange(0, seq_len, dtype=torch.float), inv_freq).to(x)
)
return torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)
def rotate_every_two(x):
x1 = x[:, :, ::2]
x2 = x[:, :, 1::2]
x = torch.stack((-x2, x1), dim=-1)
return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)')\
def duplicate_interleave(m):
"""
A simple version of `torch.repeat_interleave` for duplicating a matrix while interleaving the copy.
"""
dim0 = m.shape[0]
m = m.view(-1, 1) # flatten the matrix
m = m.repeat(1, 2) # repeat all elements into the 2nd dimension
m = m.view(dim0, -1) # reshape into a matrix, interleaving the copy
return m
def apply_rotary_pos_emb(x, sin, cos, scale=1):
sin, cos = map(lambda t: duplicate_interleave(t * scale), (sin, cos))
# einsum notation for lambda t: repeat(t[offset:x.shape[1]+offset,:], "n d -> () n () (d j)", j=2)
return (x * cos) + (rotate_every_two(x) * sin)
class XPOS(nn.Module):
def __init__(
self, head_dim, scale_base=512
):
super().__init__()
self.head_dim = head_dim
self.scale_base = scale_base
self.register_buffer(
"scale", (torch.arange(0, head_dim, 2) + 0.4 * head_dim) / (1.4 * head_dim)
)
def forward(self, x, offset=0, downscale=False):
length = x.shape[1]
min_pos = -(length + offset) // 2
max_pos = length + offset + min_pos
scale = self.scale ** torch.arange(min_pos, max_pos, 1).to(self.scale).div(self.scale_base)[:, None]
sin, cos = fixed_pos_embedding(scale)
if scale.shape[0] > length:
scale = scale[-length:]
sin = sin[-length:]
cos = cos[-length:]
if downscale:
scale = 1 / scale
x = apply_rotary_pos_emb(x, sin, cos, scale)
return x
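# Editor's usage sketch: applying the XPOS rotary embedding above to per-head query/key tensors
# shaped (batch, seq_len, head_dim); head_dim must be even. Values are illustrative only.
def _xpos_demo():
    xpos = XPOS(head_dim=64)
    q = torch.randn(2, 128, 64)
    k = torch.randn(2, 128, 64)
    q = xpos(q, offset=0, downscale=False)
    k = xpos(k, offset=0, downscale=True)  # keys are down-scaled, mirroring MultiheadAttention.forward below
    return q, k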
class MutliwayEmbedding(MultiwayNetwork):
def __init__(self, modules, dim=1):
super(MultiwayNetwork, self).__init__()
self.dim = dim
assert len(modules) == 2
self.A = modules[0]
self.B = modules[1]
self.split_position = -1
def exists(val):
return val is not None
def pad_at_dim(t, pad, dim = -1, value = 0.):
dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = ((0, 0) * dims_from_right)
return F.pad(t, (*zeros, *pad), value = value)
@dataclass
class Intermediates:
qk_similarities: Tensor = None
    pre_softmax_attn: Tensor = None
    post_softmax_attn: Tensor = None
    def to_tuple(self):
        return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn)
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
#https://github.com/lucidrains/x-transformers/blob/main/x_transformers/x_transformers.py
class AlibiPositionalBias(nn.Module):
def __init__(self, heads, total_heads, **kwargs):
super().__init__()
self.heads = heads
self.total_heads = total_heads
        slopes = Tensor(self._get_slopes(heads))
        slopes = rearrange(slopes, 'h -> h 1 1')  # one scalar slope per ALiBi head
        self.register_buffer('slopes', slopes, persistent=False)
        self.register_buffer('bias', None, persistent=False)
def get_bias(self, i, j, device):
        i_arange = torch.arange(j - i, j, device = device)
j_arange = torch.arange(j, device = device)
bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
return bias
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
        if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
@property
def device(self):
return next(self.buffers()).device
def forward(self, i, j):
        h, device = self.total_heads, self.device
        if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
            return self.bias[..., :i, :j]
        bias = self.get_bias(i, j, device)
        bias = bias * self.slopes
num_heads_unalibied = h - bias.shape[0]
bias = pad_at_dim(bias, (0, num_heads_unalibied), dim=0)
self.register_buffer('bias', bias, persistent=False)
return self.bias
class LearnedAlibiPositionalBias(AlibiPositionalBias):
def __init__(self, heads, total_heads):
super().__init__(heads, total_heads)
log_slopes = torch.log(self.slopes)
self.learned_logslopes = nn.Parameter(log_slopes)
def forward(self, i, j):
h, device = self.heads, self.device
def get_slopes(param):
return pad_at_dim(param.exp(), (0, h - param.shape[0]), dim=-2)
if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
bias = self.bias[..., :i, :j]
else:
bias = self.get_bias(i, j, device)
self.register_buffer('bias', bias, persistent=False)
slopes = get_slopes(self.learned_logslopes)
bias = bias * slopes
return bias
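# Editor's usage sketch: building an ALiBi bias for an 8-head attention map and adding it to raw
# scores of shape (batch * heads, q_len, k_len). Shapes and values are illustrative only.
def _alibi_demo():
    alibi = AlibiPositionalBias(heads=8, total_heads=8)
    scores = torch.randn(2 * 8, 16, 16)      # (batch * heads, i, j)
    bias = alibi(16, 16)                     # (total_heads, i, j)
    return scores + bias.repeat(2, 1, 1)     # tile the per-head bias over the batch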
class MultiheadAttention(nn.Module):
def __init__(
self,
args,
embed_dim,
num_heads,
dropout=0.0,
self_attention=False,
encoder_decoder_attention=False,
subln=False,
# flash_attn=False,
alibi_pos_bias = False,
alibi_num_heads = None,
alibi_learned = False,
# one_write_head=False,
):
super().__init__()
self.args = args
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
        self.scaling = self.head_dim**-0.5
        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention
        # store the ALiBi flags from the constructor so forward() can read them
        self.alibi_pos_bias = alibi_pos_bias
        self.alibi_num_heads = alibi_num_heads
        self.alibi_learned = alibi_learned
        assert self.self_attention ^ self.encoder_decoder_attention
self.k_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.v_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.q_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.out_proj = MultiwayWrapper(
args, nn.Linear(embed_dim, embed_dim, bias=True)
)
self.inner_attn_ln = (
MultiwayWrapper(args, LayerNorm(self.embed_dim, eps=args.layernorm_eps))
if subln and self.self_attention
else None
)
self.dropout_module = torch.nn.Dropout(dropout)
self.xpos = (
XPOS(self.head_dim, args.xpos_scale_base)
if args.xpos_rel_pos and self.self_attention
else None
)
def reset_parameters(self):
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.out_proj.weight)
nn.init.constant_(self.out_proj.bias, 0.0)
    def flash_attn(
        self,
        q, k, v,
        mask=None,
        attn_bias=None,
        causal=False,  # this module keeps no causal flag of its own, so the caller passes it in
    ):
        batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
        # recommended for multi-query / single key-value attention:
        # kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
        if k.ndim == 3:
            k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
        if v.ndim == 3:
            v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
        # handle scale - sdpa scales by dim_head ** -0.5 by default, but cosine-sim attention needs its own scale
        # (qk_norm / scale are optional attributes that are not set in __init__ of this port)
        if getattr(self, 'qk_norm', False):
            default_scale = q.shape[-1] ** -0.5
            q = q * (default_scale / self.scale)
        # check if a mask exists and expand it to a compatible shape
        # the mask is (B, L), so it has to be expanded to (B, H, N, L); True means "may attend"
        if exists(mask):
            assert mask.ndim == 4
            mask = mask.expand(batch, heads, q_len, k_len)
            # manually handle the causal mask if another mask was given
            if causal:
                causal_mask = torch.ones((q_len, k_len), dtype=torch.bool, device=device).triu(k_len - q_len + 1)
                mask = mask & ~causal_mask
                causal = False
        # handle ALiBi positional bias
        # convert from bool to float
        if exists(attn_bias):
            attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, -1, -1, -1)
            # if a mask was given, it already contains the causal mask from the logic above;
            # otherwise, if still causal, mask out the bias at future positions with a large negative number
            mask_value = -torch.finfo(q.dtype).max
            if exists(mask):
                attn_bias = attn_bias.masked_fill(~mask, mask_value // 2)
            elif causal:
                causal_mask = torch.ones((q_len, k_len), dtype=torch.bool, device=device).triu(k_len - q_len + 1)
                attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2)
                causal = False
            # scaled_dot_product_attention accepts attn_mask either as bool or as an additive bias;
            # make it an additive bias here
            mask = attn_bias
        # pick an sdp-kernel config if one was attached to the module, otherwise use the defaults
        from contextlib import nullcontext
        config = getattr(self, 'cuda_config', None) if is_cuda else getattr(self, 'cpu_config', None)
        context = torch.backends.cuda.sdp_kernel(**config._asdict()) if exists(config) else nullcontext()
        # pytorch 2.0 flash attention: q, k, v, mask, dropout, causal
        with context:
            out = F.scaled_dot_product_attention(
                q, k, v,
                attn_mask=mask,
                dropout_p=self.dropout_module.p if self.training else 0.,
                is_causal=causal,
            )
        return out, Intermediates()
def forward(
self,
query,
key,
value,
incremental_state=None,
key_padding_mask=None,
attn_mask=None,
rel_pos=None,
):
bsz, tgt_len, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
key_bsz, src_len, _ = key.size()
assert key_bsz == bsz, f"{query.size(), key.size()}"
assert value is not None
        assert (bsz, src_len) == value.shape[:2]
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
q = q.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2)
k = k.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2)
v = v.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2)
q = q.reshape(bsz * self.num_heads, tgt_len, self.head_dim)
k = k.reshape(bsz * self.num_heads, src_len, self.head_dim)
v = v.reshape(bsz * self.num_heads, src_len, self.head_dim)
# assert not (alibi_pos_bias),
if incremental_state is not None:
if "prev_key" in incremental_state:
prev_key = incremental_state["prev_key"].view(
bsz * self.num_heads, -1, self.head_dim
)
prev_value = incremental_state["prev_value"].view(
bsz * self.num_heads, -1, self.head_dim
)
k = torch.cat([prev_key, k], dim=1)
v = torch.cat([prev_value, v], dim=1)
incremental_state["prev_key"] = k.view(
bsz, self.num_heads, -1, self.head_dim
)
incremental_state["prev_value"] = v.view(
bsz, self.num_heads, -1, self.head_dim
)
src_len = k.size(1)
if self.xpos is not None:
if incremental_state is not None:
offset = src_len - 1
else:
offset = 0
k = self.xpos(k, offset=0, downscale=True)
q = self.xpos(q, offset=offset, downscale=False)
attn_weights = torch.bmm(q, k.transpose(1, 2))
if attn_mask is not None:
attn_weights = torch.nan_to_num(attn_weights)
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if self.alibi_pos_bias:
            # build an ALiBi bias of shape (num_heads, tgt_len, src_len) and tile it over the batch
            alibi_num_heads = default(self.alibi_num_heads, self.num_heads)
            assert alibi_num_heads <= self.num_heads, 'number of ALiBi heads must be less than the total number of heads'
            alibi_pos_klass = LearnedAlibiPositionalBias if self.alibi_learned else AlibiPositionalBias
            alibi = alibi_pos_klass(heads=alibi_num_heads, total_heads=self.num_heads).to(attn_weights.device)
            rel_pos = alibi(tgt_len, src_len).repeat(bsz, 1, 1)
if rel_pos is not None:
rel_pos = rel_pos.view(attn_weights.size())
attn_weights = attn_weights + rel_pos
attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).type_as(
attn_weights
)
attn_probs = self.dropout_module(attn_weights)
attn = torch.bmm(attn_probs, v)
attn = attn.transpose(0, 1).reshape(tgt_len, bsz, embed_dim).transpose(0, 1)
if self.inner_attn_ln is not None:
attn = self.inner_attn_ln(attn)
attn = self.out_proj(attn)
attn_weights = attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
return attn, attn_weights
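# Editor's usage sketch: driving the MultiheadAttention above with a minimal args namespace.
# The field names mirror the attributes the class reads; all values here are illustrative.
def _multihead_attention_demo():
    from types import SimpleNamespace
    args = SimpleNamespace(multiway=False, layernorm_eps=1e-5, xpos_rel_pos=False, xpos_scale_base=512)
    attn = MultiheadAttention(args, embed_dim=64, num_heads=8, dropout=0.0, self_attention=True)
    x = torch.randn(2, 16, 64)                       # (batch, seq_len, embed_dim)
    out, weights = attn(query=x, key=x, value=x)
    return out.shape, weights.shape                  # (2, 16, 64) and (heads, batch, 16, 16)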
|
AttentionGrid-main
|
AttentionGrid/attentions/torchscale_multihead/torchscale_multihead.py
|
AttentionGrid-main
|
AttentionGrid/attentions/torchscale_multihead/__init__.py
|
|
AttentionGrid-main
|
AttentionGrid/attentions/landmark_attention/__init__.py
|
|
import math
import triton
import torch
import triton.language as tl
@triton.jit
def _fwd_kernel(#debug, sdz, sdh, sdm, sdn,
Q, K, V, sm_scale,
Out,
sqz, sqh, sqm, sqd, # shape = (Z,H,N_CTX_Q,D)
skz, skh, skn, skd, # shape = (Z,H,N_CTX_KV,D)
svz, svh, svn, svd, # shape = (Z,H,N_CTX_KV,D)
soz, soh, som, sod, # shape = (Z,H,N_CTX_Q,D)
L, M,
Z, H, N_CTX_Q, N_CTX_KV,
BLOCK: tl.constexpr, # will load BLOCK_M queries, and compute self attention by blocks of BLOCK_N keys
BLOCK_DMODEL: tl.constexpr, # dimensionality of heads: D
N_PREFIX_Q: tl.constexpr,
):
start_m = tl.program_id(0) # idx of sequence length chunk of size 128 (BLOCK_N)
off_hz = tl.program_id(1) # idx of head_batch (unique idx for each head in each batch)
BLOCK_M: tl.constexpr = BLOCK
BLOCK_N: tl.constexpr = BLOCK
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) # indices of queries we want to process
offs_m_real = (start_m + N_PREFIX_Q) * BLOCK_M + tl.arange(0, BLOCK_M) # indices of queries we want to process
offs_m_real += tl.where(tl.arange(0, BLOCK_M) == BLOCK_M - 1, -1, 0)
offs_n = tl.arange(0, BLOCK_N) # indices of keys we want to process, we start from [0, BLOCK_N-1] and update in the loop
offs_d = tl.arange(0, BLOCK_DMODEL) # we want to process all the dimensions of a given head
offs_q = off_hz * sqh + offs_m[:, None] * sqm + offs_d[None, :] * sqd # Q.view(Z*H,N_CTX_Q,D)[off_hz, start_m*BLOCK_M:(start_m+1)*BLOCK_M, :].squeeze() that's a BLOCK_M*D matrix
offs_k = off_hz * skh + offs_n[None, :] * skn + offs_d[:, None] * skd # K.view(Z*H,N_CTX_KV,D)[off_hz, 0:BLOCK_N, :].transpose(1,2).squeeze() that's a D*BLOCK_N matrix
offs_v = off_hz * svh + offs_n[:, None] * svn + offs_d[None, :] * svd # V.view(Z*H,N_CTX_KV,D)[off_hz, 0:BLOCK_N, :].squeeze() that's a BLOCK_N*D matrix
# pointers to m and l
m_prev = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_prev = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# Load values
q_vals = tl.load(Q + offs_q, mask=offs_m[:, None] < N_CTX_Q, other=0)
for start_n in range(0, (N_PREFIX_Q + start_m)):
# Load values for K and K_idx
k_vals = tl.load(K + offs_k, mask=offs_n[None, :] < N_CTX_KV, other=0)
# compute qk
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=q_vals.dtype)
qk += tl.dot(q_vals, k_vals, allow_tf32=False)
qk *= sm_scale
# causal masking
qk = tl.where(offs_m_real[:,None] >= offs_n[None,:], qk, float("-inf"))
landmark_qk = tl.max(tl.where(tl.arange(0, BLOCK_N)[None, :] == BLOCK_N - 1, qk, float("-inf")), 1)
normal_qk = tl.where(tl.arange(0, BLOCK_N)[None, :] == BLOCK_N - 1, float("-inf"), qk)
normal_m = tl.max(normal_qk, 1)
normal_p = tl.exp(normal_qk - normal_m[:, None])
normal_denom = tl.sum(normal_p, 1)
# compute attention weights
m_curr = tl.maximum(landmark_qk, m_prev) # compute new m
m_curr_ = m_curr # tl.where(m_curr != float('-inf'), m_curr, float(0.0)) # ADDITIONAL CHECK IF YOU GET NaNs
l_prev *= tl.exp(m_prev - m_curr_) # correct old l
landmark_p = tl.exp(landmark_qk - m_curr_)
l_curr = landmark_p + l_prev
l_rcp = 1. / l_curr # rescale operands of matmuls
# l_rcp = tl.where((l_rcp == float('inf')), 0, l_rcp) # ADDITIONAL CHECK IF YOU GET NaNs
landmark_p *= l_rcp
acc *= (l_prev * l_rcp)[:, None] # weight for each value vector
# update acc
v_vals = tl.load(V + offs_v, mask=offs_n[:, None] < N_CTX_KV, other=0)
acc += tl.dot((landmark_p[:, None] * normal_p / normal_denom[:, None]).to(Q.dtype.element_ty), v_vals, allow_tf32=False)
# update m_i and l_i
l_prev = l_curr
m_prev = m_curr
# update offsets
offs_n += BLOCK_N
offs_k += BLOCK_N * skn
offs_v += BLOCK_N * svn
k_vals = tl.load(K + offs_k, mask=offs_n[None, :] < N_CTX_KV, other=0)
# compute qk
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=q_vals.dtype)
qk += tl.dot(q_vals, k_vals, allow_tf32=False)
qk *= sm_scale
# causal masking
qk = tl.where(offs_m_real[:,None] >= offs_n[None,:], qk, float("-inf"))
m_curr = tl.maximum(tl.max(qk, 1), m_prev) # compute new m
m_curr_ = m_curr#m_curr_ = tl.where(m_curr != float('-inf'), m_curr, float(0.0))
l_prev *= tl.exp(m_prev - m_curr_) # correct old l
p = tl.exp(qk - m_curr_[:, None])
l_curr = tl.sum(p, 1) + l_prev
l_rcp = 1. / l_curr # rescale operands of matmuls
# l_rcp = tl.where((l_rcp == float('inf')), 0, l_rcp) # ADDITIONAL CHECK IF YOU GET NaNs
p *= l_rcp[:, None]
acc *= (l_prev * l_rcp)[:, None] # weight for each value vector
# update acc
p = p.to(Q.dtype.element_ty)
v_vals = tl.load(V + offs_v, mask=offs_n[:, None] < N_CTX_KV, other=0)
acc += tl.dot(p, v_vals, allow_tf32=False)
l_prev = l_curr
m_prev = m_curr
# store L and M
offs_L = off_hz * N_CTX_Q + offs_m # L is of shape (Z*H, N_CTX_Q), here we point to L[off_hz, start_m*Block_M:(start_m+1)*Block_M]
offs_M = off_hz * N_CTX_Q + offs_m
tl.store(L + offs_L, l_prev, mask=offs_m < N_CTX_Q)
tl.store(M + offs_M, m_prev, mask=offs_m < N_CTX_Q)
# store results to output
offs_o = off_hz * soh + offs_m[:, None] * som + offs_d[None, :] * sod
tl.store(Out + offs_o, acc, mask=offs_m[:, None] < N_CTX_Q)
@triton.jit
def _bwd_preprocess(
Out, soz, soh, som, sod,
DO, L, slzh, slm,
NewDO, Delta, N_CTX_Q,
BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr,
):
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
off_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
off_d = tl.arange(0, D_HEAD)
# load
off_o = off_hz * soh + off_m[:, None] * som + off_d[None, :] * sod
off_l = off_hz * slzh + off_m * slm
o = tl.load(Out + off_o).to(tl.float32)#, mask=off_m[:, None] < N_CTX_Q, other=0.0).to(tl.float32)
do = tl.load(DO + off_o).to(tl.float32)#, mask=off_m[:, None] < N_CTX_Q, other=0.0).to(tl.float32)
denom = tl.load(L + off_l).to(tl.float32)#, mask=off_m < N_CTX_Q, other=1.0).to(tl.float32)
# denom = tl.where(denom == 0, 1.0, denom) # ADDITIONAL CHECK IF YOU GET NaNs
# compute
do = do / denom[:, None]
delta = tl.sum(o * do, axis=1)
# write-back
tl.store(NewDO + off_o, do)#, mask=off_m[:, None] < N_CTX_Q)
tl.store(Delta + off_l, delta)#, mask=off_m < N_CTX_Q)
@triton.jit
def _bwd_kernel(
Q, K, V, sm_scale, Out, DO,
DQ, DK, DV,
L, M,
D,
sqz, sqh, sqm, sqd,
skz, skh, skn, skd,
svz, svh, svn, svd,
Z, H, N_CTX_Q, N_CTX_KV,
BLOCK: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
N_PREFIX_Q: tl.constexpr,
):
off_hz = tl.program_id(0)
off_z = off_hz // H
off_h = off_hz % H
BLOCK_M: tl.constexpr = BLOCK
BLOCK_N: tl.constexpr = BLOCK
# offset pointers for batch/head
Q += off_z * sqz + off_h * sqh
K += off_z * skz + off_h * skh
V += off_z * svz + off_h * svh
DO += off_z * sqz + off_h * sqh
DQ += off_z * sqz + off_h * sqh
DK += off_z * skz + off_h * skh
DV += off_z * svz + off_h * svh
offs_d = tl.arange(0, BLOCK_DMODEL)
# pointer to row-wise quantities in value-like data
D_ptrs = D + off_hz * N_CTX_Q # pointer to D.view(Z*H,N_CTX_Q)[off_hz]
m_ptrs = M + off_hz * N_CTX_Q # pointer to m.view(Z*H,N_CTX_Q)[off_hz]
for start_n in range(0, N_CTX_KV, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
offs_n = start_n + tl.arange(0, BLOCK_N)
# pointers for keys and values
k_ptrs = K + (offs_n[:, None] * skn + offs_d[None, :] * skd)
v_ptrs = V + (offs_n[:, None] * svn + offs_d[None, :] * svd)
# initialize dv amd dk
dv = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
dk = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
k = tl.load(k_ptrs)#, mask=offs_n[:, None] < N_CTX_KV)
v = tl.load(v_ptrs)#, mask=offs_n[:, None] < N_CTX_KV)
if start_n < N_PREFIX_Q * BLOCK_M:
start_q_index = 0
elif N_CTX_Q <= start_n - N_PREFIX_Q * BLOCK_M:
start_q_index = start_n - N_PREFIX_Q * BLOCK_M
else:
first_start_m = start_n - N_PREFIX_Q * BLOCK_M
first_start_m = tl.multiple_of(first_start_m, BLOCK_M)
offs_m = (first_start_m + tl.arange(0, BLOCK_M))
offs_m_real = offs_m + N_PREFIX_Q * BLOCK_M # indices of queries we want to process
offs_m_real += tl.where(tl.arange(0, BLOCK_M) == BLOCK_M - 1, -1, 0)
q_ptrs = Q + (offs_m[:, None] * sqm + offs_d[None, :] * sqd)
do_ptrs = DO + (offs_m[:, None] * sqm + offs_d[None, :] * sqd)
dq_ptrs = DQ + (offs_m[:, None] * sqm + offs_d[None, :] * sqd)
q = tl.load(q_ptrs) #, mask=offs_m[:,None] < N_CTX_Q)
qk = tl.dot(q, tl.trans(k), allow_tf32=False)
qk = tl.where(offs_m_real[:,None] >= (offs_n[None,:]), qk, float("-inf"))
m = tl.load(m_ptrs + offs_m) #, mask=offs_m < N_CTX_Q)
m_ = m # tl.where(m != float('-inf'), m, 0.0)
last_p = tl.exp(qk * sm_scale - m_[:, None])
do = tl.load(do_ptrs) #, mask=offs_m[:,None] < N_CTX_Q)
# compute dv
dv += tl.dot(tl.trans(last_p.to(Q.dtype.element_ty)), do, allow_tf32=False)
Di = tl.load(D_ptrs + offs_m) #, mask=offs_m < N_CTX_Q)
# compute dp = dot(v, do)
last_dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None]
last_dp += tl.dot(do, tl.trans(v), allow_tf32=False)
# compute ds = p * (dp - delta[:, None])
ds = last_p * last_dp * sm_scale
# compute dk = dot(ds.T, q)
dk += tl.dot(tl.trans(ds.to(Q.dtype.element_ty)), q, allow_tf32=False)
dq = tl.load(dq_ptrs) #, mask=offs_m[:,None] < N_CTX_Q)
# compute dq
dq += tl.dot(ds.to(Q.dtype.element_ty), k, allow_tf32=False)
tl.store(dq_ptrs, dq) #, mask=offs_m[:, None] < N_CTX_Q)
start_q_index = first_start_m + BLOCK_M
for start_m in range(start_q_index, N_CTX_Q, BLOCK_M):
start_m = tl.multiple_of(start_m, BLOCK_M)
offs_m = (start_m + tl.arange(0, BLOCK_M))
# offs_m_real = offs_m + N_PREFIX_Q * BLOCK_M # indices of queries we want to process
# offs_m_real += tl.where(tl.arange(0, BLOCK_M) == BLOCK_M - 1, -1, 0)
q_ptrs = Q + (offs_m[:, None] * sqm + offs_d[None, :] * sqd)
do_ptrs = DO + (offs_m[:, None] * sqm + offs_d[None, :] * sqd)
dq_ptrs = DQ + (offs_m[:, None] * sqm + offs_d[None, :] * sqd)
q = tl.load(q_ptrs) #, mask=offs_m[:,None] < N_CTX_Q)
qk = tl.dot(q, tl.trans(k), allow_tf32=False)
qk *= sm_scale
# qk = tl.where(offs_m_real[:,None] >= (offs_n[None,:]), qk, float("-inf")) # This should not be necessary anymore since we separate the first step
landmark_qk = tl.max(tl.where(tl.arange(0, BLOCK_N)[None, :] == BLOCK_N - 1, qk, float("-inf")), 1)
normal_qk = tl.where(tl.arange(0, BLOCK_N)[None, :] == BLOCK_N - 1, float("-inf"), qk)
m = tl.load(m_ptrs + offs_m)#, mask=offs_m < N_CTX_Q)
m_ = m # tl.where(m != float('-inf'), m, 0.0) # ADDITIONAL CHECK IF YOU GET NaNs
p = tl.exp(landmark_qk - m_) # BLOCK_M
do = tl.load(do_ptrs)#, mask=offs_m[:,None] < N_CTX_Q) # BLOCK_M x H
normal_m = tl.max(normal_qk, 1)
normal_p = tl.exp(normal_qk - normal_m[:, None])
normal_p_normalized = normal_p / tl.sum(normal_p, 1)[:, None] # BLOCK_M x (BLOCK_N - 1)
normal_kv = tl.dot(normal_p_normalized.to(Q.dtype.element_ty), v, allow_tf32=False) # BLOCK_M x H
normal_D = tl.sum(do * normal_kv, 1)
# compute dv
dv += tl.dot(tl.trans((p[:, None] * normal_p_normalized).to(Q.dtype.element_ty)), do, allow_tf32=False)
Di = tl.load(D_ptrs + offs_m)#, mask=offs_m < N_CTX_Q)
# compute dp and ds for landmark
dp = tl.zeros([BLOCK_M], dtype=tl.float32) - Di
dp += normal_D
landmark_ds = p * dp
# compute dp and ds for others
normal_dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - normal_D[:, None]
normal_dp += tl.dot(do, tl.trans(v), allow_tf32=False)
normal_ds = p[:, None] * normal_p_normalized * normal_dp
# merge
ds = tl.where(tl.arange(0, BLOCK_N)[None, :] == BLOCK_N - 1, landmark_ds[:, None], normal_ds)
ds *= sm_scale
# compute dk = dot(ds.T, q)
dk += tl.dot(tl.trans(ds.to(Q.dtype.element_ty)), q, allow_tf32=False)
dq = tl.load(dq_ptrs) #, mask=offs_m[:,None] < N_CTX_Q)
# compute dq
dq += tl.dot(ds.to(Q.dtype.element_ty), k, allow_tf32=False)
tl.store(dq_ptrs, dq) #, mask=offs_m[:, None] < N_CTX_Q)
# write-back
dv_ptrs = DV + (offs_n[:, None] * svn + offs_d[None, :] * svd)
dk_ptrs = DK + (offs_n[:, None] * skn + offs_d[None, :] * skd)
tl.store(dv_ptrs, dv) #, mask=offs_n[:, None] < N_CTX_KV)
tl.store(dk_ptrs, dk) #, mask=offs_n[:, None] < N_CTX_KV)
class FusedLandmarkAttention(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, n_prefix_q, sm_scale, block_size):
q = q.contiguous()
k = k.contiguous()
v = v.contiguous()
# shape constraints
batch, nheads, seqlen_q, d = q.shape
_, _, seqlen_k, _ = k.shape
assert k.shape == (batch, nheads, seqlen_k, d)
assert v.shape == (batch, nheads, seqlen_k, d)
assert d <= 128, 'FlashAttention only support head dimensions up to 128'
assert q.dtype == k.dtype == v.dtype, 'All tensors must have the same type'
#assert q.dtype in [torch.float16, torch.bfloat16], 'Only support fp16 and bf16'
assert q.is_cuda and k.is_cuda and v.is_cuda
BLOCK = block_size
o = torch.empty_like(q)
grid = (triton.cdiv(q.shape[2], BLOCK), q.shape[0] * q.shape[1], 1)
L = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
m = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
num_warps = 4 if d <= 64 else 8
_fwd_kernel[grid](
q, k, v, sm_scale,
o,
q.stride(0), q.stride(1), q.stride(2), q.stride(3),
k.stride(0), k.stride(1), k.stride(2), k.stride(3),
v.stride(0), v.stride(1), v.stride(2), v.stride(3),
o.stride(0), o.stride(1), o.stride(2), o.stride(3),
L, m,
q.shape[0], q.shape[1], q.shape[2], k.shape[2],
BLOCK=BLOCK, BLOCK_DMODEL=d,
N_PREFIX_Q=n_prefix_q,
num_warps=num_warps, num_stages=2
)
ctx.save_for_backward(q, k, v, o, L, m)
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.BLOCK_DMODEL = d
ctx.N_PREFIX_Q = n_prefix_q
ctx.BLOCK = BLOCK
return o
@staticmethod
def backward(ctx, do):
BLOCK = ctx.BLOCK
q, k, v, o, l, m = ctx.saved_tensors
assert q.shape[2] % BLOCK == 0, "Backward supported only for full blocks"
assert k.shape[2] % BLOCK == 0, "Backward supported only for full blocks"
do = do.contiguous()
dq = torch.zeros_like(q, dtype=torch.float32)
dk = torch.empty_like(k)
dv = torch.empty_like(v)
do_scaled = torch.empty_like(do)
delta = torch.empty_like(l)
_bwd_preprocess[(ctx.grid[0], ctx.grid[1])](
o, o.stride(0), o.stride(1), o.stride(2), o.stride(3), do, l, l.stride(0), l.stride(1),
do_scaled, delta, q.shape[2],
BLOCK_M=BLOCK, D_HEAD=ctx.BLOCK_DMODEL,
)
_bwd_kernel[(ctx.grid[1],)](
q, k, v, ctx.sm_scale,
o, do_scaled,
dq, dk, dv,
l, m,
delta,
q.stride(0), q.stride(1), q.stride(2), q.stride(3),
k.stride(0), k.stride(1), k.stride(2), k.stride(3),
v.stride(0), v.stride(1), v.stride(2), v.stride(3),
q.shape[0], q.shape[1], q.shape[2], k.shape[2],
BLOCK=BLOCK,
BLOCK_DMODEL=ctx.BLOCK_DMODEL,
N_PREFIX_Q=ctx.N_PREFIX_Q,
num_warps=8,
num_stages=1,
)
return dq, dk, dv, None, None, None
def fused_landmark_attention(q, k, v, is_mem, sm_scale=None, block_size=64):
expected_is_mem = torch.arange(0, is_mem.shape[-1], device=is_mem.device) % block_size == (block_size - 1)
assert (is_mem == expected_is_mem).all()
n_history_kv = k.shape[-2] - q.shape[-2]
assert n_history_kv % block_size == 0
n_history_blocks = n_history_kv // block_size
if sm_scale is None:
sm_scale = 1.0 / math.sqrt(q.size(-1))
return FusedLandmarkAttention.apply(q, k, v, n_history_blocks, sm_scale, block_size)
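# Editor's usage sketch: calling fused_landmark_attention on CUDA fp16 tensors with block_size=64,
# where every 64th key position is a landmark token, matching the `is_mem` layout asserted above.
def _fused_landmark_demo(device='cuda'):
    block_size, batch, heads, seqlen, d = 64, 1, 8, 256, 64
    q = torch.randn(batch, heads, seqlen, d, device=device, dtype=torch.float16)
    k = torch.randn(batch, heads, seqlen, d, device=device, dtype=torch.float16)
    v = torch.randn(batch, heads, seqlen, d, device=device, dtype=torch.float16)
    is_mem = (torch.arange(seqlen, device=device) % block_size) == (block_size - 1)
    return fused_landmark_attention(q, k, v, is_mem, block_size=block_size)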
|
AttentionGrid-main
|
AttentionGrid/attentions/landmark_attention/fused_landmark_attention.py
|
AttentionGrid-main
|
AttentionGrid/attentions/dynamic_sparse_flash_attention/__Init__.py
|
|
import math
import torch
import triton
import triton.language as tl
@triton.jit
def _fwd_kernel_hash(
Q, K, V, sm_scale,
Out,
sqz, sqh, sqm, sqd, # shape = (Z,H,N_CTX_Q,D)
skz, skh, skn, skd, # shape = (Z,H,N_CTX_KV,D)
svz, svh, svn, svd, # shape = (Z,H,N_CTX_KV,D)
soz, soh, som, sod, # shape = (Z,H,N_CTX_Q,D)
Q_idx, K_idx,
sqiz, sqih, sqim, # shape = (Z,H,N_CTX_Q)
skiz, skih, skin, # shape = (Z,H,N_CTX_KV)
Q_hash, K_hash,
sqhz, sqhh, sqhm, # shape = (Z,H,N_CTX_Q)
skhz, skhh, skhn, # shape = (Z,H,N_CTX_KV)
L, M,
Z, H, N_CTX_Q, N_CTX_KV,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, # will load BLOCK_M queries, and compute self attention by blocks of BLOCK_N keys
BLOCK_DMODEL: tl.constexpr # dimensionality of heads: D
):
start_m = tl.program_id(0) # idx of sequence length chunk of size 128 (BLOCK_N)
off_hz = tl.program_id(1) # idx of head_batch (unique idx for each head in each batch)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) # indices of queries we want to process
offs_n = tl.arange(0, BLOCK_N) # indices of keys we want to process, we start from [0, BLOCK_N-1] and update in the loop
offs_d = tl.arange(0, BLOCK_DMODEL) # we want to process all the dimensions of a given head
offs_q = off_hz * sqh + offs_m[:, None] * sqm + offs_d[None, :] * sqd # Q.view(Z*H,N_CTX_Q,D)[off_hz, start_m*BLOCK_M:(start_m+1)*BLOCK_M, :].squeeze() that's a BLOCK_M*D matrix
offs_qi = off_hz * sqih + offs_m * sqim # Q_idx.view(Z*H,N_CTX_Q)[off_hz, start_m*BLOCK_M:(start_m+1)*BLOCK_M] a vector of BLOCK_M indices
offs_qh = off_hz * sqhh + offs_m * sqhm
offs_kh = off_hz * skhh + offs_n * skhn
# pointers to m and l
m_prev = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_prev = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# Load values
qi_vals = tl.load(Q_idx + offs_qi, mask=offs_m < N_CTX_Q, other=-1)
q_vals = tl.load(Q + offs_q, mask=qi_vals[:, None] >= 0)
qh_vals = tl.load(Q_hash + offs_qh, mask=offs_m < N_CTX_Q, other=1e9)
min_q_hash = tl.min(qh_vals, axis=0)
qh_vals = tl.where(offs_m < N_CTX_Q, qh_vals, -1)
max_q_hash = tl.max(qh_vals, axis=0)
end_n = 0
start_n = 0
# Increment the start and end to find start and end blocks
for _ in range(0, N_CTX_KV, BLOCK_N):
kh_vals = tl.load(K_hash + offs_kh, mask=offs_n < N_CTX_KV, other=+1e9)
min_kh = tl.min(kh_vals, axis=0)
if min_kh <= max_q_hash and min_kh != 1e9:
end_n += 1
kh_vals = tl.where(offs_n < N_CTX_KV, kh_vals, -1e9)
max_kh = tl.max(kh_vals, axis=0)
if max_kh < min_q_hash and max_kh != -1e9:
start_n += 1
offs_n += BLOCK_N
offs_kh += BLOCK_N * skhn
# remove unecessary trailing blocks based on causal structure
causal_end_n = end_n
offs_n = BLOCK_N * start_n + tl.arange(0, BLOCK_N)
offs_ki = off_hz * skih + offs_n * skin
max_qi = tl.max(qi_vals, axis=0) # largest query index in block
for i in range(start_n, end_n):
ki_vals = tl.load(K_idx + offs_ki, mask=offs_n < N_CTX_KV, other=1e9)
min_ki = tl.min(ki_vals, axis=0)
if min_ki <= max_qi and min_ki != 1e9:
causal_end_n = i + 1
offs_ki += BLOCK_N * skin
offs_n += BLOCK_N
# re-initialize offsets
offs_n = BLOCK_N * start_n + tl.arange(0, BLOCK_N) # indices of keys we want to process, we start from [0, BLOCK_N-1] and update in the loop
offs_k = off_hz * skh + offs_n[None, :] * skn + offs_d[:, None] * skd # K.view(Z*H,N_CTX_KV,D)[off_hz, 0:BLOCK_N, :].transpose(1,2).squeeze() that's a D*BLOCK_N matrix
offs_v = off_hz * svh + offs_n[:, None] * svn + offs_d[None, :] * svd # V.view(Z*H,N_CTX_KV,D)[off_hz, 0:BLOCK_N, :].squeeze() that's a BLOCK_N*D matrix
offs_ki = off_hz * skih + offs_n * skin # K_idx.view(Z*H,N_CTX_KV)[off_hz, 0:BLOCK_N] a vector of BLOCK_N indices
offs_kh = off_hz * skhh + offs_n * skhn
for _ in range(start_n, causal_end_n):
# Load values for K and K_idx
ki_vals = tl.load(K_idx + offs_ki, mask=offs_n < N_CTX_KV, other=1e9)
kh_vals = tl.load(K_hash + offs_kh, mask=offs_n < N_CTX_KV, other=-1e9)
k_vals = tl.load(K + offs_k, mask=ki_vals[None, :] < 1e9)
# compute qk
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.bfloat16)
qk += tl.dot(q_vals, k_vals)
qk *= sm_scale
# causal masking
qk = tl.where((qi_vals[:,None] >= ki_vals[None,:]) & (qh_vals[:,None] == kh_vals[None,:]), qk, float("-inf"))
# compute attention weights
m_curr = tl.maximum(tl.max(qk, 1), m_prev) # compute new m
m_curr_ = tl.where(m_curr != float('-inf'), m_curr, float(0.0))
l_prev *= tl.exp(m_prev - m_curr_) # correct old l
p = tl.exp(qk - m_curr_[:, None])
l_curr = tl.sum(p, 1) + l_prev
l_rcp = 1. / l_curr # rescale operands of matmuls
l_rcp = tl.where((l_rcp == float('inf')), 0, l_rcp)
p *= l_rcp[:, None]
acc *= (l_prev * l_rcp)[:, None] # weight for each value vector
# update acc
p = p.to(Q.dtype.element_ty)
v_vals = tl.load(V + offs_v, mask=ki_vals[:, None] < 1e9, other=0)
acc += tl.dot(p, v_vals)
# update m_i and l_i
l_prev = l_curr
m_prev = m_curr
# update offsets
offs_n += BLOCK_N
offs_k += BLOCK_N * skn
offs_v += BLOCK_N * svn
offs_ki += BLOCK_N * skin
offs_kh += BLOCK_N * skhn
# store L and M
offs_L = off_hz * N_CTX_Q + offs_m # L is of shape (Z*H, N_CTX_Q), here we point to L[off_hz, start_m*Block_M:(start_m+1)*Block_M]
offs_M = off_hz * N_CTX_Q + offs_m
tl.store(L + offs_L, l_prev, mask=offs_m < N_CTX_Q)
tl.store(M + offs_M, m_prev, mask=offs_m < N_CTX_Q)
# store results to output
offs_o = off_hz * soh + offs_m[:, None] * som + offs_d[None, :] * sod
tl.store(Out + offs_o, acc, mask=offs_m[:, None] < N_CTX_Q)
@triton.jit
def _bwd_preprocess_hash(
Out, soz, soh, som, sod,
DO, L, slzh, slm,
NewDO, Delta, N_CTX_Q,
BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr,
):
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
off_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
off_d = tl.arange(0, D_HEAD)
# load
off_o = off_hz * soh + off_m[:, None] * som + off_d[None, :] * sod
off_l = off_hz * slzh + off_m * slm
o = tl.load(Out + off_o, mask=off_m[:, None] < N_CTX_Q, other=0.0).to(tl.float32)
do = tl.load(DO + off_o, mask=off_m[:, None] < N_CTX_Q, other=0.0).to(tl.float32)
denom = tl.load(L + off_l, mask=off_m < N_CTX_Q, other=1.0).to(tl.float32)
denom = tl.where(denom == 0, 1.0, denom)
# compute
do = do / denom[:, None]
delta = tl.sum(o * do, axis=1)
# write-back
tl.store(NewDO + off_o, do, mask=off_m[:, None] < N_CTX_Q)
tl.store(Delta + off_l, delta, mask=off_m < N_CTX_Q)
@triton.jit
def _bwd_kernel_hash(
Q, K, V, sm_scale, Out, DO,
DQ, DK, DV,
Q_idx, K_idx,
sqiz, sqih, sqim, # shape = (Z,H,N_CTX_Q)
skiz, skih, skin, # shape = (Z,H,N_CTX_KV)
Q_hash, K_hash,
sqhz, sqhh, sqhm, # shape = (Z,H,N_CTX_Q)
skhz, skhh, skhn, # shape = (Z,H,N_CTX_KV)
L, M,
D,
sqz, sqh, sqm, sqd,
skz, skh, skn, skd,
svz, svh, svn, svd,
Z, H, N_CTX_Q, N_CTX_KV,
num_block_q, num_block_kv,
BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
):
off_hz = tl.program_id(0)
off_z = off_hz // H
off_h = off_hz % H
# offset pointers for batch/head
Q += off_z * sqz + off_h * sqh
K += off_z * skz + off_h * skh
V += off_z * svz + off_h * svh
DO += off_z * sqz + off_h * sqh
DQ += off_z * sqz + off_h * sqh
DK += off_z * skz + off_h * skh
DV += off_z * svz + off_h * svh
offs_d = tl.arange(0, BLOCK_DMODEL)
# pointer to row-wise quantities in value-like data
D_ptrs = D + off_hz * N_CTX_Q # pointer to D.view(Z*H,N_CTX_Q)[off_hz]
m_ptrs = M + off_hz * N_CTX_Q # pointer to m.view(Z*H,N_CTX_Q)[off_hz]
for block_id_n in range(0, num_block_kv):
start_n = block_id_n * BLOCK_N
offs_n = start_n + tl.arange(0, BLOCK_N)
offs_ki = off_hz * skih + offs_n * skin
ki_vals = tl.load(K_idx + offs_ki, mask=offs_n < N_CTX_KV, other=1e9)
min_ki = tl.min(ki_vals, axis=0)
ki_vals = tl.where(offs_n < N_CTX_KV, ki_vals, -1)
# pointers for keys and values
k_ptrs = K + (offs_n[:, None] * skn + offs_d[None, :] * skd)
v_ptrs = V + (offs_n[:, None] * svn + offs_d[None, :] * svd)
# initialize dv amd dk
dv = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
dk = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# Find start and end block for those keys
offs_kh = off_hz * skhh + offs_n * skhn
kh_vals = tl.load(K_hash + offs_kh, mask=offs_n < N_CTX_KV, other=1e9)
min_k_hash = tl.min(kh_vals, axis=0)
kh_vals = tl.where(offs_n < N_CTX_KV, kh_vals, -1)
max_k_hash = tl.max(kh_vals, axis=0)
start_blockidx_m = 0
end_blockidx_m = 0
# Increment the start and end to find start and end blocks
offs_m = tl.arange(0, BLOCK_M)
offs_qh = off_hz * sqhh + offs_m * sqhm
for _ in range(0, N_CTX_Q, BLOCK_M):
qh_vals = tl.load(Q_hash + offs_qh, mask=offs_m < N_CTX_Q, other=+1e9)
min_qh = tl.min(qh_vals, axis=0)
if min_qh <= max_k_hash and min_qh != 1e9:
end_blockidx_m += 1
qh_vals = tl.where(offs_m < N_CTX_Q, qh_vals, -1e9)
max_qh = tl.max(qh_vals, axis=0)
if max_qh < min_k_hash and max_qh != -1e9:
start_blockidx_m += 1
offs_m += BLOCK_M
offs_qh += BLOCK_M * sqhm
# remove unecessary trailing blocks based on causal structure
causal_start_n = start_blockidx_m
offs_m = BLOCK_M * start_blockidx_m + tl.arange(0, BLOCK_M)
offs_qi = off_hz * sqih + offs_m * sqim
for i in range(start_blockidx_m, end_blockidx_m):
qi_vals = tl.load(Q_idx + offs_qi, mask=offs_m < N_CTX_Q, other=-1)
max_qi = tl.max(qi_vals, axis=0)
if max_qi < min_ki and max_qi != -1:
causal_start_n = i + 1
            offs_qi += BLOCK_M * sqim
            offs_m += BLOCK_M
k = tl.load(k_ptrs, mask=offs_n[:, None] < N_CTX_KV)
v = tl.load(v_ptrs, mask=offs_n[:, None] < N_CTX_KV)
for start_m in range(causal_start_n * BLOCK_M, end_blockidx_m * BLOCK_M, BLOCK_M):
offs_m = (start_m + tl.arange(0, BLOCK_M))
q_ptrs = Q + (offs_m[:, None] * sqm + offs_d[None, :] * sqd)
do_ptrs = DO + (offs_m[:, None] * sqm + offs_d[None, :] * sqd)
dq_ptrs = DQ + (offs_m[:, None] * sqm + offs_d[None, :] * sqd)
qi_ptrs = Q_idx + (off_hz * sqih + offs_m * sqim)
qh_ptrs = Q_hash + (off_hz * sqhh + offs_m * sqhm)
qi = tl.load(qi_ptrs, mask=offs_m < N_CTX_Q, other=1e9)
qh = tl.load(qh_ptrs, mask=offs_m < N_CTX_Q, other=1e9)
q = tl.load(q_ptrs, mask=offs_m[:,None] < N_CTX_Q)
qk = tl.dot(q, tl.trans(k))
qk = tl.where((qi[:,None] >= ki_vals[None,:]) & (qh[:,None] == kh_vals[None,:]), qk, float("-inf"))
m = tl.load(m_ptrs + offs_m, mask=offs_m < N_CTX_Q)
m_ = tl.where(m != float('-inf'), m, 0.0)
p = tl.exp(qk * sm_scale - m_[:, None])
do = tl.load(do_ptrs, mask=offs_m[:,None] < N_CTX_Q)
# compute dv
dv += tl.dot(tl.trans(p.to(Q.dtype.element_ty)), do)
Di = tl.load(D_ptrs + offs_m, mask=offs_m < N_CTX_Q)
# compute dp = dot(v, do)
dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None]
dp += tl.dot(do, tl.trans(v))
# compute ds = p * (dp - delta[:, None])
ds = p * dp * sm_scale
# compute dk = dot(ds.T, q)
dk += tl.dot(tl.trans(ds.to(Q.dtype.element_ty)), q)
            dq = tl.load(dq_ptrs, mask=offs_m[:, None] < N_CTX_Q)
# compute dq
dq += tl.dot(ds.to(Q.dtype.element_ty), k)
tl.store(dq_ptrs, dq, mask=offs_m[:, None] < N_CTX_Q)
# write-back
dv_ptrs = DV + (offs_n[:, None] * svn + offs_d[None, :] * svd)
dk_ptrs = DK + (offs_n[:, None] * skn + offs_d[None, :] * skd)
tl.store(dv_ptrs, dv, mask=offs_n[:, None] < N_CTX_KV)
tl.store(dk_ptrs, dk, mask=offs_n[:, None] < N_CTX_KV)
class _attention_hash(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, q_idx, k_idx, q_hash, k_hash, sm_scale):
# only support for Ampere now
capability = torch.cuda.get_device_capability()
if capability[0] < 8:
raise RuntimeError("Flash attention currently only supported for compute capability >= 80")
BLOCK = 128
# shape constraints
Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
assert Lq == Lk and Lk == Lv
# assert Lk in {16, 32, 64, 128}
assert Lk in {64} # TODO: fix other cases
o = torch.empty_like(q)
grid = (triton.cdiv(q.shape[2], BLOCK), q.shape[0] * q.shape[1], 1)
L = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
m = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
num_warps = 4 if Lk <= 64 else 8
_fwd_kernel_hash[grid](
q, k, v, sm_scale,
o,
q.stride(0), q.stride(1), q.stride(2), q.stride(3),
k.stride(0), k.stride(1), k.stride(2), k.stride(3),
v.stride(0), v.stride(1), v.stride(2), v.stride(3),
o.stride(0), o.stride(1), o.stride(2), o.stride(3),
q_idx, k_idx,
q_idx.stride(0), q_idx.stride(1), q_idx.stride(2),
k_idx.stride(0), k_idx.stride(1), k_idx.stride(2),
q_hash, k_hash,
q_hash.stride(0), q_hash.stride(1), q_hash.stride(2),
k_hash.stride(0), k_hash.stride(1), k_hash.stride(2),
L, m,
q.shape[0], q.shape[1], N_CTX_Q=q.shape[2], N_CTX_KV=k.shape[2],
BLOCK_M=BLOCK, BLOCK_N=BLOCK, BLOCK_DMODEL=Lk,
num_warps=num_warps, num_stages=2
)
ctx.save_for_backward(q, k, v, o, L, m, q_idx, k_idx, q_hash, k_hash)
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.BLOCK_DMODEL = Lk
return o
@staticmethod
def backward(ctx, do):
BLOCK = 128
q, k, v, o, l, m, q_idx, k_idx, q_hash, k_hash = ctx.saved_tensors
do = do.contiguous()
dq = torch.zeros_like(q, dtype=torch.float32)
dk = torch.empty_like(k)
dv = torch.empty_like(v)
do_scaled = torch.empty_like(do)
delta = torch.empty_like(l)
_bwd_preprocess_hash[(ctx.grid[0], ctx.grid[1])](
o, o.stride(0), o.stride(1), o.stride(2), o.stride(3), do, l, l.stride(0), l.stride(1),
do_scaled, delta, q.shape[2],
BLOCK_M=BLOCK, D_HEAD=ctx.BLOCK_DMODEL,
)
num_block_q = ctx.grid[0]
num_block_kv = math.ceil(k.shape[2] / BLOCK)
_bwd_kernel_hash[(ctx.grid[1],)](
q, k, v, ctx.sm_scale,
o, do_scaled,
dq, dk, dv,
q_idx, k_idx,
q_idx.stride(0), q_idx.stride(1), q_idx.stride(2),
k_idx.stride(0), k_idx.stride(1), k_idx.stride(2),
q_hash, k_hash,
q_hash.stride(0), q_hash.stride(1), q_hash.stride(2),
k_hash.stride(0), k_hash.stride(1), k_hash.stride(2),
l, m,
delta,
q.stride(0), q.stride(1), q.stride(2), q.stride(3),
k.stride(0), k.stride(1), k.stride(2), k.stride(3),
v.stride(0), v.stride(1), v.stride(2), v.stride(3),
q.shape[0], q.shape[1], q.shape[2], k.shape[2],
num_block_q, num_block_kv,
BLOCK_M=BLOCK, BLOCK_N=BLOCK,
BLOCK_DMODEL=ctx.BLOCK_DMODEL, num_warps=8,
num_stages=1,
)
return dq, dk, dv, None, None, None, None, None
hash_sparse_attention_kernel = _attention_hash.apply
@triton.jit
def _fwd_kernel_qk(#debug, sdz, sdh, sdm, sdn,
Q, K, V, sm_scale,
Out,
sqz, sqh, sqm, sqd, # shape = (Z,H,N_CTX_Q,D)
skz, skh, skn, skd, # shape = (Z,H,N_CTX_KV,D)
svz, svh, svn, svd, # shape = (Z,H,N_CTX_KV,D)
soz, soh, som, sod, # shape = (Z,H,N_CTX_Q,D)
Q_idx, K_idx,
sqiz, sqih, sqim, # shape = (Z,H,N_CTX_Q)
skiz, skih, skin, # shape = (Z,H,N_CTX_KV)
L, M,
Z, H, N_CTX_Q, N_CTX_KV,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, # will load BLOCK_M queries, and compute self attention by blocks of BLOCK_N keys
BLOCK_DMODEL: tl.constexpr # dimensionality of heads: D
):
start_m = tl.program_id(0) # idx of sequence length chunk of size 128 (BLOCK_N)
off_hz = tl.program_id(1) # idx of head_batch (unique idx for each head in each batch)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) # indices of queries we want to process
offs_n = tl.arange(0, BLOCK_N) # indices of keys we want to process, we start from [0, BLOCK_N-1] and update in the loop
offs_d = tl.arange(0, BLOCK_DMODEL) # we want to process all the dimensions of a given head
offs_q = off_hz * sqh + offs_m[:, None] * sqm + offs_d[None, :] * sqd # Q.view(Z*H,N_CTX_Q,D)[off_hz, start_m*BLOCK_M:(start_m+1)*BLOCK_M, :].squeeze() that's a BLOCK_M*D matrix
offs_k = off_hz * skh + offs_n[None, :] * skn + offs_d[:, None] * skd # K.view(Z*H,N_CTX_KV,D)[off_hz, 0:BLOCK_N, :].transpose(1,2).squeeze() that's a D*BLOCK_N matrix
offs_v = off_hz * svh + offs_n[:, None] * svn + offs_d[None, :] * svd # V.view(Z*H,N_CTX_KV,D)[off_hz, 0:BLOCK_N, :].squeeze() that's a BLOCK_N*D matrix
offs_qi = off_hz * sqih + offs_m * sqim # Q_idx.view(Z*H,N_CTX_Q)[off_hz, start_m*BLOCK_M:(start_m+1)*BLOCK_M] a vector of BLOCK_M indices
offs_ki = off_hz * skih + offs_n * skin # K_idx.view(Z*H,N_CTX_KV)[off_hz, 0:BLOCK_N] a vector of BLOCK_N indices
# pointers to m and l
m_prev = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_prev = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# Load values
qi_vals = tl.load(Q_idx + offs_qi, mask=offs_m < N_CTX_Q, other=-1)
q_vals = tl.load(Q + offs_q, mask=offs_m[:, None] < N_CTX_Q, other=0)
    max_qi = tl.max(qi_vals, axis=0)  # largest query index in block
end_n = 0
for _ in range(0, N_CTX_KV, BLOCK_N):
ki_vals = tl.load(K_idx + offs_ki, mask=offs_n < N_CTX_KV, other=1e9)
min_ki = tl.min(ki_vals, axis=0)
if min_ki <= max_qi and min_ki != 1e9:
end_n += 1
offs_ki += BLOCK_N * skin
offs_n += BLOCK_N
offs_n = tl.arange(0, BLOCK_N)
offs_ki = off_hz * skih + offs_n * skin
for _ in range(0, end_n):
# Load values for K and K_idx
ki_vals = tl.load(K_idx + offs_ki, mask=offs_n < N_CTX_KV, other=1e9)
k_vals = tl.load(K + offs_k, mask=offs_n[None, :] < N_CTX_KV, other=0)
# compute qk
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.bfloat16)
qk += tl.dot(q_vals, k_vals)
qk *= sm_scale
# causal masking
qk = tl.where(qi_vals[:,None] >= ki_vals[None,:], qk, float("-inf"))
# compute attention weights
m_curr = tl.maximum(tl.max(qk, 1), m_prev) # compute new m
m_curr_ = tl.where(m_curr != float('-inf'), m_curr, float(0.0))
l_prev *= tl.exp(m_prev - m_curr_) # correct old l
p = tl.exp(qk - m_curr_[:, None])
l_curr = tl.sum(p, 1) + l_prev
l_rcp = 1. / l_curr # rescale operands of matmuls
l_rcp = tl.where((l_rcp == float('inf')), 0, l_rcp)
p *= l_rcp[:, None]
acc *= (l_prev * l_rcp)[:, None] # weight for each value vector
# update acc
p = p.to(Q.dtype.element_ty)
v_vals = tl.load(V + offs_v, mask=offs_n[:, None] < N_CTX_KV, other=0)
acc += tl.dot(p, v_vals)
# update m_i and l_i
l_prev = l_curr
m_prev = m_curr
# update offsets
offs_n += BLOCK_N
offs_k += BLOCK_N * skn
offs_v += BLOCK_N * svn
offs_ki += BLOCK_N * skin
# store L and M
offs_L = off_hz * N_CTX_Q + offs_m # L is of shape (Z*H, N_CTX_Q), here we point to L[off_hz, start_m*Block_M:(start_m+1)*Block_M]
offs_M = off_hz * N_CTX_Q + offs_m
tl.store(L + offs_L, l_prev, mask=offs_m < N_CTX_Q)
tl.store(M + offs_M, m_prev, mask=offs_m < N_CTX_Q)
# store results to output
offs_o = off_hz * soh + offs_m[:, None] * som + offs_d[None, :] * sod
tl.store(Out + offs_o, acc, mask=offs_m[:, None] < N_CTX_Q)
@triton.jit
def _bwd_preprocess_qk(
Out, soz, soh, som, sod,
DO, L, slzh, slm,
NewDO, Delta, N_CTX_Q,
BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr,
):
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
off_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
off_d = tl.arange(0, D_HEAD)
# load
off_o = off_hz * soh + off_m[:, None] * som + off_d[None, :] * sod
off_l = off_hz * slzh + off_m * slm
o = tl.load(Out + off_o, mask=off_m[:, None] < N_CTX_Q, other=0.0).to(tl.float32)
do = tl.load(DO + off_o, mask=off_m[:, None] < N_CTX_Q, other=0.0).to(tl.float32)
denom = tl.load(L + off_l, mask=off_m < N_CTX_Q, other=1.0).to(tl.float32)
denom = tl.where(denom == 0, 1.0, denom)
# compute
do = do / denom[:, None]
delta = tl.sum(o * do, axis=1)
# write-back
tl.store(NewDO + off_o, do, mask=off_m[:, None] < N_CTX_Q)
tl.store(Delta + off_l, delta, mask=off_m < N_CTX_Q)
@triton.jit
def _bwd_kernel_qk(
Q, K, V, sm_scale, Out, DO,
DQ, DK, DV,
Q_idx, K_idx,
sqiz, sqih, sqim, # shape = (Z,H,N_CTX_Q)
skiz, skih, skin, # shape = (Z,H,N_CTX_KV)
L, M,
D,
sqz, sqh, sqm, sqd,
skz, skh, skn, skd,
svz, svh, svn, svd,
Z, H, N_CTX_Q, N_CTX_KV,
num_block_q, num_block_kv,
BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
):
off_hz = tl.program_id(0)
off_z = off_hz // H
off_h = off_hz % H
# offset pointers for batch/head
Q += off_z * sqz + off_h * sqh
K += off_z * skz + off_h * skh
V += off_z * svz + off_h * svh
DO += off_z * sqz + off_h * sqh
DQ += off_z * sqz + off_h * sqh
DK += off_z * skz + off_h * skh
DV += off_z * svz + off_h * svh
offs_d = tl.arange(0, BLOCK_DMODEL)
# pointer to row-wise quantities in value-like data
D_ptrs = D + off_hz * N_CTX_Q # pointer to D.view(Z*H,N_CTX_Q)[off_hz]
m_ptrs = M + off_hz * N_CTX_Q # pointer to m.view(Z*H,N_CTX_Q)[off_hz]
for block_id_n in range(0, num_block_kv):
start_n = block_id_n * BLOCK_N
offs_n = start_n + tl.arange(0, BLOCK_N)
offs_ki = off_hz * skih + offs_n * skin
ki_vals = tl.load(K_idx + offs_ki, mask=offs_n < N_CTX_KV, other=1e9)
# pointers for keys and values
k_ptrs = K + (offs_n[:, None] * skn + offs_d[None, :] * skd)
v_ptrs = V + (offs_n[:, None] * svn + offs_d[None, :] * svd)
        # initialize dv and dk (accumulated over the query blocks for this key/value block)
        dv = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
        dk = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
# Find start block for those keys
        min_ki = tl.min(ki_vals, axis=0)  # smallest key index in block
offs_m = tl.arange(0, BLOCK_M)
offs_qi = off_hz * sqih + offs_m * sqim
start_blockidx_m = 0
for _ in range(0, N_CTX_Q, BLOCK_M):
qi_vals = tl.load(Q_idx + offs_qi, mask=offs_m < N_CTX_Q, other=-1)
max_qi = tl.max(qi_vals, axis=0)
if max_qi < min_ki and max_qi != -1:
start_blockidx_m += 1
offs_qi += BLOCK_M * sqim
offs_m += BLOCK_M
k = tl.load(k_ptrs, mask=offs_n[:, None] < N_CTX_KV)
v = tl.load(v_ptrs, mask=offs_n[:, None] < N_CTX_KV)
for start_m in range(start_blockidx_m * BLOCK_M, N_CTX_Q, BLOCK_M):
offs_m = (start_m + tl.arange(0, BLOCK_M))
q_ptrs = Q + (offs_m[:, None] * sqm + offs_d[None, :] * sqd)
do_ptrs = DO + (offs_m[:, None] * sqm + offs_d[None, :] * sqd)
dq_ptrs = DQ + (offs_m[:, None] * sqm + offs_d[None, :] * sqd)
qi_ptrs = Q_idx + (off_hz * sqih + offs_m * sqim)
qi = tl.load(qi_ptrs, mask=offs_m < N_CTX_Q, other=-1)
q = tl.load(q_ptrs, mask=offs_m[:,None] < N_CTX_Q)
qk = tl.dot(q, tl.trans(k))
qk = tl.where((qi[:,None] >= ki_vals[None,:]), qk, float("-inf"))
m = tl.load(m_ptrs + offs_m, mask=offs_m < N_CTX_Q)
m_ = tl.where(m != float('-inf'), m, 0.0)
p = tl.exp(qk * sm_scale - m_[:, None])
do = tl.load(do_ptrs, mask=offs_m[:,None] < N_CTX_Q)
# compute dv
dv += tl.dot(tl.trans(p.to(Q.dtype.element_ty)), do)
Di = tl.load(D_ptrs + offs_m, mask=offs_m < N_CTX_Q)
# compute dp = dot(v, do)
dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None]
dp += tl.dot(do, tl.trans(v))
# compute ds = p * (dp - delta[:, None])
ds = p * dp * sm_scale
# compute dk = dot(ds.T, q)
dk += tl.dot(tl.trans(ds.to(Q.dtype.element_ty)), q)
dq = tl.load(dq_ptrs, mask=offs_m[:,None] < N_CTX_Q)
# compute dq
dq += tl.dot(ds.to(Q.dtype.element_ty), k)
tl.store(dq_ptrs, dq, mask=offs_m[:, None] < N_CTX_Q)
# write-back
dv_ptrs = DV + (offs_n[:, None] * svn + offs_d[None, :] * svd)
dk_ptrs = DK + (offs_n[:, None] * skn + offs_d[None, :] * skd)
tl.store(dv_ptrs, dv, mask=offs_n[:, None] < N_CTX_KV)
tl.store(dk_ptrs, dk, mask=offs_n[:, None] < N_CTX_KV)
class _attention_qk(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, q_idx, k_idx, sm_scale):
# only support for Ampere now
capability = torch.cuda.get_device_capability()
if capability[0] < 8:
raise RuntimeError("Flash attention currently only supported for compute capability >= 80")
BLOCK = 128
# shape constraints
Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
assert Lq == Lk and Lk == Lv
# assert Lk in {16, 32, 64, 128}
assert Lk in {64} # TODO: fix other cases
o = torch.empty_like(q)
grid = (triton.cdiv(q.shape[2], BLOCK), q.shape[0] * q.shape[1], 1)
L = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
m = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
num_warps = 4 if Lk <= 64 else 8
_fwd_kernel_qk[grid](
q, k, v, sm_scale,
o,
q.stride(0), q.stride(1), q.stride(2), q.stride(3),
k.stride(0), k.stride(1), k.stride(2), k.stride(3),
v.stride(0), v.stride(1), v.stride(2), v.stride(3),
o.stride(0), o.stride(1), o.stride(2), o.stride(3),
q_idx, k_idx,
q_idx.stride(0), q_idx.stride(1), q_idx.stride(2),
k_idx.stride(0), k_idx.stride(1), k_idx.stride(2),
L, m,
q.shape[0], q.shape[1], q.shape[2], k.shape[2],
BLOCK_M=BLOCK, BLOCK_N=BLOCK, BLOCK_DMODEL=Lk,
num_warps=num_warps, num_stages=2
)
ctx.save_for_backward(q, k, v, o, L, m, q_idx, k_idx)
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.BLOCK_DMODEL = Lk
return o
@staticmethod
def backward(ctx, do):
BLOCK = 128
q, k, v, o, l, m, q_idx, k_idx = ctx.saved_tensors
do = do.contiguous()
dq = torch.zeros_like(q, dtype=torch.float32)
dk = torch.empty_like(k)
dv = torch.empty_like(v)
do_scaled = torch.empty_like(do)
delta = torch.empty_like(l)
_bwd_preprocess_qk[(ctx.grid[0], ctx.grid[1])](
o, o.stride(0), o.stride(1), o.stride(2), o.stride(3), do, l, l.stride(0), l.stride(1),
do_scaled, delta, q.shape[2],
BLOCK_M=BLOCK, D_HEAD=ctx.BLOCK_DMODEL,
)
num_block_q = ctx.grid[0]
num_block_kv = math.ceil(k.shape[2] / BLOCK)
_bwd_kernel_qk[(ctx.grid[1],)](
q, k, v, ctx.sm_scale,
o, do_scaled,
dq, dk, dv,
q_idx, k_idx,
q_idx.stride(0), q_idx.stride(1), q_idx.stride(2),
k_idx.stride(0), k_idx.stride(1), k_idx.stride(2),
l, m,
delta,
q.stride(0), q.stride(1), q.stride(2), q.stride(3),
k.stride(0), k.stride(1), k.stride(2), k.stride(3),
v.stride(0), v.stride(1), v.stride(2), v.stride(3),
q.shape[0], q.shape[1], q.shape[2], k.shape[2],
num_block_q, num_block_kv,
BLOCK_M=BLOCK, BLOCK_N=BLOCK,
BLOCK_DMODEL=ctx.BLOCK_DMODEL, num_warps=8,
num_stages=1,
)
return dq, dk, dv, None, None, None
qk_sparse_attention_kernel = _attention_qk.apply
#wrapper with rpe and post processing
def dynamic_sparse_attention(q, k, v, q_idx, k_idx, sm_scale=None, sparsity_mode='hash'):
"""
Keyword arguments:
q: query tensor of shape (BATCH, N_CTX_Q, H, D_HEAD)
k: key tensor of shape (BATCH, N_CTX_KV, H, D_HEAD)
v: value tensor of shape (BATCH, N_CTX_KV, H, D_HEAD)
    q_idx: tensor of shape (BATCH, N_CTX_Q, H). For each sequence in the batch, each query in the sequence, and each head,
        it holds either the bucket index if sparsity_mode=='hash', or whether to keep that (query, head) pair if sparsity_mode=='qk'.
        The dtype should be torch.int32 if sparsity_mode=='hash' and torch.float if sparsity_mode=='qk'.
    k_idx: tensor of shape (BATCH, N_CTX_KV, H). For each sequence in the batch, each key in the sequence, and each head,
        it holds either the bucket index if sparsity_mode=='hash', or whether to keep that (key, head) pair if sparsity_mode=='qk'.
        The dtype should be torch.int32 if sparsity_mode=='hash' and torch.float if sparsity_mode=='qk'.
sm_scale: normalization constant, 1/sqrt(D_HEAD) unless specified
sparsity_mode: 'hash' to select the hash-sparse implementation and 'qk' for the qk-sparse implementation
"""
if sm_scale is None:
sm_scale = 1.0 / math.sqrt(q.size(-1))
if sparsity_mode == 'hash':
        return hash_sparse_attention(q, k, v, q_hash=q_idx, k_hash=k_idx, sm_scale=sm_scale)
elif sparsity_mode == 'qk':
return qk_sparse_attention(q, k, v, q_keep=q_idx, k_keep=k_idx, sm_scale=sm_scale)
else:
        raise KeyError(f"Unknown sparsity_mode: {sparsity_mode}, should be in ['hash', 'qk']")
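# --- Added usage sketch (not part of the original file). A minimal illustration of calling
# dynamic_sparse_attention in 'hash' mode, assuming CUDA bfloat16 tensors of shape
# (BATCH, N_CTX, H, D_HEAD) and int32 bucket ids of shape (BATCH, N_CTX, H), mirroring the
# benchmark code further below. The function name and the sizes here are illustrative only.
def _example_hash_sparse_call(batch=2, n_ctx=1024, heads=8, d_head=64, num_buckets=16):
    q = torch.randn(batch, n_ctx, heads, d_head, device="cuda", dtype=torch.bfloat16)
    k = torch.randn(batch, n_ctx, heads, d_head, device="cuda", dtype=torch.bfloat16)
    v = torch.randn(batch, n_ctx, heads, d_head, device="cuda", dtype=torch.bfloat16)
    # per-(token, head) bucket ids; queries only attend to keys in the same bucket
    q_hash = torch.randint(0, num_buckets, (batch, n_ctx, heads), device="cuda", dtype=torch.int32)
    k_hash = torch.randint(0, num_buckets, (batch, n_ctx, heads), device="cuda", dtype=torch.int32)
    return dynamic_sparse_attention(q, k, v, q_hash, k_hash, sparsity_mode='hash')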
def compact(keep_tensor, x, index=None):
"""
Build a compact representation of x
keyword args:
x: input tensor to compact, x.shape = (BATCH, N_CTX, H, D_HEAD)
keep_tensor: float tensor of shape (BATCH, N_CTX, H) containing a 1 when the head is kept else 0
"""
BATCH, T, H, D_HEAD = x.shape
if index is None:
with torch.no_grad():
indices_per_head = keep_tensor.sum(dim=-2)
            buffer_size = indices_per_head.max().int()  # indices_per_head counts the kept elements per head; buffer_size is the max over batch and heads
            # the sort must be stable, otherwise causal masking downstream would break
sorted = keep_tensor.sort(dim=-2, descending=True, stable=True) # sorted.indices.shape == (BATCH x T x H) , now sorted over sequence T
index = sorted.indices[:,:buffer_size,:] # (BATCH x buffer_size x H) expand indices to cover all the dimensions for each heads
else:
indices_per_head = None
compact_x = x.gather(dim=-3, index=index.unsqueeze(-1).expand(-1,-1,-1,D_HEAD)) # (BATCH x buffer_size x H x D_HEAD) / expand indices to cover all the dimensions for each heads
return compact_x, index, indices_per_head
@torch.no_grad()
def pad_index(index, indices_per_head, pad_idx=-1):
    # pad the index tensor to comply with the kernel => returns a copy
    # keyword args:
    # index: index tensor to pad, as returned by `compact`, index.shape = (BATCH, buffer_size, H). For each batch, slot, and head, it holds the original sequence position it was gathered from.
    # indices_per_head: of shape (BATCH, H); for each head, contains how many indices have not been dropped
BATCH, buffer_size, H = index.shape
index_copy = torch.clone(index).type(torch.int32)
mask = torch.arange(buffer_size, device=index.device).view(1,-1,1).expand(BATCH,buffer_size,H) >= indices_per_head.view(BATCH,1,-1)
index_copy[mask] = pad_idx
return index_copy
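# --- Added illustration (not in the original file). A tiny CPU-only example of how `compact`
# and `pad_index` cooperate: heads keep different numbers of tokens, `compact` gathers the kept
# ones into a shared buffer, and `pad_index` marks the unused slots. Names and sizes are illustrative.
def _example_compact_and_pad(batch=1, n_ctx=6, heads=2, d_head=4):
    x = torch.randn(batch, n_ctx, heads, d_head)
    keep = (torch.rand(batch, n_ctx, heads) > 0.5).float()  # 1.0 = keep this (token, head) pair
    x_c, index, iph = compact(keep, x)                 # x_c: (batch, buffer_size, heads, d_head)
    index_padded = pad_index(index, iph, pad_idx=-1)   # slots beyond each head's count -> pad_idx
    return x_c, index_padded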
def qk_sparse_attention(q, k, v, q_keep, k_keep, sm_scale):
assert q_keep.dtype == torch.float and k_keep.dtype == torch.float
BATCH, N_CTX_Q, H, D_HEAD = q.shape
#building compact representations
q_c, q_idx, iph_q = compact(q_keep, q) # q_c.shape = (BATCH, compact_N_CTX_Q, h)
k_c, k_idx, iph_k = compact(k_keep, k) # k_c.shape = (BATCH, compact_N_CTX_KV, H)
v_c, _, _ = compact(k_keep, v, index=k_idx) # v_c.shape = (BATCH, compact_N_CTX_KV, H)
q_idx_padded = pad_index(q_idx, iph_q, pad_idx=-1)# (B, compact_N_CTX_Q, h)
k_idx_padded = pad_index(k_idx, iph_k, pad_idx=1e9) # (B, compact_N_CTX_KV, h)
    # we need to transpose everything
    q_c = q_c.transpose(1, 2).contiguous()  # (BATCH, H, compact_N_CTX_Q, D_HEAD)
    k_c = k_c.transpose(1, 2).contiguous()  # (BATCH, H, compact_N_CTX_KV, D_HEAD)
    v_c = v_c.transpose(1, 2).contiguous()  # (BATCH, H, compact_N_CTX_KV, D_HEAD)
    q_idx_padded = q_idx_padded.transpose(1, 2).contiguous()  # (BATCH, H, compact_N_CTX_Q)
    k_idx_padded = k_idx_padded.transpose(1, 2).contiguous()  # (BATCH, H, compact_N_CTX_KV)
    y_c = qk_sparse_attention_kernel(q_c, k_c, v_c, q_idx_padded, k_idx_padded, sm_scale).transpose(1, 2)
y = torch.zeros_like(q).scatter(dim=1, index=q_idx.long().view(BATCH,-1,H,1).expand(BATCH, -1, H, D_HEAD), src=y_c)
return y
def hash_sparse_attention(q, k, v, q_hash, k_hash, sm_scale):
assert q_hash.dtype == torch.int32 and k_hash.dtype == torch.int32
BATCH, N_CTX_Q, H, D_HEAD = q.shape
q = q.transpose(1, 2) # (BATCH, H, N_CTX_Q, D_HEAD)
k = k.transpose(1, 2) # (BATCH, H, N_CTX_KV, D_HEAD)
    v = v.transpose(1, 2)  # (BATCH, H, N_CTX_KV, D_HEAD)
    # bring the hash tensors to (BATCH, H, N_CTX) so the per-head sort below matches the transposed q/k/v layout
    q_hash = q_hash.transpose(1, 2)
    k_hash = k_hash.transpose(1, 2)
    # reorder the queries, keys and values according to q_hash and k_hash
q_hash = q_hash.sort(dim=-1, stable=True) # q_hash.shape = (BATCH, H, N_CTX_Q), stable sort to keep time ordering within a bucket
k_hash = k_hash.sort(dim=-1, stable=True) # k_hash.shape = (BATCH, H, N_CTX_KV)
q_idx = q_hash.indices
k_idx = k_hash.indices
q_hash = q_hash.values
k_hash = k_hash.values
q_idx_extended = q_idx.unsqueeze(-1).expand_as(q)
    k_idx_extended = k_idx.unsqueeze(-1).expand_as(k)
q = torch.gather(q, dim=-2, index=q_idx_extended).contiguous()
k = torch.gather(k, dim=-2, index=k_idx_extended).contiguous()
v = torch.gather(v, dim=-2, index=k_idx_extended).contiguous()
y = hash_sparse_attention_kernel(q, k, v, q_idx, k_idx, q_hash, k_hash, sm_scale)
y = torch.zeros((BATCH, H, N_CTX_Q, D_HEAD), dtype=q.dtype, device=q.device).scatter(dim=2, index=q_idx_extended, src=y).transpose(1,2).contiguous()
return y
def get_tensors(BATCH, H, N_CTX, D_HEAD):
q = torch.randn((BATCH, N_CTX, H, D_HEAD), dtype=torch.bfloat16, device="cuda", requires_grad=True)
k = torch.rand((BATCH, N_CTX, H, D_HEAD), dtype=torch.bfloat16, device="cuda", requires_grad=True)
v = torch.randn((BATCH, N_CTX, H, D_HEAD), dtype=torch.bfloat16, device="cuda", requires_grad=True)
return q, k, v
def flashattention(q, k, v):
BATCH, N_CTX, H, D_HEAD = q.shape
q = q.view(BATCH, N_CTX, H, D_HEAD).transpose(1, 2) # (BATCH, H, N_CTX, D_HEAD)
k = k.view(BATCH, N_CTX, H, D_HEAD).transpose(1, 2) # (BATCH, H, N_CTX, D_HEAD)
    v = v.view(BATCH, N_CTX, H, D_HEAD).transpose(1, 2)  # (BATCH, H, N_CTX, D_HEAD)
    y = torch.nn.functional.scaled_dot_product_attention(q, k, v, dropout_p=0.0, attn_mask=None, is_causal=True)
return y.transpose(1, 2).contiguous()
#hash sparse
mode = "fwd+bwd"
BATCH, H, D_HEAD= 4, 48, 64
N_CTXs = [256, 512, 1024, 2048, 4096, 8192, 16384, 32768]
def sparse_attn(num_buckets_or_sparsity, n_ctx, mode='fwd'):
q, k, v = get_tensors(BATCH, H, n_ctx, D_HEAD)
if isinstance(num_buckets_or_sparsity, int): # hash mode
q_hash = torch.randint(0, num_buckets_or_sparsity, (BATCH, n_ctx, H), dtype=torch.int32, device="cuda")
k_hash = torch.randint(0, num_buckets_or_sparsity, (BATCH, n_ctx, H), dtype=torch.int32, device="cuda")
def fn():
return dynamic_sparse_attention(q, k, v, q_hash, k_hash, sm_scale=1.0 / math.sqrt(q.size(-1)), sparsity_mode="hash")
else: # qk mode
q_keep = (torch.rand((BATCH, n_ctx, H), dtype=torch.bfloat16, device="cuda") > num_buckets_or_sparsity).float()
k_keep = (torch.rand((BATCH, n_ctx, H), dtype=torch.bfloat16, device="cuda") > num_buckets_or_sparsity).float()
def fn():
return dynamic_sparse_attention(q, k, v, q_keep, k_keep, sm_scale=1.0 / math.sqrt(q.size(-1)), sparsity_mode="qk")
if mode == 'fwd':
return fn()
elif mode == 'bwd':
o = fn()
do = torch.randn_like(o)
return o.backward(do, retain_graph=True)
else: # mode == 'fwd+bwd'
o = fn()
do = torch.randn_like(o)
return o.backward(do, retain_graph=True)
def flash_attn(n_ctx, mode='fwd'):
q, k, v = get_tensors(BATCH, H, n_ctx, D_HEAD)
def fn():
return flashattention(q, k, v)
if mode == 'fwd':
return fn()
elif mode == 'bwd':
o = fn()
do = torch.randn_like(o)
return o.backward(do, retain_graph=True)
else: # mode == 'fwd+bwd'
o = fn()
do = torch.randn_like(o)
return o.backward(do, retain_graph=True)
|
AttentionGrid-main
|
AttentionGrid/attentions/dynamic_sparse_flash_attention/dynamic_sparse_triton.py
|
import torch
import torch.nn as nn
from einops import rearrange
from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
from flash_attn.bert_padding import unpad_input, pad_input
class FlashAttention(nn.Module):
"""Implement the scaled dot product attention with softmax.
Arguments
---------
softmax_scale: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.0)
"""
def __init__(self, softmax_scale=None, attention_dropout=0.0):
super().__init__()
self.softmax_scale = softmax_scale
self.dropout_p = attention_dropout
def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
max_s=None, need_weights=False):
"""Implements the multihead softmax attention.
Arguments
---------
qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
if unpadded: (nnz, 3, h, d)
key_padding_mask: a bool tensor of shape (B, S)
"""
assert not need_weights
assert qkv.dtype in [torch.float16, torch.bfloat16]
assert qkv.is_cuda
if cu_seqlens is None:
batch_size = qkv.shape[0]
seqlen = qkv.shape[1]
if key_padding_mask is None:
qkv = rearrange(qkv, 'b s ... -> (b s) ...')
max_s = seqlen
cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
device=qkv.device)
output = flash_attn_unpadded_qkvpacked_func(
qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
softmax_scale=self.softmax_scale, causal=causal
)
output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
else:
nheads = qkv.shape[-2]
x = rearrange(qkv, 'b s three h d -> b s (three h d)')
x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
output_unpad = flash_attn_unpadded_qkvpacked_func(
x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
softmax_scale=self.softmax_scale, causal=causal
)
output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
indices, batch_size, seqlen),
'b s (h d) -> b s h d', h=nheads)
else:
assert max_s is not None
output = flash_attn_unpadded_qkvpacked_func(
qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
softmax_scale=self.softmax_scale, causal=causal
)
return output, None
class FlashMHA(nn.Module):
def __init__(self, embed_dim, num_heads, bias=True, batch_first=True, attention_dropout=0.0,
causal=False, device=None, dtype=None) -> None:
assert batch_first
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.embed_dim = embed_dim
self.causal = causal
self.num_heads = num_heads
        assert self.embed_dim % num_heads == 0, "embed_dim must be divisible by num_heads"
self.head_dim = self.embed_dim // num_heads
assert self.head_dim % 8 == 0 and self.head_dim <= 128, "Only support head_dim <= 128 and divisible by 8"
self.Wqkv = nn.Linear(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
self.inner_attn = FlashAttention(attention_dropout=attention_dropout)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs)
def forward(self, x, key_padding_mask=None, need_weights=False):
"""x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim)
key_padding_mask: bool tensor of shape (batch, seqlen)
"""
qkv = self.Wqkv(x)
qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)
context, attn_weights = self.inner_attn(qkv, key_padding_mask=key_padding_mask,
need_weights=need_weights, causal=self.causal)
return self.out_proj(rearrange(context, 'b s h d -> b s (h d)')), attn_weights
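# --- Added usage sketch (not part of the original file). FlashMHA consumes (batch, seqlen,
# embed_dim) activations and requires a CUDA device with fp16/bf16 inputs plus the flash_attn
# package imported above; the function name and sizes below are assumptions for illustration only.
def _example_flash_mha(batch=2, seqlen=128, embed_dim=512, num_heads=8):
    mha = FlashMHA(embed_dim, num_heads, causal=True, device="cuda", dtype=torch.float16)
    x = torch.randn(batch, seqlen, embed_dim, device="cuda", dtype=torch.float16)
    out, _ = mha(x)  # out: (batch, seqlen, embed_dim)
    return out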
|
AttentionGrid-main
|
AttentionGrid/attentions/stacked_flash_attention/attention.py
|
AttentionGrid-main
|
AttentionGrid/attentions/stacked_flash_attention/__init__.py
|
|
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from einops import rearrange
def pad_at_dim(t, pad, dim=-1, value=0.):
dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = ((0, 0) * dims_from_right)
return F.pad(t, (*zeros, *pad), value=value)
def exists(val):
return val is not None
class VisionLanguageEmbedding(nn.Module):
def __init__(self, text_embed, vision_embed):
super().__init__()
self.text_embed = text_embed
self.vision_embed = vision_embed
def forward(self, textual_tokens, visual_tokens, **kwargs):
if textual_tokens is None:
return self.vision_embed(visual_tokens)
if visual_tokens is None:
return self.text_embed(textual_tokens)
x1 = self.vision_embed(visual_tokens)
x2 = self.text_embed(textual_tokens)
return torch.cat([x1, x2], dim=1)
class VisionEmbedding(nn.Module):
"""Image to Patch Embedding"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
contain_mask_token=False,
prepend_cls_token=False,
):
super().__init__()
img_size = (img_size, img_size)
patch_size = (patch_size, patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
)
if contain_mask_token:
self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.mask_token = None
if prepend_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.cls_token = None
def num_position_embeddings(self):
if self.cls_token is None:
return self.num_patches
else:
return self.num_patches + 1
def forward(self, x, masked_position=None, **kwargs):
B, C, H, W = x.shape
assert (
H == self.img_size[0] and W == self.img_size[1]
), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
batch_size, seq_len, _ = x.size()
if masked_position is not None:
assert self.mask_token is not None
mask_token = self.mask_token.expand(batch_size, seq_len, -1)
w = masked_position.unsqueeze(-1).type_as(mask_token)
x = x * (1 - w) + mask_token * w
if self.cls_token is not None:
cls_tokens = self.cls_token.expand(
batch_size, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
return x
class TextEmbedding(nn.Embedding):
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0, std=self.embedding_dim**-0.5)
self._fill_padding_idx_with_zero()
class PositionalEmbedding(nn.Embedding):
def forward(
self,
x,
positions=None,
**kwargs,
):
if positions is None:
# being consistent with Fairseq, which starts from 2.
positions = (
torch.arange(2, x.size(1) + 2, device=x.device).long().unsqueeze(0)
)
return F.embedding(
positions,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
#rotary
class RotaryEmbedding(nn.Module):
def __init__(
self,
dim,
use_xpos = False,
scale_base = 512
):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
if not use_xpos:
self.register_buffer('scale', None)
return
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.scale_base = scale_base
self.register_buffer('scale', scale)
def forward(self, seq_len, device):
t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
if not exists(self.scale):
return freqs, 1.
power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
return freqs, scale
def rotate_half(x):
x = rearrange(x, '... (j d) -> ... j d', j = 2)
x1, x2 = x.unbind(dim = -2)
return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(t, freqs, scale = 1):
seq_len = t.shape[-2]
freqs = freqs[-seq_len:, :]
return (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale)
def fixed_pos_embedding(x):
seq_len, dim = x.shape
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim) / dim))
sinusoid_inp = (
torch.einsum("i , j -> i j", torch.arange(0, seq_len, dtype=torch.float), inv_freq).to(x)
)
return torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)
def rotate_every_two(x):
x1 = x[:, :, ::2]
x2 = x[:, :, 1::2]
x = torch.stack((-x2, x1), dim=-1)
    return x.flatten(-2)  # in einsum notation: rearrange(x, '... d j -> ... (d j)')
def duplicate_interleave(m):
"""
A simple version of `torch.repeat_interleave` for duplicating a matrix while interleaving the copy.
"""
dim0 = m.shape[0]
m = m.view(-1, 1) # flatten the matrix
m = m.repeat(1, 2) # repeat all elements into the 2nd dimension
m = m.view(dim0, -1) # reshape into a matrix, interleaving the copy
return m
def apply_rotary_pos_emb(x, sin, cos, scale=1):
sin, cos = map(lambda t: duplicate_interleave(t * scale), (sin, cos))
# einsum notation for lambda t: repeat(t[offset:x.shape[1]+offset,:], "n d -> () n () (d j)", j=2)
return (x * cos) + (rotate_every_two(x) * sin)
class XPOS(nn.Module):
def __init__(
self, head_dim, scale_base=512
):
super().__init__()
self.head_dim = head_dim
self.scale_base = scale_base
self.register_buffer(
"scale", (torch.arange(0, head_dim, 2) + 0.4 * head_dim) / (1.4 * head_dim)
)
def forward(self, x, offset=0, downscale=False):
length = x.shape[1]
min_pos = -(length + offset) // 2
max_pos = length + offset + min_pos
scale = self.scale ** torch.arange(min_pos, max_pos, 1).to(self.scale).div(self.scale_base)[:, None]
sin, cos = fixed_pos_embedding(scale)
if scale.shape[0] > length:
scale = scale[-length:]
sin = sin[-length:]
cos = cos[-length:]
if downscale:
scale = 1 / scale
x = apply_rotary_pos_emb(x, sin, cos, scale)
return x
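# --- Added usage sketch (not part of the original file). XPOS applies a rotary embedding with a
# position-dependent scale to (batch, seq_len, head_dim) tensors; queries typically use
# downscale=False and keys downscale=True, giving the relative-position decay of xPos.
def _example_xpos(batch=2, seq_len=16, head_dim=64):
    xpos = XPOS(head_dim)
    q = torch.randn(batch, seq_len, head_dim)
    k = torch.randn(batch, seq_len, head_dim)
    return xpos(q, downscale=False), xpos(k, downscale=True)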
class AlibiPositionalBias(nn.Module):
def __init__(self, heads, total_heads, **kwargs):
super().__init__()
self.heads = heads
self.total_heads = total_heads
slopes = Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.register_buffer('slopes', slopes, persistent = False)
self.register_buffer('bias', None, persistent = False)
def get_bias(self, i, j, device):
i_arange = torch.arange(j - i, j, device = device)
j_arange = torch.arange(j, device = device)
bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
return bias
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
@property
def device(self):
return next(self.buffers()).device
def forward(self, i, j):
h, device = self.total_heads, self.device
if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
return self.bias[..., :i, :j]
bias = self.get_bias(i, j, device)
bias = bias * self.slopes
num_heads_unalibied = h - bias.shape[0]
bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0)
self.register_buffer('bias', bias, persistent = False)
return self.bias
class LearnedAlibiPositionalBias(AlibiPositionalBias):
def __init__(self, heads, total_heads):
super().__init__(heads, total_heads)
log_slopes = torch.log(self.slopes)
self.learned_logslopes = nn.Parameter(log_slopes)
def forward(self, i, j):
h, device = self.heads, self.device
def get_slopes(param):
return pad_at_dim(param.exp(), (0, h - param.shape[0]), dim = -2)
if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
bias = self.bias[..., :i, :j]
else:
bias = self.get_bias(i, j, device)
self.register_buffer('bias', bias, persistent = False)
slopes = get_slopes(self.learned_logslopes)
bias = bias * slopes
return bias
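# --- Added usage sketch (not part of the original file). AlibiPositionalBias returns an additive
# attention bias of shape (total_heads, i, j); the head counts and sequence length here are arbitrary.
def _example_alibi_bias(heads=8, total_heads=8, seq_len=16):
    alibi = AlibiPositionalBias(heads, total_heads)
    return alibi(seq_len, seq_len)  # added to the attention logits before softmax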
|
AttentionGrid-main
|
AttentionGrid/attentions/stacked_flash_attention/utils/embeddings.py
|
# TODO: add the ability to choose your own tokenizer and embedder; investigate what else is needed for production-level training
import math
from random import random
import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F
from functools import partial, wraps
from inspect import isfunction
from collections import namedtuple
from dataclasses import dataclass
from typing import List
from einops import rearrange, repeat
# from optimus_prime.attend import Attend, Intermediates
# from optimus_prime.autoregressive_wrapper import AutoregressiveWrapper
from abc import ABC, abstractmethod
from packaging import version
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
@dataclass
class Intermediates:
qk_similarities: Tensor = None
pre_softmax_attn: Tensor = None
post_softmax_attn: Tensor = None
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
*,
dropout = 0.,
causal = False,
heads = None,
talking_heads = False,
scale = None,
qk_norm = False,
flash = False,
):
super().__init__()
self.scale = scale
self.qk_norm = qk_norm
self.causal = causal
self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
# talking heads
assert not (flash and talking_heads), 'talking heads not compatible with flash attention'
self.talking_heads = talking_heads
if talking_heads:
self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
# flash attention
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = EfficientAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(False, True, True)
def flash_attn(
self,
q, k, v,
mask = None,
attn_bias = None
):
batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
if v.ndim == 3:
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
# handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention
if self.qk_norm:
default_scale = q.shape[-1] ** -0.5
q = q * (default_scale / self.scale)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
causal = self.causal
if exists(mask):
assert mask.ndim == 4
mask = mask.expand(batch, heads, q_len, k_len)
# manually handle causal mask, if another mask was given
if causal:
causal_mask = torch.ones((q_len, k_len), dtype = torch.bool, device = device).triu(k_len - q_len + 1)
mask = mask | causal_mask
causal = False
# handle alibi positional bias
# convert from bool to float
if exists(attn_bias):
attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, -1, -1, -1)
# if mask given, the mask would already contain the causal mask from above logic
# otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number
mask_value = -torch.finfo(q.dtype).max
if exists(mask):
attn_bias = attn_bias.masked_fill(mask, mask_value // 2)
elif causal:
causal_mask = torch.ones((q_len, k_len), dtype = torch.bool, device = device).triu(k_len - q_len + 1)
attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2)
causal = False
# scaled_dot_product_attention handles attn_mask either as bool or additive bias
# make it an additive bias here
mask = attn_bias
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = causal
)
return out, Intermediates()
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = default(self.scale, q.shape[-1] ** -0.5)
if self.flash:
assert not exists(prev_attn), 'residual attention not compatible with flash attention'
return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias)
kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale
if exists(prev_attn):
dots = dots + prev_attn
qk_similarities = dots.clone()
if self.talking_heads:
dots = self.pre_softmax_talking_heads(dots)
if exists(attn_bias):
dots = dots + attn_bias
dtype = dots.dtype
pre_softmax_attn = dots.clone()
mask_value = -torch.finfo(dots.dtype).max
if exists(mask):
dots = dots.masked_fill(mask, mask_value)
if self.causal:
i, j = dots.shape[-2:]
causal_mask = torch.ones((i, j), dtype = torch.bool, device = device).triu(j - i + 1)
dots = dots.masked_fill(causal_mask, mask_value)
attn = self.attn_fn(dots, dim = -1)
attn = attn.type(dtype)
post_softmax_attn = attn.clone()
attn = self.attn_dropout(attn)
if self.talking_heads:
attn = self.post_softmax_talking_heads(attn)
out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v)
intermediates = Intermediates(
qk_similarities = qk_similarities,
pre_softmax_attn = pre_softmax_attn,
post_softmax_attn = post_softmax_attn
)
return out, intermediates
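# --- Added usage sketch (not part of the original file). Running the non-flash path of Attend on
# (batch, heads, seq, dim_head) tensors with causal masking; the sizes are illustrative only.
def _example_attend(batch=2, heads=4, seq=16, dim_head=32):
    attend = Attend(causal=True, flash=False)
    q, k, v = (torch.randn(batch, heads, seq, dim_head) for _ in range(3))
    out, intermediates = attend(q, k, v)
    return out, intermediates.post_softmax_attn  # (batch, heads, seq, dim_head), (batch, heads, seq, seq)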
from math import ceil
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, pack, unpack
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
# nucleus
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
# topk
def top_k(logits, thres = 0.9):
k = ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# top_a
def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02):
probs = F.softmax(logits, dim=-1)
limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio
logits[probs < limit] = float('-inf')
logits[probs >= limit] = 1
return logits
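# --- Added illustration (not in the original file). Filtering logits with top_k and sampling from
# the filtered distribution, as done inside AutoregressiveWrapper.generate below; sizes are arbitrary.
def _example_top_k_sampling(batch=2, vocab=100, thres=0.9, temperature=1.0):
    logits = torch.randn(batch, vocab)
    filtered = top_k(logits, thres=thres)              # keep roughly the top (1 - thres) fraction
    probs = F.softmax(filtered / temperature, dim=-1)  # filtered-out entries get zero probability
    return torch.multinomial(probs, 1)                 # (batch, 1) sampled token ids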
# autoregressive wrapper class
class AutoregressiveWrapper(nn.Module):
def __init__(
self,
net,
ignore_index = -100,
pad_value = 0,
mask_prob = 0.
):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
# paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432
assert mask_prob < 1.
self.mask_prob = mask_prob
@torch.no_grad()
@eval_decorator
def generate(
self,
start_tokens,
seq_len,
eos_token = None,
temperature = 1.,
filter_logits_fn = top_k,
filter_thres = 0.9,
min_p_pow = 2.0,
min_p_ratio = 0.02,
**kwargs
):
start_tokens, ps = pack([start_tokens], '* n')
b, t = start_tokens.shape
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1]
if filter_logits_fn in {top_k, top_p}:
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
elif filter_logits_fn is top_a:
filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio= min_p_ratio)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
out, = unpack(out, ps, '* n')
return out
def forward(self, x, return_loss=True, **kwargs):
seq, ignore_index = x.shape[1], self.ignore_index
inp, target = x[:, :-1], x[:, 1:]
if self.mask_prob > 0.:
rand = torch.randn(inp.shape, device = x.device)
rand[:, 0] = -torch.finfo(rand.dtype).max # first token should not be masked out
num_mask = min(int(seq * self.mask_prob), seq - 1)
indices = rand.topk(num_mask, dim = -1).indices
mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool()
kwargs.update(self_attn_context_mask = mask)
logits = self.net(inp, **kwargs)
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
target,
ignore_index = ignore_index
)
if return_loss:
return logits, loss
return logits
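# --- Added usage sketch (not part of the original file). AutoregressiveWrapper expects a decoder
# `net` that exposes `max_seq_len` and maps (batch, seq) token ids to (batch, seq, vocab) logits.
# The tiny stand-in module below exists only to illustrate the training and generation calls.
class _TinyLM(nn.Module):
    def __init__(self, vocab=100, dim=32, max_seq_len=64):
        super().__init__()
        self.max_seq_len = max_seq_len
        self.emb = nn.Embedding(vocab, dim)
        self.to_logits = nn.Linear(dim, vocab)
    def forward(self, x, **kwargs):
        return self.to_logits(self.emb(x))
def _example_autoregressive_wrapper():
    model = AutoregressiveWrapper(_TinyLM())
    tokens = torch.randint(0, 100, (2, 16))
    logits, loss = model(tokens)                           # next-token cross-entropy training step
    generated = model.generate(tokens[:, :1], seq_len=8)   # sample an 8-token continuation
    return logits, loss, generated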
DEFAULT_DIM_HEAD = 64
@dataclass
class LayerIntermediates:
hiddens: List[Tensor] = None
attn_intermediates: List[Intermediates] = None
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def cast_tuple(val, depth):
return val if isinstance(val, tuple) else (val,) * depth
def maybe(fn):
@wraps(fn)
def inner(x, *args, **kwargs):
if not exists(x):
return x
return fn(x, *args, **kwargs)
return inner
class always():
def __init__(self, val):
self.val = val
def __call__(self, *args, **kwargs):
return self.val
class not_equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x != self.val
class equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x == self.val
# tensor helpers
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def l2norm(t, groups = 1):
t = rearrange(t, '... (g d) -> ... g d', g = groups)
t = F.normalize(t, p = 2, dim = -1)
return rearrange(t, '... g d -> ... (g d)')
def pad_at_dim(t, pad, dim = -1, value = 0.):
dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = ((0, 0) * dims_from_right)
return F.pad(t, (*zeros, *pad), value = value)
def or_reduce(masks):
head, *body = masks
for rest in body:
head = head | rest
return head
# init helpers
def init_zero_(layer):
nn.init.constant_(layer.weight, 0.)
if exists(layer.bias):
nn.init.constant_(layer.bias, 0.)
# keyword argument helpers
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
# initializations
def deepnorm_init(
transformer,
beta,
module_name_match_list = ['.ff.', '.to_v', '.to_out']
):
for name, module in transformer.named_modules():
if type(module) != nn.Linear:
continue
needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list))
gain = beta if needs_beta_gain else 1
nn.init.xavier_normal_(module.weight.data, gain = gain)
if exists(module.bias):
nn.init.constant_(module.bias.data, 0)
# structured dropout, more effective than traditional attention dropouts
def dropout_seq(seq, mask, dropout):
b, n, *_, device = *seq.shape, seq.device
logits = torch.randn(b, n, device = device)
if exists(mask):
mask_value = max_neg_value(logits)
logits = logits.masked_fill(~mask, mask_value)
keep_prob = 1. - dropout
num_keep = max(1, int(keep_prob * n))
keep_indices = logits.topk(num_keep, dim = 1).indices
batch_indices = torch.arange(b, device = device)
batch_indices = rearrange(batch_indices, 'b -> b 1')
seq = seq[batch_indices, keep_indices]
if exists(mask):
seq_counts = mask.sum(dim = -1)
seq_keep_counts = torch.ceil(seq_counts * keep_prob).int()
keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1')
mask = mask[batch_indices, keep_indices] & keep_mask
return seq, mask
# activations
class ReluSquared(nn.Module):
def forward(self, x):
return F.relu(x) ** 2
#tokenization
class BaseTokenizer(ABC):
@abstractmethod
def tokenize(self, text: str) -> List[int]:
pass
class CustomTokenizer(BaseTokenizer):
def tokenize(self, text: str) -> List[int]:
# Your custom tokenization algorithm
tokens = ...
return tokens
# embedding
class BaseEmbedding(ABC):
@abstractmethod
def get_embedding(self, num_tokens: int, dim: int) -> nn.Module:
# Custom embedding function or model
embedding = ...
return embedding
class AndromedaEmbedding(BaseEmbedding):
def get_embedding(self, num_tokens: int, dim: int) -> nn.Module:
embedding = nn.Embedding(num_tokens, dim)
return embedding
class TokenEmbedding(nn.Module):
def __init__(self, dim, num_tokens, embedding_provider: BaseEmbedding, l2norm_embed = False):
super().__init__()
self.l2norm_embed = l2norm_embed
self.emb = embedding_provider.get_embedding(num_tokens, dim)
# nn.Embedding(num_tokens, dim)
def forward(self, x):
token_emb = self.emb(x)
return l2norm(token_emb) if self.l2norm_embed else token_emb
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len, l2norm_embed = False):
super().__init__()
self.scale = dim ** -0.5 if not l2norm_embed else 1.
self.max_seq_len = max_seq_len
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}'
if not exists(pos):
pos = torch.arange(seq_len, device = device)
pos_emb = self.emb(pos)
pos_emb = pos_emb * self.scale
return l2norm(pos_emb) if self.l2norm_embed else pos_emb
class ScaledSinusoidalEmbedding(nn.Module):
def __init__(self, dim, theta = 10000):
super().__init__()
assert (dim % 2) == 0
self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5)
half_dim = dim // 2
freq_seq = torch.arange(half_dim).float() / half_dim
inv_freq = theta ** -freq_seq
self.register_buffer('inv_freq', inv_freq, persistent = False)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
if not exists(pos):
pos = torch.arange(seq_len, device = device)
emb = einsum('i, j -> i j', pos, self.inv_freq)
emb = torch.cat((emb.sin(), emb.cos()), dim = -1)
return emb * self.scale
class RelativePositionBias(nn.Module):
def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8):
super().__init__()
self.scale = scale
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128):
ret = 0
n = -relative_position
if not causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
device = self.device
q_pos = torch.arange(j - i, j, dtype = torch.long, device = device)
k_pos = torch.arange(j, dtype = torch.long, device = device)
rel_pos = k_pos[None, :] - q_pos[:, None]
rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j h -> h i j')
return bias * self.scale
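# --- Added usage sketch (not part of the original file). RelativePositionBias produces a T5-style
# additive bias of shape (heads, i, j); the scale and bucket settings below are illustrative only.
def _example_relative_position_bias(seq_len=16, heads=8):
    rel_bias = RelativePositionBias(scale=1.0, causal=True, heads=heads)
    return rel_bias(seq_len, seq_len)  # (heads, seq_len, seq_len), added to the attention logits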
class DynamicPositionBias(nn.Module):
def __init__(self, dim, *, heads, depth, log_distance = False, norm = False):
super().__init__()
assert depth >= 1, 'depth for dynamic position bias MLP must be greater or equal to 1'
self.log_distance = log_distance
self.mlp = nn.ModuleList([])
self.mlp.append(nn.Sequential(
nn.Linear(1, dim),
nn.LayerNorm(dim) if norm else nn.Identity(),
nn.SiLU()
))
for _ in range(depth - 1):
self.mlp.append(nn.Sequential(
nn.Linear(dim, dim),
nn.LayerNorm(dim) if norm else nn.Identity(),
nn.SiLU()
))
self.mlp.append(nn.Linear(dim, heads))
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
assert i == j
n, device = j, self.device
# get the (n x n) matrix of distances
seq_arange = torch.arange(n, device = device)
context_arange = torch.arange(n, device = device)
indices = rearrange(seq_arange, 'i -> i 1') - rearrange(context_arange, 'j -> 1 j')
indices += (n - 1)
# input to continuous positions MLP
pos = torch.arange(-n + 1, n, device = device).float()
pos = rearrange(pos, '... -> ... 1')
if self.log_distance:
pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1)
for layer in self.mlp:
pos = layer(pos)
# get position biases
bias = pos[indices]
bias = rearrange(bias, 'i j h -> h i j')
return bias
class AlibiPositionalBias(nn.Module):
def __init__(self, heads, total_heads, **kwargs):
super().__init__()
self.heads = heads
self.total_heads = total_heads
slopes = Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.register_buffer('slopes', slopes, persistent = False)
self.register_buffer('bias', None, persistent = False)
def get_bias(self, i, j, device):
i_arange = torch.arange(j - i, j, device = device)
j_arange = torch.arange(j, device = device)
bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
return bias
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
@property
def device(self):
return next(self.buffers()).device
def forward(self, i, j):
h, device = self.total_heads, self.device
if exists(self.bias) and self.bias.shape[-1] >= j:
return self.bias[..., :i, :j]
bias = self.get_bias(i, j, device)
bias = bias * self.slopes
num_heads_unalibied = h - bias.shape[0]
bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0)
self.register_buffer('bias', bias, persistent = False)
return self.bias
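# Illustrative usage sketch for AlibiPositionalBias (sizes chosen arbitrarily): only the first
# `heads` head slices receive the ALiBi slope bias; the remaining slices up to `total_heads`
# are filled by padding so the bias can be applied to a subset of heads.
def _example_alibi_bias():
    alibi = AlibiPositionalBias(heads = 4, total_heads = 8)
    return alibi(128, 128)      # -> (8, 128, 128), added to attention logits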
class LearnedAlibiPositionalBias(AlibiPositionalBias):
def __init__(self, heads, total_heads):
super().__init__(heads, total_heads)
log_slopes = torch.log(self.slopes)
self.learned_logslopes = nn.Parameter(log_slopes)
def forward(self, i, j):
        h, device = self.heads, self.device
def get_slopes(param):
return pad_at_dim(param.exp(), (0, h - param.shape[0]), dim = -2)
if exists(self.bias) and self.bias.shape[-1] >= j:
bias = self.bias[..., :i, :j]
else:
bias = self.get_bias(i, j, device)
self.register_buffer('bias', bias, persistent = False)
slopes = get_slopes(self.learned_logslopes)
bias = bias * slopes
return bias
class RotaryEmbedding(nn.Module):
def __init__(
self,
dim,
use_xpos = False,
scale_base = 512
):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
if not use_xpos:
self.register_buffer('scale', None)
return
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.scale_base = scale_base
self.register_buffer('scale', scale)
def forward(self, seq_len, device):
t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
if not exists(self.scale):
return freqs, 1.
power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
return freqs, scale
def rotate_half(x):
x = rearrange(x, '... (j d) -> ... j d', j = 2)
x1, x2 = x.unbind(dim = -2)
return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(t, freqs, scale = 1):
seq_len = t.shape[-2]
freqs = freqs[-seq_len:, :]
return (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale)
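# Illustrative usage sketch for RotaryEmbedding / apply_rotary_pos_emb (dim_head = 64 and
# seq_len = 128 are arbitrary values): build the per-position frequencies once, then rotate
# a (batch, heads, seq, dim_head) tensor.
def _example_rotary_embedding():
    rotary = RotaryEmbedding(dim = 64)
    q = torch.randn(1, 8, 128, 64)
    freqs, _ = rotary(128, q.device)          # xpos scale is 1. when use_xpos = False
    return apply_rotary_pos_emb(q, freqs)     # same shape, positions encoded as rotations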
# norms
class Scale(nn.Module):
def __init__(self, value, fn):
super().__init__()
self.value = value
self.fn = fn
def forward(self, x, **kwargs):
out = self.fn(x, **kwargs)
def scale_fn(t):
return t * self.value
if not isinstance(out, tuple):
return scale_fn(out)
return (scale_fn(out[0]), *out[1:])
class ScaleNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True)
return x / norm.clamp(min = self.eps) * self.g
class RMSNorm(nn.Module):
def __init__(self, dim, eps = 1e-8):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True) * self.scale
return x / norm.clamp(min = self.eps) * self.g
# residual and residual gates
class Residual(nn.Module):
def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.):
super().__init__()
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
self.scale_residual_constant = scale_residual_constant
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
if self.scale_residual_constant != 1:
residual = residual * self.scale_residual_constant
return x + residual
class GRUGating(nn.Module):
def __init__(self, dim, scale_residual = False, **kwargs):
super().__init__()
self.gru = nn.GRUCell(dim, dim)
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
gated_output = self.gru(
rearrange(x, 'b n d -> (b n) d'),
rearrange(residual, 'b n d -> (b n) d')
)
return gated_output.reshape_as(x)
# token shifting
def shift(t, amount, mask = None):
if amount == 0:
return t
else:
amount = min(amount, t.shape[1])
if exists(mask):
t = t.masked_fill(~mask[..., None], 0.)
return pad_at_dim(t, (amount, -amount), dim = - 2, value = 0.)
class ShiftTokens(nn.Module):
def __init__(self, shifts, fn):
super().__init__()
self.fn = fn
self.shifts = tuple(shifts)
def forward(self, x, **kwargs):
mask = kwargs.get('mask', None)
shifts = self.shifts
segments = len(shifts)
feats_per_shift = x.shape[-1] // segments
splitted = x.split(feats_per_shift, dim = -1)
segments_to_shift, rest = splitted[:segments], splitted[segments:]
segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts)))
x = torch.cat((*segments_to_shift, *rest), dim = -1)
return self.fn(x, **kwargs)
# feedforward
class GLU(nn.Module):
def __init__(self, dim_in, dim_out, activation):
super().__init__()
self.act = activation
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim = -1)
return x * self.act(gate)
class FeedForward(nn.Module):
def __init__(
self,
dim,
dim_out = None,
mult = 4,
glu = False,
swish = False,
relu_squared = False,
post_act_ln = False,
dropout = 0.,
no_bias = False,
zero_init_output = False
):
super().__init__()
inner_dim = int(dim * mult)
dim_out = default(dim_out, dim)
if relu_squared:
activation = ReluSquared()
elif swish:
activation = nn.SiLU()
else:
activation = nn.GELU()
project_in = nn.Sequential(
nn.Linear(dim, inner_dim, bias = not no_bias),
activation
) if not glu else GLU(dim, inner_dim, activation)
self.ff = nn.Sequential(
project_in,
nn.LayerNorm(inner_dim) if post_act_ln else nn.Identity(),
nn.Dropout(dropout),
nn.Linear(inner_dim, dim_out, bias = not no_bias)
)
# init last linear layer to 0
if zero_init_output:
init_zero_(self.ff[-1])
def forward(self, x):
return self.ff(x)
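# Illustrative usage sketch for FeedForward (sizes chosen arbitrarily): a position-wise MLP
# applied tokenwise; with glu = True the input projection becomes the GLU defined above.
def _example_feedforward():
    ff = FeedForward(dim = 512, mult = 4, glu = True, dropout = 0.1)
    x = torch.randn(2, 128, 512)
    return ff(x)                              # -> (2, 128, 512)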
# attention. it is all we need
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = DEFAULT_DIM_HEAD,
heads = 8,
causal = False,
flash = False,
talking_heads = False,
head_scale = False,
sparse_topk = None,
num_mem_kv = 0,
dropout = 0.,
on_attn = False,
gate_values = False,
zero_init_output = False,
max_attend_past = None,
qk_norm = False,
qk_norm_groups = 1,
qk_norm_scale = 10,
qk_norm_dim_scale = False,
one_kv_head = False,
shared_kv = False,
value_dim_head = None,
tensor_product = False # https://arxiv.org/abs/2208.06061
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
self.causal = causal
self.max_attend_past = max_attend_past
value_dim_head = default(value_dim_head, dim_head)
q_dim = k_dim = dim_head * heads
v_dim = out_dim = value_dim_head * heads
self.one_kv_head = one_kv_head
if one_kv_head:
k_dim = dim_head
v_dim = value_dim_head
out_dim = v_dim * heads
self.to_q = nn.Linear(dim, q_dim, bias = False)
self.to_k = nn.Linear(dim, k_dim, bias = False)
# shared key / values, for further memory savings during inference
assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values'
self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None
# relations projection from tp-attention
self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None
# add GLU gating for aggregated values, from alphafold2
self.to_v_gate = None
if gate_values:
self.to_v_gate = nn.Linear(dim, out_dim)
nn.init.constant_(self.to_v_gate.weight, 0)
nn.init.constant_(self.to_v_gate.bias, 1)
# cosine sim attention
self.qk_norm = qk_norm
self.qk_norm_groups = qk_norm_groups
self.qk_norm_scale = qk_norm_scale
# whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442
self.qk_norm_dim_scale = qk_norm_dim_scale
self.qk_norm_q_scale = self.qk_norm_k_scale = 1
if qk_norm and qk_norm_dim_scale:
self.qk_norm_q_scale = nn.Parameter(torch.ones(dim_head))
self.qk_norm_k_scale = nn.Parameter(torch.ones(dim_head))
assert (not qk_norm) or (dim_head % qk_norm_groups) == 0, 'dimension per attention head must be divisible by the qk norm groups'
assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)'
# attend class - includes core attention algorithm + talking heads
self.attend = Attend(
heads = heads,
causal = causal,
talking_heads = talking_heads,
dropout = dropout,
qk_norm = qk_norm,
scale = qk_norm_scale if qk_norm else self.scale,
flash = flash
)
# head scaling
self.head_scale = head_scale
if head_scale:
self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1))
# explicit topk sparse attention
self.sparse_topk = sparse_topk
# add memory key / values
self.num_mem_kv = num_mem_kv
if num_mem_kv > 0:
self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
# attention on attention
self.attn_on_attn = on_attn
self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False)
# init output projection 0
if zero_init_output:
init_zero_(self.to_out)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
rel_pos = None,
rotary_pos_emb = None,
prev_attn = None,
mem = None
):
b, n, _, h, head_scale, device, has_context = *x.shape, self.heads, self.head_scale, x.device, exists(context)
kv_input = default(context, x)
q_input = x
k_input = kv_input
v_input = kv_input
r_input = x
if exists(mem):
k_input = torch.cat((mem, k_input), dim = -2)
v_input = torch.cat((mem, v_input), dim = -2)
q = self.to_q(q_input)
k = self.to_k(k_input)
v = self.to_v(v_input) if exists(self.to_v) else k
r = self.to_r(r_input) if exists(self.to_r) else None
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
if not self.one_kv_head:
k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = h), (k, v, r))
if self.qk_norm:
qk_l2norm = partial(l2norm, groups = self.qk_norm_groups)
q, k = map(qk_l2norm, (q, k))
q = q * self.qk_norm_q_scale
k = k * self.qk_norm_k_scale
if exists(rotary_pos_emb) and not has_context:
freqs, xpos_scale = rotary_pos_emb
l = freqs.shape[-1]
q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.)
(ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v))
ql, kl, vl = map(lambda arg: apply_rotary_pos_emb(arg[0], freqs, arg[1]), ((ql, q_xpos_scale), (kl, k_xpos_scale), (vl, k_xpos_scale)))
q, k, v = map(lambda t: torch.cat(t, dim = -1), ((ql, qr), (kl, kr), (vl, vr)))
input_mask = default(context_mask, mask)
if self.num_mem_kv > 0:
mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v))
if self.qk_norm:
mem_k = l2norm(mem_k)
mem_k = mem_k * self.qk_norm_k_scale
k = torch.cat((mem_k, k), dim = -2)
v = torch.cat((mem_v, v), dim = -2)
if exists(input_mask):
input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True)
i, j = map(lambda t: t.shape[-2], (q, k))
# determine masking
masks = []
final_attn_mask = None
if exists(input_mask):
input_mask = rearrange(input_mask, 'b j -> b 1 1 j')
masks.append(~input_mask)
if exists(attn_mask):
assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4'
if attn_mask.ndim == 2:
attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j')
elif attn_mask.ndim == 3:
attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j')
masks.append(~attn_mask)
if exists(self.max_attend_past):
range_q = torch.arange(j - i, j, device = device)
range_k = torch.arange(j, device = device)
dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j')
max_attend_past_mask = dist > self.max_attend_past
masks.append(max_attend_past_mask)
if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]:
top, _ = dots.topk(self.sparse_topk, dim = -1)
vk = rearrange(top[..., -1], '... -> ... 1')
sparse_topk_mask = dots < vk
masks.append(sparse_topk_mask)
if len(masks) > 0:
final_attn_mask = or_reduce(masks)
# prepare relative positional bias, if needed
attn_bias = None
if exists(rel_pos):
attn_bias = rel_pos(i, j)
# attention is all we need
out, intermediates = self.attend(
q, k, v,
mask = final_attn_mask,
attn_bias = attn_bias,
prev_attn = prev_attn
)
# https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients
if exists(r):
out = out * r + out
# normformer scaling of heads
if head_scale:
out = out * self.head_scale_params
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
# alphafold2 styled gating of the values
if exists(self.to_v_gate):
gates = self.to_v_gate(x)
out = out * gates.sigmoid()
# combine the heads
out = self.to_out(out)
if exists(mask):
mask = rearrange(mask, 'b n -> b n 1')
out = out.masked_fill(~mask, 0.)
return out, intermediates
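# Illustrative usage sketch for Attention (sizes chosen arbitrarily): causal self-attention
# over a (batch, seq, dim) tensor. The second return value carries the attention
# intermediates produced by Attend, which is defined earlier in this file.
def _example_attention():
    attn = Attention(dim = 512, dim_head = 64, heads = 8, causal = True)
    x = torch.randn(2, 128, 512)
    out, _ = attn(x)
    return out                                # -> (2, 128, 512)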
class AttentionLayers(nn.Module):
def __init__(
self,
dim,
depth,
heads = None,
causal = False,
cross_attend = False,
only_cross = False,
use_scalenorm = False,
use_rmsnorm = False,
alibi_pos_bias = False,
alibi_num_heads = None,
alibi_learned = False,
rel_pos_bias = False,
rel_pos_num_buckets = 32,
rel_pos_max_distance = 128,
dynamic_pos_bias = False,
dynamic_pos_bias_log_distance = False,
dynamic_pos_bias_mlp_depth = 2,
dynamic_pos_bias_norm = False,
rotary_pos_emb = False,
rotary_emb_dim = None,
rotary_xpos = False,
rotary_xpos_scale_base = 512,
custom_layers = None,
sandwich_coef = None,
par_ratio = None,
residual_attn = False,
cross_residual_attn = False,
macaron = False,
pre_norm = True,
gate_residual = False,
scale_residual = False,
scale_residual_constant = 1.,
deepnorm = False,
shift_tokens = 0,
sandwich_norm = False,
resi_dual = False,
zero_init_branch_output = False,
layer_dropout = 0.,
cross_attn_tokens_dropout = 0.,
**kwargs
):
super().__init__()
rotary_pos_emb = rotary_pos_emb or rotary_xpos
ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs)
dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
self.dim = dim
self.depth = depth
self.layers = nn.ModuleList([])
self.has_pos_emb = rel_pos_bias or rotary_pos_emb
rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32)
assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention'
self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base) if rotary_pos_emb else None
assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both'
assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
# relative positional bias
flash_attn = attn_kwargs.get('flash', False)
assert (int(rel_pos_bias) + int(dynamic_pos_bias) + int(alibi_pos_bias)) <= 1, 'you can only choose up to one of t5, alibi, or dynamic positional bias'
self.rel_pos = None
if rel_pos_bias:
assert not flash_attn, 'flash attention not compatible with t5 relative positional bias'
self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance)
elif dynamic_pos_bias:
assert not flash_attn, 'flash attention not compatible with dynamic positional bias'
self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm)
elif alibi_pos_bias:
alibi_num_heads = default(alibi_num_heads, heads)
assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads'
alibi_pos_klass = LearnedAlibiPositionalBias if alibi_learned else AlibiPositionalBias
self.rel_pos = alibi_pos_klass(heads = alibi_num_heads, total_heads = heads)
# determine deepnorm and residual scale
if deepnorm:
assert scale_residual_constant == 1, 'scale residual constant is being overridden by deep norm settings'
pre_norm = sandwich_norm = resi_dual = False
scale_residual = True
scale_residual_constant = (2 * depth) ** 0.25
assert (int(sandwich_norm) + int(resi_dual)) <= 1, 'either sandwich norm or resiDual is selected, but not both'
assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm'
        assert not (not pre_norm and resi_dual), 'resiDual cannot be used when not using prenorm'
self.pre_norm = pre_norm
self.sandwich_norm = sandwich_norm
self.resi_dual = resi_dual
self.residual_attn = residual_attn
self.cross_residual_attn = cross_residual_attn
self.cross_attend = cross_attend
norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm
norm_class = RMSNorm if use_rmsnorm else norm_class
norm_fn = partial(norm_class, dim)
if cross_attend and not only_cross:
default_block = ('a', 'c', 'f')
elif cross_attend and only_cross:
default_block = ('c', 'f')
else:
default_block = ('a', 'f')
if macaron:
default_block = ('f',) + default_block
# zero init
if zero_init_branch_output:
attn_kwargs = {**attn_kwargs, 'zero_init_output': True}
ff_kwargs = {**ff_kwargs, 'zero_init_output': True}
# calculate layer block order
if exists(custom_layers):
layer_types = custom_layers
elif exists(par_ratio):
par_depth = depth * len(default_block)
assert 1 < par_ratio <= par_depth, 'par ratio out of range'
default_block = tuple(filter(not_equals('f'), default_block))
par_attn = par_depth // par_ratio
depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
par_width = (depth_cut + depth_cut // par_attn) // par_attn
assert len(default_block) <= par_width, 'default block is too large for par_ratio'
par_block = default_block + ('f',) * (par_width - len(default_block))
par_head = par_block * par_attn
layer_types = par_head + ('f',) * (par_depth - len(par_head))
elif exists(sandwich_coef):
assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth'
layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
else:
layer_types = default_block * depth
self.layer_types = layer_types
self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
# stochastic depth
self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types))
# structured dropout for cross attending
self.cross_attn_tokens_dropout = cross_attn_tokens_dropout
# calculate token shifting
shift_tokens = cast_tuple(shift_tokens, len(layer_types))
# iterate and construct layers
for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)):
is_last_layer = ind == (len(self.layer_types) - 1)
if layer_type == 'a':
layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs)
elif layer_type == 'c':
layer = Attention(dim, heads = heads, **attn_kwargs)
elif layer_type == 'f':
layer = FeedForward(dim, **ff_kwargs)
layer = layer if not macaron else Scale(0.5, layer)
else:
raise Exception(f'invalid layer type {layer_type}')
if layer_shift_tokens > 0:
shift_range_upper = layer_shift_tokens + 1
shift_range_lower = -layer_shift_tokens if not causal else 0
layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer)
residual_fn = GRUGating if gate_residual else Residual
residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant)
pre_branch_norm = norm_fn() if pre_norm else None
post_branch_norm = norm_fn() if sandwich_norm else None
post_main_norm = norm_fn() if (resi_dual or not pre_norm) and not is_last_layer else None
norms = nn.ModuleList([
pre_branch_norm,
post_branch_norm,
post_main_norm
])
self.layers.append(nn.ModuleList([
norms,
layer,
residual
]))
self.layers_length = len(self.layers) # It doesn't work if called after
if deepnorm:
init_gain = (8 * depth) ** -0.25
deepnorm_init(self, init_gain)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
self_attn_context_mask = None,
mems = None,
return_hiddens = False
):
assert not (self.cross_attend ^ exists(context)), 'context must be passed in if cross_attend is set to True'
hiddens = []
intermediates = []
prev_attn = None
prev_cross_attn = None
mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
rotary_pos_emb = None
if exists(self.rotary_pos_emb):
max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems)))
rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device)
outer_residual = x
for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(self.layer_types, self.layers, self.layer_dropouts)):
if self.training and layer_dropout > 0. and random() < layer_dropout:
continue
if layer_type == 'a':
if return_hiddens:
hiddens.append(x)
layer_mem = mems.pop(0) if mems else None
if layer_type == 'c':
if self.training and self.cross_attn_tokens_dropout > 0.:
context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout)
inner_residual = x
pre_norm, post_branch_norm, post_main_norm = norm
if exists(pre_norm) and not self.resi_dual:
x = pre_norm(x)
if layer_type == 'a':
out, inter = block(x, mask = mask, context_mask = self_attn_context_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, mem = layer_mem)
elif layer_type == 'c':
out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn)
elif layer_type == 'f':
out = block(x)
if self.resi_dual:
outer_residual = residual_fn(out, outer_residual)
if exists(post_branch_norm):
out = post_branch_norm(out)
x = residual_fn(out, inner_residual)
if layer_type in ('a', 'c') and return_hiddens:
intermediates.append(inter)
if layer_type == 'a' and self.residual_attn:
prev_attn = inter.pre_softmax_attn
elif layer_type == 'c' and self.cross_residual_attn:
prev_cross_attn = inter.pre_softmax_attn
if exists(post_main_norm):
x = post_main_norm(x)
if self.resi_dual:
x = x + pre_norm(outer_residual)
if return_hiddens:
intermediates = LayerIntermediates(
hiddens = hiddens,
attn_intermediates = intermediates
)
return x, intermediates
return x
class Encoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on encoder'
super().__init__(causal = False, **kwargs)
class Decoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on decoder'
super().__init__(causal = True, **kwargs)
class CrossAttender(AttentionLayers):
def __init__(self, **kwargs):
super().__init__(cross_attend = True, only_cross = True, **kwargs)
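# Illustrative usage sketch for the AttentionLayers wrappers (sizes chosen arbitrarily): a
# bidirectional Encoder over source embeddings and a causal Decoder that cross-attends to
# the encoder output. Inputs are already-embedded (batch, seq, dim) tensors.
def _example_encoder_decoder_stacks():
    encoder = Encoder(dim = 512, depth = 6, heads = 8)
    decoder = Decoder(dim = 512, depth = 6, heads = 8, cross_attend = True)
    src = torch.randn(1, 128, 512)
    tgt = torch.randn(1, 64, 512)
    context = encoder(src)
    return decoder(tgt, context = context)    # -> (1, 64, 512)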
class ViTransformerWrapper(nn.Module):
def __init__(
self,
*,
image_size,
patch_size,
attn_layers,
channels = 3,
num_classes = None,
dropout = 0.,
post_emb_norm = False,
emb_dropout = 0.
):
super().__init__()
assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder'
assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size'
dim = attn_layers.dim
num_patches = (image_size // patch_size) ** 2
patch_dim = channels * patch_size ** 2
self.patch_size = patch_size
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
self.patch_to_embedding = nn.Sequential(
nn.LayerNorm(patch_dim),
nn.Linear(patch_dim, dim),
nn.LayerNorm(dim)
)
self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
self.dropout = nn.Dropout(emb_dropout)
self.attn_layers = attn_layers
self.norm = nn.LayerNorm(dim)
self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else nn.Identity()
def forward(
self,
img,
return_embeddings = False
):
p = self.patch_size
x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
x = self.patch_to_embedding(x)
n = x.shape[1]
x = x + self.pos_embedding[:, :n]
x = self.post_emb_norm(x)
x = self.dropout(x)
x = self.attn_layers(x)
x = self.norm(x)
if not exists(self.mlp_head) or return_embeddings:
return x
x = x.mean(dim = -2)
return self.mlp_head(x)
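# Illustrative usage sketch for ViTransformerWrapper (image size, patch size and class count
# chosen arbitrarily): patchify an image, run the encoder, mean-pool and project to logits.
def _example_vit_wrapper():
    vit = ViTransformerWrapper(
        image_size = 256,
        patch_size = 32,
        num_classes = 10,
        attn_layers = Encoder(dim = 512, depth = 6, heads = 8)
    )
    img = torch.randn(1, 3, 256, 256)
    return vit(img)                           # -> (1, 10) class logits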
class TransformerWrapper(nn.Module):
def __init__(
self,
*,
num_tokens,
max_seq_len,
attn_layers,
# tokenizer: BaseTokenizer,
embedding_provider: BaseEmbedding,
emb_dim = None,
max_mem_len = 0.,
shift_mem_down = 0,
emb_dropout = 0.,
post_emb_norm = False,
num_memory_tokens = None,
tie_embedding = False,
logits_dim = None,
use_abs_pos_emb = True,
scaled_sinu_pos_emb = False,
l2norm_embed = False,
emb_frac_gradient = 1. # GLM-130B and Cogview successfully used this, set at 0.1
):
super().__init__()
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
dim = attn_layers.dim
emb_dim = default(emb_dim, dim)
# your own tokenizer
# self.tokenizer = tokenizer
#your own embedding function
self.token_emb = TokenEmbedding(emb_dim, num_tokens, embedding_provider, l2norm_embed=l2norm_embed)
self.emb_dim = emb_dim
self.num_tokens = num_tokens
self.max_seq_len = max_seq_len
self.max_mem_len = max_mem_len
self.shift_mem_down = shift_mem_down
self.l2norm_embed = l2norm_embed
if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
self.pos_emb = always(0)
elif scaled_sinu_pos_emb:
self.pos_emb = ScaledSinusoidalEmbedding(emb_dim)
else:
self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed)
self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290
self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity()
self.emb_dropout = nn.Dropout(emb_dropout)
self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
self.attn_layers = attn_layers
self.norm = nn.LayerNorm(dim)
self.init_()
logits_dim = default(logits_dim, num_tokens)
self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.weight.t()
# memory tokens (like [cls]) from Memory Transformers paper
num_memory_tokens = default(num_memory_tokens, 0)
self.num_memory_tokens = num_memory_tokens
if num_memory_tokens > 0:
self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
def init_(self):
if self.l2norm_embed:
nn.init.normal_(self.token_emb.emb.weight, std = 1e-5)
if not isinstance(self.pos_emb, always):
nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5)
return
nn.init.kaiming_normal_(self.token_emb.emb.weight)
def forward(
self,
x,
return_embeddings = False,
return_logits_and_embeddings = False,
return_intermediates = False,
mask = None,
return_mems = False,
return_attn = False,
mems = None,
pos = None,
prepend_embeds = None,
sum_embeds = None,
**kwargs
):
b, n, device, num_mem, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.emb_frac_gradient
return_hiddens = return_mems | return_attn
# absolute positional embedding
external_pos_emb = exists(pos) and pos.dtype != torch.long
pos_emb = self.pos_emb(x, pos = pos) if not external_pos_emb else pos
x = self.token_emb(x) + pos_emb
# for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training
if exists(sum_embeds):
x = x + sum_embeds
# post embedding norm, purportedly leads to greater stabilization
x = self.post_emb_norm(x)
# whether to append embeds, as in PaLI, for image embeddings
if exists(prepend_embeds):
prepend_seq, prepend_dim = prepend_embeds.shape[1:]
assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions'
x = torch.cat((prepend_embeds, x), dim = -2)
# whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model
if emb_frac_gradient < 1:
assert emb_frac_gradient > 0
x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient)
# embedding dropout
x = self.emb_dropout(x)
x = self.project_emb(x)
if num_mem > 0:
mem = repeat(self.memory_tokens, 'n d -> b n d', b = b)
x = torch.cat((mem, x), dim = 1)
# auto-handle masking after appending memory tokens
if exists(mask):
mask = pad_at_dim(mask, (num_mem, 0), dim = -1, value = True)
if self.shift_mem_down and exists(mems):
mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:]
mems = [*mems_r, *mems_l]
if return_hiddens:
x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)
else:
x = self.attn_layers(x, mask = mask, mems = mems, **kwargs)
x = self.norm(x)
mem, x = x[:, :num_mem], x[:, num_mem:]
if return_logits_and_embeddings:
out = (self.to_logits(x), x)
elif return_embeddings:
out = x
else:
out = self.to_logits(x)
if return_intermediates:
return out, intermediates
if return_mems:
hiddens = intermediates.hiddens
new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens
new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
return out, new_mems
if return_attn:
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
return out, attn_maps
return out
class ContinuousTransformerWrapper(nn.Module):
def __init__(
self,
*,
max_seq_len,
attn_layers,
dim_in = None,
dim_out = None,
emb_dim = None,
post_emb_norm = False,
emb_dropout = 0.,
use_abs_pos_emb = True,
scaled_sinu_pos_emb = False
):
super().__init__()
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
dim = attn_layers.dim
self.max_seq_len = max_seq_len
if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
self.pos_emb = always(0)
elif scaled_sinu_pos_emb:
self.pos_emb = ScaledSinusoidalEmbedding(dim)
else:
self.pos_emb = AbsolutePositionalEmbedding(dim, max_seq_len)
self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
self.emb_dropout = nn.Dropout(emb_dropout)
self.project_in = nn.Linear(dim_in, dim) if exists(dim_in) else nn.Identity()
self.attn_layers = attn_layers
self.norm = nn.LayerNorm(dim)
self.project_out = nn.Linear(dim, dim_out) if exists(dim_out) else nn.Identity()
def forward(
self,
x,
return_embeddings = False,
return_intermediates = False,
mask = None,
return_attn = False,
mems = None,
pos = None,
prepend_embeds = None,
**kwargs
):
x = self.project_in(x)
x = x + self.pos_emb(x, pos = pos)
x = self.post_emb_norm(x)
# whether to append embeds, as in PaLI, for image embeddings
if exists(prepend_embeds):
_, prepend_dim = prepend_embeds.shape[1:]
assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as model dimensions'
x = torch.cat((prepend_embeds, x), dim = -2)
x = self.emb_dropout(x)
x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)
x = self.norm(x)
out = self.project_out(x) if not return_embeddings else x
if return_intermediates:
return out, intermediates
if return_attn:
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
return out, attn_maps
return out
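# Illustrative usage sketch for ContinuousTransformerWrapper (dim_in / dim_out and lengths
# chosen arbitrarily): map continuous feature vectors to continuous outputs, with no token
# embedding or logits projection involved.
def _example_continuous_wrapper():
    model = ContinuousTransformerWrapper(
        max_seq_len = 1024,
        attn_layers = Encoder(dim = 512, depth = 4, heads = 8),
        dim_in = 32,
        dim_out = 32
    )
    x = torch.randn(1, 256, 32)
    return model(x)                           # -> (1, 256, 32)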
class XTransformer(nn.Module):
def __init__(
self,
*,
dim,
tie_token_emb = False,
ignore_index = -100,
pad_value = 0,
deepnorm = False,
cross_attn_tokens_dropout = 0.,
**kwargs
):
super().__init__()
enc_kwargs, kwargs = groupby_prefix_and_trim('enc_', kwargs)
dec_kwargs, kwargs = groupby_prefix_and_trim('dec_', kwargs)
assert 'dim' not in enc_kwargs and 'dim' not in dec_kwargs, 'dimension of either encoder or decoder must be set with `dim` keyword'
enc_transformer_kwargs = pick_and_pop(['num_tokens', 'max_seq_len'], enc_kwargs)
enc_transformer_kwargs['emb_dropout'] = enc_kwargs.pop('emb_dropout', 0)
enc_transformer_kwargs['num_memory_tokens'] = enc_kwargs.pop('num_memory_tokens', None)
enc_transformer_kwargs['scaled_sinu_pos_emb'] = enc_kwargs.pop('scaled_sinu_pos_emb', False)
enc_transformer_kwargs['use_abs_pos_emb'] = enc_kwargs.pop('use_abs_pos_emb', True)
dec_transformer_kwargs = pick_and_pop(['num_tokens', 'max_seq_len'], dec_kwargs)
dec_transformer_kwargs['emb_dropout'] = dec_kwargs.pop('emb_dropout', 0)
dec_transformer_kwargs['scaled_sinu_pos_emb'] = dec_kwargs.pop('scaled_sinu_pos_emb', False)
dec_transformer_kwargs['use_abs_pos_emb'] = dec_kwargs.pop('use_abs_pos_emb', True)
self.cross_attn_tokens_dropout = cross_attn_tokens_dropout # how many tokens from the encoder to dropout when cross attending from decoder - seen in a couple papers, including Perceiver AR - this will also be very effective regularization when cross attending to very long memories
if deepnorm:
enc_kwargs['scale_residual'] = True
dec_kwargs['scale_residual'] = True
enc_depth = enc_kwargs['depth']
dec_depth = dec_kwargs['depth']
enc_kwargs['scale_residual_constant'] = 0.81 * ((enc_depth ** 4) * dec_depth) ** .0625
dec_kwargs['scale_residual_constant'] = (3 * dec_depth) ** 0.25
self.encoder = TransformerWrapper(
**enc_transformer_kwargs,
attn_layers = Encoder(dim = dim, **enc_kwargs)
)
self.decoder = TransformerWrapper(
**dec_transformer_kwargs,
attn_layers = Decoder(dim = dim, cross_attend = True, **dec_kwargs)
)
if deepnorm:
deepnorm_init(self.encoder, 0.87 * ((enc_depth ** 4) * dec_depth) ** -0.0625)
deepnorm_init(self.decoder, (12 * dec_depth) ** -0.25)
if tie_token_emb:
self.decoder.token_emb = self.encoder.token_emb
self.decoder = AutoregressiveWrapper(self.decoder, ignore_index=ignore_index, pad_value=pad_value)
@torch.no_grad()
def generate(self, seq_in, seq_out_start, seq_len, mask = None, attn_mask = None, **kwargs):
encodings = self.encoder(seq_in, mask = mask, attn_mask = attn_mask, return_embeddings = True)
return self.decoder.generate(seq_out_start, seq_len, context = encodings, context_mask = mask, **kwargs)
def forward(self, src, tgt, mask = None, attn_mask = None, src_prepend_embeds = None):
if exists(src_prepend_embeds) and exists(mask):
mask = pad_at_dim(mask, (src_prepend_embeds.shape[-2], 0), dim = -1, value = True)
enc = self.encoder(src, mask = mask, attn_mask = attn_mask, prepend_embeds = src_prepend_embeds, return_embeddings = True)
if self.training and self.cross_attn_tokens_dropout > 0:
enc, mask = dropout_seq(enc, mask, self.cross_attn_tokens_dropout)
out = self.decoder(tgt, context = enc, context_mask = mask)
return out
|
AttentionGrid-main
|
AttentionGrid/attentions/x_transformers_attention/x_transformers.py
|
AttentionGrid-main
|
AttentionGrid/attentions/x_transformers_attention/__Init__.py
|
|
import functools
from functools import partial
from typing import NamedTuple, Optional
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
from einops import rearrange
from flax.linen import combine_masks
from jax import lax
from jax import numpy as jnp
from abstract import AbstractAttention
def quick_gelu(x):
return x * jax.nn.sigmoid(1.702 * x)
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
def get_gradient_checkpoint_policy(name):
return {
'everything_saveable': jax.checkpoint_policies.everything_saveable,
'nothing_saveable': jax.checkpoint_policies.nothing_saveable,
'dots_saveable': jax.checkpoint_policies.dots_saveable,
'dots_with_no_batch_dims_saveable': jax.checkpoint_policies.dots_with_no_batch_dims_saveable,
}[name]
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
def create_sinusoidal_positions(num_pos, dim):
inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
sinusoid_inp = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp)
sentinel = dim // 2 + dim % 2
out = np.zeros((num_pos, dim))
out[:, 0:sentinel] = sin
out[:, sentinel:] = cos
return jnp.array(out)
def rotate_every_two(tensor):
rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1)
rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,))
return rotate_half_tensor
def apply_rotary_pos_emb(tensor, sincos):
sin_pos, cos_pos = sincos
sin_pos = sin_pos[:, :, None, :].repeat(2, 3)
cos_pos = cos_pos[:, :, None, :].repeat(2, 3)
return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
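# Illustrative usage sketch for create_sinusoidal_positions / apply_rotary_pos_emb (sizes
# chosen arbitrarily): look up the sin/cos table at the given positions, split it into its
# two halves, and rotate a (batch, seq, heads, head_dim) tensor.
def _example_rotary_jax():
    table = create_sinusoidal_positions(num_pos=1024, dim=64)        # (1024, 64)
    position_ids = jnp.arange(128)[None, :]                          # (1, 128)
    sincos = jnp.split(jnp.take(table, position_ids, axis=0), 2, axis=-1)
    q = jnp.zeros((1, 128, 8, 64))
    return apply_rotary_pos_emb(q, sincos)                           # same shape as q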
class _AttentionBlock(nn.Module):
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
float32_logits: bool = False
def setup(self):
self.embed_dim = self.hidden_size
self.head_dim = self.embed_dim // self.num_heads
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=False,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
self.ln_1 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.ln_2 = nn.LayerNorm(epsilon=self.layer_norm_epsilon, dtype=self.dtype)
self.fc_in = nn.Dense(self.intermediate_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.fc_out = nn.Dense(self.embed_dim,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
self.act = ACT2FN[self.activation_function]
self.resid_dropout = nn.Dropout(rate=self.resid_pdrop)
if self.rotary_dim is not None and self.rotary_dim > 0:
pos_embd_dim = self.rotary_dim
else:
pos_embd_dim = self.embed_dim // self.num_heads
self.embed_positions = create_sinusoidal_positions(self.max_position_embeddings, pos_embd_dim)
def _split_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
def attn_out_proj(self, attn_output, deterministic):
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
return attn_output
def forward_qkv(
self,
hidden_states,
position_ids,
deterministic: bool = True,
):
hidden_states = self.ln_1(hidden_states)
query = self.q_proj(hidden_states)
key = self.k_proj(hidden_states)
value = self.v_proj(hidden_states)
query = self._split_heads(query)
key = self._split_heads(key)
value = self._split_heads(value)
sincos = jnp.take(self.embed_positions, position_ids, axis=0)
sincos = jnp.split(sincos, 2, axis=-1)
if self.rotary_dim is not None and self.rotary_dim > 0:
k_rot = key[:, :, :, : self.rotary_dim]
k_pass = key[:, :, :, self.rotary_dim :]
q_rot = query[:, :, :, : self.rotary_dim]
q_pass = query[:, :, :, self.rotary_dim :]
k_rot = apply_rotary_pos_emb(k_rot, sincos)
q_rot = apply_rotary_pos_emb(q_rot, sincos)
key = jnp.concatenate([k_rot, k_pass], axis=-1)
query = jnp.concatenate([q_rot, q_pass], axis=-1)
else:
key = apply_rotary_pos_emb(key, sincos)
query = apply_rotary_pos_emb(query, sincos)
if self.float32_logits:
query = query.astype(jnp.float32)
key = key.astype(jnp.float32)
return query, key, value
def forward_ffn(
self,
hidden_states,
deterministic: bool = True,
):
hidden_states = self.ln_2(hidden_states)
hidden_states = self.fc_in(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.fc_out(hidden_states)
hidden_states = self.resid_dropout(hidden_states, deterministic=deterministic)
return hidden_states
class AttentionBlock(nn.Module):
q_chunk_size: int
k_chunk_size: int
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
attn_pdrop: float = 0.0
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
policy: str = 'nothing_saveable'
prevent_cse: bool = False
float32_logits: bool = False
def setup(self):
self.attn = _AttentionBlock(
self.hidden_size,
self.num_heads,
self.rotary_dim,
self.intermediate_size,
self.layer_norm_epsilon,
self.activation_function,
self.resid_pdrop,
self.max_position_embeddings,
self.dtype,
self.causal,
self.float32_logits,
)
@nn.compact
def _concatenate_to_cache(self, key, value, query, attention_mask):
"""
This function takes projected key, value states from a single input token and concatenates the states to cached
states from previous steps. This function is slighly adapted from the official Flax repository:
https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
"""
# detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable("cache", "cached_key")
cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
*batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
# update key, value caches with our new 1d spatial slices
cur_index = cache_index.value
indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
key = lax.dynamic_update_slice(cached_key.value, key, indices)
value = lax.dynamic_update_slice(cached_value.value, value, indices)
cached_key.value = key
cached_value.value = value
num_updated_cache_vectors = query.shape[1]
cache_index.value = cache_index.value + num_updated_cache_vectors
# causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
pad_mask = jnp.broadcast_to(
jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
)
attention_mask = combine_masks(pad_mask, attention_mask)
return key, value, attention_mask
def __call__(
self,
hidden_states,
attention_mask,
position_ids,
deterministic: bool = True,
init_cache: bool = False,
):
query, key, value = self.attn.forward_qkv(hidden_states, position_ids)
query = query / jnp.sqrt(query.shape[-1])
dropout_rng = None
if not deterministic and self.attn_pdrop > 0.0:
dropout_rng = self.make_rng("dropout")
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, -1e9).astype(self.dtype),
)
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.has_variable("cache", "cached_key") or init_cache:
query, key, value = self.attn.forward_qkv(hidden_states, position_ids)
key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
# use standard dot product attention since query length is 1
attn_weights = nn.dot_product_attention_weights(
query,
key,
bias=attention_bias,
dropout_rng=dropout_rng,
                dropout_rate=self.attn_pdrop,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
attn_output = self.attn.attn_out_proj(attn_output, deterministic=deterministic)
ffn_output = self.attn.forward_ffn(hidden_states + attn_output, deterministic=deterministic)
outputs = attn_output + ffn_output + hidden_states
else:
attn_output = blockwise_compute_attn(
query,
key,
value,
bias=attention_bias,
                deterministic=deterministic,
dropout_rng=dropout_rng,
attn_pdrop=self.attn_pdrop,
causal_mask=self.causal,
query_chunk_size=self.q_chunk_size,
key_chunk_size=self.k_chunk_size,
dtype=self.dtype,
policy=self.policy,
precision=None,
prevent_cse=self.prevent_cse,
)
attn_output = self.attn.attn_out_proj(attn_output, deterministic=deterministic)
ffn_output = blockwise_compute_ffn(
self.attn,
hidden_states + attn_output,
chunk_size=self.q_chunk_size,
deterministic=deterministic,
policy=self.policy,
prevent_cse=self.prevent_cse,
)
outputs = ffn_output + hidden_states + attn_output
return outputs
def _chunk_attention_bias(query_chunk_size, key_chunk_size,
bias, deterministic, attn_dropout, attn_pdrop, causal_mask,
query_chunk_idx, key_chunk_idx):
query_offset = query_chunk_idx * query_chunk_size
key_offset = key_chunk_idx * key_chunk_size
chunk_bias = jnp.zeros((1, 1, 1, 1))
if bias is not None:
chunk_bias = lax.dynamic_slice(
bias,
start_indices=(0, 0, query_offset, key_offset),
slice_sizes=(*bias.shape[:2], min(bias.shape[-2], query_chunk_size), min(bias.shape[-1], key_chunk_size)),
)
if causal_mask:
query_idx = lax.broadcasted_iota(dtype=jnp.int32, shape=(query_chunk_size, 1), dimension=0)
key_idx = lax.broadcasted_iota(dtype=jnp.int32, shape=(1, key_chunk_size), dimension=1)
offset = query_offset - key_offset
query_idx += offset
causal_mask_value = (query_idx < key_idx) * MASK_VALUE
chunk_bias += causal_mask_value.reshape(1, 1, *causal_mask_value.shape)
if not deterministic and attn_pdrop > 0.0:
attn_dropout_slice = lax.dynamic_slice(
attn_dropout,
start_indices=(0, 0, query_offset, key_offset),
slice_sizes=(
*attn_dropout.shape[:2],
min(attn_dropout.shape[-2], query_chunk_size),
min(attn_dropout.shape[-1], key_chunk_size),
),
)
chunk_bias -= attn_dropout_slice * 1e6
return chunk_bias
class Carry(NamedTuple):
numerator: jax.Array
denominator: jax.Array
max_so_far: jax.Array
def blockwise_compute_attn(query, key, value,
bias=None,
deterministic=False,
dropout_rng=None,
attn_pdrop=0.0,
causal_mask=True,
query_chunk_size=None,
key_chunk_size=None,
dtype=jnp.float32,
policy='nothing_saveable',
precision=lax.Precision.HIGHEST,
prevent_cse=False,):
q_len = query.shape[1]
kv_len = key.shape[1]
query = rearrange(query, 'b (n c) h q -> b n c h q', c=query_chunk_size)
key, value = map(lambda t: rearrange(t, 'b (n c) h v -> b n c h v', c=key_chunk_size), (key, value))
query, key, value = map(lambda t: rearrange(t, 'b n c h d -> n b c h d'), (query, key, value))
num_q, batch, _, num_heads, dim_per_head = query.shape
num_kv, _, _, _, _ = key.shape
for bias_dim, broadcast_dim in zip(bias.shape, (batch, num_heads, q_len, kv_len)):
assert bias_dim == 1 or bias_dim == broadcast_dim
if not deterministic and attn_pdrop > 0.0:
attn_dropout_rng, dropout_rng = jax.random.split(dropout_rng)
attn_dropout = jax.random.bernoulli(attn_dropout_rng, attn_pdrop, (batch, num_heads, q_len, kv_len))
else:
attn_dropout = None
_chunk_bias_fn = functools.partial(
_chunk_attention_bias,
query_chunk_size, key_chunk_size,
bias, deterministic, attn_dropout, attn_pdrop, causal_mask)
def _query_chunk_attention(args):
query_chunk, query_chunk_idx = args
@functools.partial(jax.checkpoint, prevent_cse=prevent_cse,
policy=get_gradient_checkpoint_policy(policy))
def summarize_chunk(carry, args):
key_chunk, value_chunk, key_chunk_idx = args
(numerator, denominator, prev_max_score) = carry
attn_weights = jnp.einsum('bqhd,bkhd->bqhk', query_chunk, key_chunk, precision=precision)
bias_chunk = _chunk_bias_fn(query_chunk_idx, key_chunk_idx)
bias_chunk = jnp.moveaxis(bias_chunk, 1, 2)
attn_weights = attn_weights + bias_chunk
max_score = jnp.max(attn_weights, axis=-1, keepdims=True)
max_score = jnp.maximum(prev_max_score, max_score)
max_score = jax.lax.stop_gradient(max_score)
exp_weights = jnp.exp(attn_weights - max_score)
exp_values = jnp.einsum(
'bqhv,bvhf->bqhf', exp_weights, value_chunk, precision=precision
)
correction = jnp.exp(prev_max_score - max_score)
numerator = numerator * correction + exp_values
denominator = denominator * correction + exp_weights.sum(axis=-1, keepdims=True)
return Carry(numerator, denominator, max_score), None
init_carry = Carry(
jnp.zeros((batch, query_chunk_size, num_heads, dim_per_head), dtype=dtype),
jnp.zeros((batch, query_chunk_size, num_heads, dim_per_head), dtype=dtype),
(-jnp.inf) * jnp.ones((batch, query_chunk_size, num_heads, 1), dtype=dtype),
)
(numerator, denominator, max_score), _ = lax.scan(
summarize_chunk, init_carry, xs=(key, value, jnp.arange(0, num_kv))
)
outputs = (numerator / denominator).astype(dtype)
return outputs
_, res = lax.scan(
lambda _, x: ((), _query_chunk_attention(x)),
(), xs=(query, jnp.arange(0, num_q))
)
res = rearrange(res, 'n b c h d -> b (n c) h d')
return res
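# Illustrative usage sketch for blockwise_compute_attn (all sizes chosen arbitrarily):
# chunked attention over already-projected q / k / v of shape
# (batch, seq, heads, dim_per_head). The chunk sizes must divide the sequence length and
# the additive bias must broadcast against (batch, heads, q_len, kv_len).
def _example_blockwise_attn():
    batch, seq, heads, dim_per_head = 2, 1024, 8, 64
    rng = jax.random.PRNGKey(0)
    q = jax.random.normal(rng, (batch, seq, heads, dim_per_head))
    k = jax.random.normal(rng, (batch, seq, heads, dim_per_head))
    v = jax.random.normal(rng, (batch, seq, heads, dim_per_head))
    bias = jnp.zeros((1, 1, 1, 1))            # no padding mask
    out = blockwise_compute_attn(
        q, k, v,
        bias=bias,
        causal_mask=True,
        query_chunk_size=256,
        key_chunk_size=256,
    )
    return out                                # -> (batch, seq, heads, dim_per_head)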
def blockwise_compute_ffn(cell, inputs, chunk_size, deterministic, policy, prevent_cse):
inputs = rearrange(inputs, 'b (n c) d -> b n c d', c=chunk_size)
inputs = rearrange(inputs, 'b n c d -> n b c d')
num_q, _, _, _ = inputs.shape
def ffn(cell, _, hidden_states):
outputs = cell.forward_ffn(hidden_states, deterministic=deterministic)
return _, outputs
ffn_remat = nn.remat(
ffn,
variables="params",
rngs={"params" : False},
prevent_cse=prevent_cse,
policy=get_gradient_checkpoint_policy(policy),
)
_, res = nn.scan(
ffn_remat,
variable_broadcast="params",
split_rngs={"params": False},
in_axes=0,
out_axes=0,
length=num_q,
)(cell, None, inputs)
res = rearrange(res, 'n b c d -> b (n c) d')
return res
class Blockwise_LM_Head(nn.Module):
vocab_size: int
chunk_size: int
policy: str = 'nothing_saveable'
dtype: jnp.dtype = jnp.float32
prevent_cse: bool = False
def setup(self):
self.lm_head = nn.Dense(
self.vocab_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.variance_scaling(
scale=1.0, mode='fan_in',
distribution='normal',
)
)
def __call__(self, inputs):
inputs = rearrange(inputs, 'b (n c) d -> b n c d', c=self.chunk_size)
inputs = rearrange(inputs, 'b n c d -> n b c d')
num_q, _, _, _ = inputs.shape
def lm_head(cell, _, hidden_states):
outputs = cell(hidden_states)
return _, outputs
lm_head_remat = nn.remat(
lm_head,
variables="params",
rngs={"params" : False},
prevent_cse=self.prevent_cse,
policy=get_gradient_checkpoint_policy(self.policy),
)
_, res = nn.scan(
lm_head_remat,
variable_broadcast="params",
split_rngs={"params": False},
in_axes=0,
out_axes=0,
length=num_q,
)(self.lm_head, None, inputs)
res = rearrange(res, 'n b c d -> b (n c) d')
return res
def blockwise_cross_entropy(logits, tokens, valid=None,
chunk_size=None, policy=None, prevent_cse=None):
if valid is None:
valid = jnp.ones(tokens.shape[:2])
valid = valid.astype(jnp.float32)
logits = jnp.reshape(logits, (-1, logits.shape[-1]))
tokens = jnp.reshape(tokens, (-1,))
valid = jnp.reshape(valid, (-1,))
def _cross_entropy_loss_and_accuracy(logits, tokens, valid):
valid_text_length = jnp.maximum(jnp.sum(valid, axis=-1), 1e-10)
token_log_prob = jnp.squeeze(
jnp.take_along_axis(
jax.nn.log_softmax(logits, axis=-1),
jnp.expand_dims(tokens, -1),
axis=-1,
),
-1,
)
token_log_prob = jnp.where(valid > 0.0, token_log_prob, jnp.array(0.0))
correct = jnp.where(
valid > 0.0,
jnp.argmax(logits, axis=-1) == tokens,
jnp.array(False)
)
return token_log_prob, correct, valid_text_length
@partial(jax.checkpoint, prevent_cse=prevent_cse,
policy=get_gradient_checkpoint_policy(policy))
def _loss_and_accuracy(carry, args):
loss, accuracy, num = carry
logits, tokens, valid = args
token_log_prob, correct, valid_text_length = \
_cross_entropy_loss_and_accuracy(logits, tokens, valid)
loss = loss + jnp.sum(token_log_prob, axis=-1) / valid_text_length
accuracy = accuracy + jnp.sum(correct, axis=-1) / valid_text_length
num = num + 1
return (loss, accuracy, num), None
num_chunk = logits.shape[0] // chunk_size
logits = rearrange(logits, '(n c) d -> n c d', c=chunk_size)
tokens = rearrange(tokens, '(n c) -> n c', c=chunk_size)
valid = rearrange(valid, '(n c) -> n c', c=chunk_size)
(loss, accuracy, num), _ = jax.lax.scan(
_loss_and_accuracy, (0.0, 0.0, 0), xs=(logits, tokens, valid),
length=num_chunk,
)
loss = - loss / num
accuracy = accuracy / num
return loss, accuracy
class BlockwiseParallelJax(AbstractAttention, nn.Module):
q_chunk_size: int
k_chunk_size: int
hidden_size: int
num_heads: int
rotary_dim: Optional[int]
intermediate_size: int
layer_norm_epsilon: float = 1e-5
activation_function: str = "gelu"
attn_pdrop: float = 0.0
resid_pdrop: float = 0.0
max_position_embeddings: int = 1024
dtype: jnp.dtype = jnp.float32
causal: bool = True
policy: str = 'nothing_saveable'
prevent_cse: bool = False
float32_logits: bool = False
def setup(self):
self.attn = _AttentionBlock(
self.hidden_size,
self.num_heads,
self.rotary_dim,
self.intermediate_size,
self.layer_norm_epsilon,
self.activation_function,
self.resid_pdrop,
self.max_position_embeddings,
self.dtype,
self.causal,
self.float32_logits,
)
    def forward(self,
        hidden_states,
        attention_mask,
        position_ids,
        deterministic: bool = True,
        init_cache: bool = False,
    ):
        # NOTE: init_cache is accepted for interface compatibility; this blockwise path does
        # not maintain a decoding cache. The body mirrors the non-cached branch of
        # AttentionBlock.__call__ above.
        # project to queries / keys / values (layer norm + rotary embeddings happen inside)
        query, key, value = self.attn.forward_qkv(hidden_states, position_ids, deterministic=deterministic)
        query = query / jnp.sqrt(query.shape[-1])
        dropout_rng = None
        if not deterministic and self.attn_pdrop > 0.0:
            dropout_rng = self.make_rng("dropout")
        # turn the (batch, seq) padding mask into an additive bias
        attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
        attention_bias = lax.select(
            attention_mask > 0,
            jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
            jnp.full(attention_mask.shape, -1e9).astype(self.dtype),
        )
        # blockwise (chunked) attention over query / key chunks
        attn_output = blockwise_compute_attn(
            query,
            key,
            value,
            bias=attention_bias,
            deterministic=deterministic,
            dropout_rng=dropout_rng,
            attn_pdrop=self.attn_pdrop,
            causal_mask=self.causal,
            query_chunk_size=self.q_chunk_size,
            key_chunk_size=self.k_chunk_size,
            dtype=self.dtype,
            policy=self.policy,
            precision=None,
            prevent_cse=self.prevent_cse,
        )
        attn_output = self.attn.attn_out_proj(attn_output, deterministic=deterministic)
        # blockwise feedforward over the post-attention residual stream
        ffn_output = blockwise_compute_ffn(
            self.attn,
            hidden_states + attn_output,
            chunk_size=self.q_chunk_size,
            deterministic=deterministic,
            policy=self.policy,
            prevent_cse=self.prevent_cse,
        )
        return ffn_output + hidden_states + attn_output
if __name__ == '__main__':
with jax.profiler.trace('/tmp/prof/blockwise_parallel_simplified'):
class Model(nn.Module):
def setup(self):
self.blocks = [
AttentionBlock(
q_chunk_size=256,
k_chunk_size=256,
hidden_size=2048,
num_heads=16,
rotary_dim=128,
intermediate_size=8192,
layer_norm_epsilon=1e-5,
activation_function="gelu",
resid_pdrop=0.0,
max_position_embeddings=2048,
dtype=jnp.float32,
causal=True,
)
for _ in range(2)
]
def __call__(self, hidden_states, attention_mask, position_ids):
for block in self.blocks:
hidden_states = block(hidden_states, attention_mask, position_ids)
return hidden_states
hidden_states = jnp.zeros((2, 1024, 2048))
attention_mask = jnp.zeros((2, 1024), dtype=jnp.int32)
position_ids = jnp.zeros((2, 1024), dtype=jnp.int32)
model = Model()
variables = model.init(jax.random.PRNGKey(0), hidden_states, attention_mask, position_ids)
output = model.apply(variables, hidden_states, attention_mask, position_ids)
output = output.block_until_ready()
|
AttentionGrid-main
|
AttentionGrid/attentions/blockwise_parallel/blockwise_attention_jax.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from functools import partial
def quick_gelu(x):
return x * torch.sigmoid(1.702 * x)
ACT2FN = {
"gelu": F.gelu,
"relu": F.relu,
"silu": F.silu,
"swish": F.swish,
"gelu_new": quick_gelu,
"quick_gelu": quick_gelu,
}
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
def create_sinusoidal_positions(num_pos, dim):
    inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
sinusoid_inp = np.einsum("i, j -> i j", np.arange(num_pos), inv_freq).astype("float32")
sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp)
sentinel = dim // 2 + dim % 2
out = np.zeros((num_pos, dim))
out[:, 0:sentinel] = sin
    out[:, sentinel:] = cos
return torch.tensor(out)
def rotate_every_two(tensor):
rotate_half_tensor = torch.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), dim=-1)
rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,))
return rotate_half_tensor
def apply_rotary_pos_emb(tensor, sincos):
    sin_pos, cos_pos = sincos
    # interleave each sin / cos value twice along the feature dim to match rotate_every_two
    sin_pos = sin_pos[:, :, None, :].repeat_interleave(2, dim=-1)
    cos_pos = cos_pos[:, :, None, :].repeat_interleave(2, dim=-1)
    return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
class BlockwiseParallel(nn.Module):
def __init__(self, hidden_size, num_heads, rotary_dim, intermediate_size, layer_norm_epsilon=1e-5,
activation_function="gelu", resid_pdrop=0.0, max_position_embeddings=1024, dtype=torch.float32,
                 causal=True, float32_logits=False):
super().__init__()
self.hidden_size = hidden_size
self.num_heads = num_heads
self.rotary_dim = rotary_dim
self.intermediate_size = intermediate_size
self.layer_norm_epsilon = layer_norm_epsilon
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.max_position_embeddings = max_position_embeddings
self.dtype = dtype
        self.causal = causal
self.float32_logits = float32_logits
self.embed_dim = self.hidden_size
self.head_dim = self.embed_dim // self.num_heads
        dense = partial(
            nn.Linear,
            self.embed_dim,
            self.embed_dim,   # nn.Linear requires both in_features and out_features
            bias=False,
            dtype=self.dtype
        )
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
        self.ln_1 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_epsilon, elementwise_affine=True)
self.ln_2 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_epsilon, elementwise_affine=True)
self.fc_in = nn.Linear(self.hidden_size, self.intermediate_size, dtype=self.dtype)
self.fc_out = nn.Linear(self.intermediate_size, self.hidden_size, dtype=self.dtype)
        self.act = ACT2FN[self.activation_function]
self.resid_pdrop = nn.Dropout(p=self.resid_pdrop)
if self.rotary_dim is not None and self.rotary_dim > 0:
pos_embd_dim = self.rotary_dim
else:
pos_embd_dim = self.embed_dim // self.num_heads
self.embed_positions = create_sinusoidal_positions(self.max_position_embeddings, pos_embd_dim)
def _split_heads(self, hidden_states):
return hidden_states.view(hidden_states.shape[:-1] + (self.num_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.view(hidden_states.shape[:-2] + (self.embed_dim,))
def attn_out_proj(self, attn_output, deterministic):
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
attn_output = self.resid_pdrop(attn_output)
return attn_output
def forward_qkv(self, hidden_states, position_ids, deterministic=True):
hidden_states = self.ln_1(hidden_states)
q = self.q_proj(hidden_states)
k = self.k_proj(hidden_states)
v = self.v_proj(hidden_states)
q = self._split_heads(q)
k = self._split_heads(k)
v = self._split_heads(v)
if self.rotary_dim is not None and self.rotary_dim > 0:
sincos = self.embed_positions[position_ids]
sincos = torch.chunk(sincos, 2, dim=-1)
q, k = apply_rotary_pos_emb(q, sincos), apply_rotary_pos_emb(k, sincos)
return q, k, v
def forward(self, hidden_states, position_ids, attention_mask=None, deterministic=True):
q, k, v = self.forward_qkv(hidden_states, position_ids, deterministic)
attn_output, attn_weights = self._attn(q, k, v, attention_mask, deterministic)
attn_output = self.attn_out_proj(attn_output, deterministic)
hidden_states = hidden_states + attn_output
hidden_states = self.ln_2(hidden_states)
ffn_output = self.fc_in(hidden_states)
ffn_output = self.act(ffn_output)
ffn_output = self.fc_out(ffn_output)
ffn_output = self.resid_pdrop(ffn_output)
hidden_states = hidden_states + ffn_output
return hidden_states, attn_weights
def _attn(self, q, k, v, attention_mask=None, deterministic=True):
attn_weights = torch.matmul(q, k.transpose(-1, -2)) / (self.head_dim ** 0.5)
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = F.softmax(attn_weights, dim=-1)
if not deterministic:
attn_weights = self.resid_pdrop(attn_weights)
attn_output = torch.matmul(attn_weights, v)
return attn_output, attn_weights
|
AttentionGrid-main
|
AttentionGrid/attentions/blockwise_parallel/blockwise_attention_torch.py
|
import torch
from blockwise_attention_torch import BlockwiseParallel
#constants
MASK_VALUE = -1e10
batch_size= 8
sequence_length = 8192
hidden_size = 256
num_heads = 8
rotary_dim = 64
intermediate_size = 512
#random tensor
input_tensor = torch.randn(batch_size, sequence_length, hidden_size)
#create position_ids
position_ids = torch.arange(0, sequence_length, dtype=torch.long).unsqueeze(0).repeat(batch_size, 1)
#create an attention mask {optional}
attention_mask = torch.ones(batch_size, sequence_length, sequence_length)
attention_mask = attention_mask - torch.eye(sequence_length)
attention_mask = attention_mask.masked_fill(attention_mask==0, MASK_VALUE)
#init attention block
attention_block = BlockwiseParallel(hidden_size, num_heads, rotary_dim, intermediate_size)
#pass the input tensor through the attention block
output, attn_weights = attention_block(input_tensor, position_ids, attention_mask)
#print the output tensor and attention weights
print(f"Output tensor shape: {output.shape}")
print("Attention weights shape: {attn_weights.shape}")
|
AttentionGrid-main
|
AttentionGrid/attentions/blockwise_parallel/test.py
|
AttentionGrid-main
|
AttentionGrid/attentions/dilation_attention/__init__.py
|
|
import torch
import torch.nn as nn
from flash_attn.flash_attention import FlashMHA
# Replace this with your correct GPU device
device = "cuda:0"
dtype=torch.float16
class DilatedAttention(nn.Module):
def __init__(self, d_model, num_heads, dilation_rate, segment_size, dropout=0.0):
super(DilatedAttention, self).__init__()
self.d_model = d_model
self.num_heads = num_heads
self.dilation_rate = dilation_rate
self.segment_size = segment_size
self.attention = FlashMHA(embed_dim=d_model, num_heads=num_heads, device=device, dtype=dtype)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
batch_size, seq_len, _ = x.shape
# Split and sparsify
x = x.view(batch_size, -1, self.segment_size, self.d_model)
x = x[:, :, :: self.dilation_rate, :]
# Perform attention
attn_output, _ = self.attention(x, x, x)
#apply dropout
attn_output = self.dropout(attn_output)
# Scatter and concatenate
attn_output = attn_output.view(batch_size, -1, self.d_model)
return attn_output
class LongNet(nn.Module):
def __init__(self, d_model, num_heads, dilation_rates, segment_sizes):
super(LongNet, self).__init__()
assert len(dilation_rates) == len(segment_sizes), "dilation_rates and segment_sizes should have the same length"
self.d_model = d_model
self.num_heads = num_heads
self.dilation_rates = dilation_rates
self.segment_sizes = segment_sizes
self.dilated_attention_layers = nn.ModuleList(
[DilatedAttention(d_model, num_heads, dilation_rate, segment_size)
for dilation_rate, segment_size in zip(dilation_rates, segment_sizes)]
)
def forward(self, x):
#accumulate outputs from different layers
outputs = []
#process each dilated attention layer
for i in range(len(self.dilated_attention_layers)):
output = self.dilated_attention_layers[i](x)
outputs.append(output)
#combine the outputs
output = torch.sum(torch.stack(outputs), dim=0)
return output
|
AttentionGrid-main
|
AttentionGrid/attentions/dilation_attention/main.py
|
import torch
from AttentionGrid import fused_landmark_attention
# Define some input data
q = torch.randn(64, 12, 64)
k = torch.randn(64, 12, 64)
v = torch.randn(64, 12, 64)
is_mem = torch.randn(64, 12, 64)
# You can now use the fused landmark attention function in your code
output = fused_landmark_attention(q, k, v, is_mem)
|
AttentionGrid-main
|
examples/landmark_triton.py
|
from AttentionGrid import BlockwiseParallel
import torch
# Initialize the class
bp = BlockwiseParallel(
hidden_size=768,
num_heads=12,
rotary_dim=32,
intermediate_size=3072
)
# Suppose we have hidden_states and position_ids as input data
hidden_states = torch.rand(1, 100, 768)
position_ids = torch.arange(100).unsqueeze(0)
# You can now apply the attention mechanism to your input data
output, attn_weights = bp(hidden_states, position_ids)
|
AttentionGrid-main
|
examples/blockwise_torch.py
|
from AttentionGrid import BlockwiseParallelJax
import jax
import jax.numpy as jnp
# Initialize the class
bpjax = BlockwiseParallelJax(
q_chunk_size=64,
k_chunk_size=64,
hidden_size=768,
num_heads=12,
rotary_dim=32,
intermediate_size=3072
)
# Suppose we have hidden_states, attention_mask, and position_ids as input data
hidden_states = jax.random.uniform(jax.random.PRNGKey(0), (1, 100, 768))
attention_mask = jax.random.uniform(jax.random.PRNGKey(1), (1, 1, 100, 100))
position_ids = jnp.arange(100).reshape(1, 100)
# You can now apply the attention mechanism to your input data
output = bpjax.forward(hidden_states, attention_mask, position_ids)
|
AttentionGrid-main
|
examples/blockwise_jax.py
|
from AttentionGrid import sparse_attn
# Define some hyperparameters
BATCH = 64
H = 12
n_ctx = 100
D_HEAD = 64
# You can now use the sparse attention function in your code
output = sparse_attn(num_buckets_or_sparsity=128, n_ctx=n_ctx, mode='fwd')
|
AttentionGrid-main
|
examples/dynamic_sparse_attention_triton.py
|
Gen1-main
|
gen1/__init__.py
|
|
Gen1-main
|
gen1/model.py
|
|
from collections import namedtuple
from dataclasses import dataclass
from functools import partial, wraps
from typing import Optional
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from packaging import version
from torch import Tensor, einsum, nn
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
@dataclass
class Intermediates:
qk_similarities: Optional[Tensor] = None
pre_softmax_attn: Optional[Tensor] = None
post_softmax_attn: Optional[Tensor] = None
def to_tuple(self):
return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn)
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def compact(arr):
return [*filter(exists, arr)]
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# functions for creating causal mask
# need a special one for onnx cpu (no support for .triu)
def create_causal_mask(i, j, device):
return torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1)
def onnx_create_causal_mask(i, j, device):
r = torch.arange(i, device = device)
causal_mask = rearrange(r, 'i -> i 1') < rearrange(r, 'j -> 1 j')
causal_mask = F.pad(causal_mask, (j - i, 0), value = False)
return causal_mask
# main class
class Attend(nn.Module):
def __init__(
self,
*,
dropout = 0.,
causal = False,
heads = None,
talking_heads = False,
sparse_topk = None,
scale = None,
qk_norm = False,
flash = False,
add_zero_kv = False,
onnxable = False
):
super().__init__()
self.scale = scale
self.qk_norm = qk_norm
self.causal = causal
self.create_causal_mask = onnx_create_causal_mask if onnxable else create_causal_mask
self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
# talking heads
assert not (flash and talking_heads), 'talking heads not compatible with flash attention'
self.talking_heads = talking_heads
if talking_heads:
self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
# sparse topk
assert not (flash and sparse_topk), 'sparse topk not compatible with flash attention'
self.sparse_topk = sparse_topk
# add a key / value token composed of zeros
# in case this helps controlling outliers, proposed by https://www.evanmiller.org/attention-is-off-by-one.html
self.add_zero_kv = add_zero_kv
# flash attention
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = EfficientAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(False, True, True)
def flash_attn(
self,
q, k, v,
mask = None,
attn_bias = None
):
batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
if v.ndim == 3:
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
# handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention
if self.qk_norm:
default_scale = q.shape[-1] ** -0.5
q = q * (default_scale / self.scale)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
causal = self.causal
if exists(mask):
assert mask.ndim == 4
mask = mask.expand(batch, heads, q_len, k_len)
# manually handle causal mask, if another mask was given
if causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
mask = mask & ~causal_mask
causal = False
# handle alibi positional bias
# convert from bool to float
if exists(attn_bias):
attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1)
# if mask given, the mask would already contain the causal mask from above logic
# otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number
mask_value = -torch.finfo(q.dtype).max
if exists(mask):
attn_bias = attn_bias.masked_fill(~mask, mask_value // 2)
elif causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2)
causal = False
# scaled_dot_product_attention handles attn_mask either as bool or additive bias
# make it an additive bias here
mask = attn_bias
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = causal
)
return out, Intermediates()
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, heads, kv_heads, device = q.shape[-2], q.shape[1], k.shape[1], q.device
scale = default(self.scale, q.shape[-1] ** -0.5)
# handle grouped multi-query attention
if kv_heads == 1:
k, v = map(lambda t: rearrange(t, 'b 1 n d -> b n d'), (k, v))
elif kv_heads < heads:
k, v = map(lambda t: repeat(t, 'b kvh n d -> b (r kvh) n d', r = heads // kv_heads), (k, v))
# handle zero kv, as means for allowing network to attend to nothing
if self.add_zero_kv:
k, v = map(lambda t: F.pad(t, (0, 0, 1, 0), value = 0.), (k, v))
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
if exists(attn_bias):
attn_bias = F.pad(attn_bias, (1, 0), value = 0.)
if self.flash:
assert not exists(prev_attn), 'residual attention not compatible with flash attention'
return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias)
kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale
if exists(prev_attn):
dots = dots + prev_attn
qk_similarities = dots.clone()
if self.talking_heads:
dots = self.pre_softmax_talking_heads(dots)
if exists(attn_bias):
dots = dots + attn_bias
i, j, dtype = *dots.shape[-2:], dots.dtype
mask_value = -torch.finfo(dots.dtype).max
if exists(self.sparse_topk) and self.sparse_topk < j:
top_values, _ = dots.topk(self.sparse_topk, dim = -1)
sparse_topk_mask = dots < top_values[..., -1:]
mask = (mask & sparse_topk_mask) if exists(mask) else sparse_topk_mask
if exists(mask):
dots = dots.masked_fill(~mask, mask_value)
if self.causal:
causal_mask = self.create_causal_mask(i, j, device = device)
dots = dots.masked_fill(causal_mask, mask_value)
pre_softmax_attn = dots.clone()
attn = self.attn_fn(dots, dim = -1)
attn = attn.type(dtype)
post_softmax_attn = attn.clone()
attn = self.attn_dropout(attn)
if self.talking_heads:
attn = self.post_softmax_talking_heads(attn)
out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v)
intermediates = Intermediates(
qk_similarities = qk_similarities,
pre_softmax_attn = pre_softmax_attn,
post_softmax_attn = post_softmax_attn
)
return out, intermediates
# cascading heads logic
def to_single_heads(t, dim = 1):
heads = t.unbind(dim = dim)
return tuple(head.unsqueeze(dim) for head in heads)
class CascadingHeads(nn.Module):
def __init__(self, attend: Attend):
super().__init__()
self.attend = attend
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
assert q.shape[-1] == v.shape[-1], 'cascading heads can only be done if query / key and value head dimensions are the same'
# split inputs into per-head inputs
heads = q.shape[1]
queries = to_single_heads(q)
keys = to_single_heads(k) if k.ndim == 4 else ((k,) * heads)
values = to_single_heads(v) if v.ndim == 4 else ((v,) * heads)
mask = (mask,) * heads
attn_bias = to_single_heads(attn_bias, dim = 0) if exists(attn_bias) else ((None,) * heads)
prev_attn = to_single_heads(prev_attn) if exists(prev_attn) else ((None,) * heads)
# now loop through each head, without output of previous head summed with the next head
# thus cascading
all_outs = []
all_intermediates = []
prev_head_out = None
for h_q, h_k, h_v, h_mask, h_attn_bias, h_prev_attn in zip(queries, keys, values, mask, attn_bias, prev_attn):
if exists(prev_head_out):
h_q = h_q + prev_head_out
out, intermediates = self.attend(
h_q, h_k, h_v,
mask = h_mask,
attn_bias = h_attn_bias,
prev_attn = h_prev_attn
)
prev_head_out = out
all_outs.append(out)
all_intermediates.append(intermediates)
# cat all output heads
all_outs = torch.cat(all_outs, dim = 1)
# cat all intermediates, if they exist
qk_similarities, pre_softmax_attn, post_softmax_attn = zip(*map(lambda i: i.to_tuple(), all_intermediates))
qk_similarities, pre_softmax_attn, post_softmax_attn = map(compact, (qk_similarities, pre_softmax_attn, post_softmax_attn))
aggregated_intermediates = Intermediates(
qk_similarities = torch.cat(qk_similarities, dim = 1) if len(qk_similarities) > 0 else None,
pre_softmax_attn = torch.cat(pre_softmax_attn, dim = 1) if len(pre_softmax_attn) > 0 else None,
post_softmax_attn = torch.cat(post_softmax_attn, dim = 1) if len(post_softmax_attn) > 0 else None
)
return all_outs, aggregated_intermediates
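# A minimal usage sketch (hypothetical shapes; the default flash=False path is used, so it
# runs on CPU). Shapes follow the einstein notation documented in Attend.forward.
if __name__ == "__main__":
    attend = Attend(causal=True, dropout=0.1)
    q = torch.randn(2, 8, 128, 64)  # (batch, heads, seq, dim_head)
    k = torch.randn(2, 8, 128, 64)
    v = torch.randn(2, 8, 128, 64)
    out, intermediates = attend(q, k, v)
    print(out.shape)  # expected: torch.Size([2, 8, 128, 64])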
|
Gen1-main
|
gen1/attend.py
|
import os
import requests
from requests.exceptions import RequestException
import time
from libgen_api import LibgenSearch
import zipfile
import threading
import ebooklib
from ebooklib import epub
from PyPDF2 import PdfReader
# from pdfminer.high_level import extract_text
from bs4 import BeautifulSoup
import json
import gzip
def search_science_books(query, num_results=1000):
s = LibgenSearch()
results = s.search_title(query)
science_books = [book for book in results if "science" in book["Title"].lower()]
return science_books[:num_results]
def download_book(url, save_path, max_retries=3, timeout=10):
retries = 0
while retries < max_retries:
try:
response = requests.get(url, timeout=timeout)
with open(save_path, 'wb') as f:
f.write(response.content)
return
except RequestException:
print(f"Download failed, retrying {retries + 1}/{max_retries}")
retries += 1
time.sleep(2)
print(f"Failed to download {url} after {max_retries} retries.")
def extract_epub_content(file_path):
book = epub.read_epub(file_path)
content = []
for item in book.get_items():
if item.get_type() == ebooklib.ITEM_DOCUMENT:
content.append(BeautifulSoup(item.get_content(), 'html.parser').get_text())
return '\n'.join(content)
def extract_pdf_content(file_path):
with open(file_path, 'rb') as f:
pdf = PdfReader(f)
content = []
for i in range(len(pdf.pages)):
content.append(pdf.pages[i].extract_text())
print(content)
return ''.join(content)
def extract_epub_metadata(file_path):
book = epub.read_epub(file_path)
return {
'title': book.get_metadata('DC', 'title')[0][0],
'authors': [author[0] for author in book.get_metadata('DC', 'creator')],
'language': book.get_metadata('DC', 'language')[0][0],
'publisher': book.get_metadata('DC', 'publisher')[0][0] if book.get_metadata('DC', 'publisher') else '',
'published': book.get_metadata('DC', 'date')[0][0] if book.get_metadata('DC', 'date') else '',
'content': extract_epub_content(file_path),
}
def extract_pdf_metadata(file_path):
with open(file_path, 'rb') as f:
pdf = PdfReader(f)
info = pdf.metadata
return {
'title': info.title,
'authors': [info.author] if info.author else [],
'language': '',
'publisher': info.producer if info.producer else '',
'published': info.creationDate if info.creationDate else '',
'content': extract_pdf_content(file_path),
}
def extract_metadata(file_path, extension):
if extension == 'epub':
return extract_epub_metadata(file_path)
elif extension == 'pdf':
return extract_pdf_metadata(file_path)
else:
return {}
def save_structured_data(structured_data, file_path):
with gzip.open(file_path, 'wt', encoding='utf-8') as f:
json.dump(structured_data, f, ensure_ascii=False, indent=4)
def process_book(book, download_directory, structured_data_directory):
s = LibgenSearch()
download_links = s.resolve_download_links(book)
download_url = download_links.get("GET")
if download_url:
save_path = os.path.join(download_directory, f"{book['ID']}.{book['Extension']}")
download_book(download_url, save_path)
print(f"Downloaded {book['Title']} by {book['Author']} to {save_path}")
metadata = extract_metadata(save_path, book["Extension"])
structured_data = {
"ID": book["ID"],
"Title": book["Title"],
"Authors": book["Author"].split(', '),
"Publisher": book["Publisher"],
"Year": book["Year"],
"Pages": book["Pages"],
"Language": book["Language"],
"Size": book["Size"],
"Extension": book["Extension"],
**metadata
}
print(structured_data)
structured_data_file = os.path.join(structured_data_directory, f"{book['ID']}.json.gz")
save_structured_data(structured_data, structured_data_file)
print(f"Saved structured data for {book['Title']} by {book['Author']} to {structured_data_file}")
s = LibgenSearch()
books = search_science_books("science", num_results=10)
download_directory = "downloads"
os.makedirs(download_directory, exist_ok=True)
structured_data_directory = "structured_data"
os.makedirs(structured_data_directory, exist_ok=True)
# Use threading to speed up the downloading and processing of books
threads = []
for book in books:
thread = threading.Thread(target=process_book, args=(book, download_directory, structured_data_directory))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
print("Finished processing all books.")
|
all-books-on-libgen-master
|
libgen_api/harvest2.py
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="libgen_api",
packages=["libgen_api"],
version="1.0.0",
description="Search Library genesis by Title or Author",
long_description_content_type="text/markdown",
long_description=long_description,
url="https://github.com/harrison-broadbent/libgen-api",
download_url="https://github.com/harrison-broadbent/libgen-api/archive/v1.0.0.tar.gz",
author="Harrison Broadbent",
author_email="mail@harrisonbroadbent.com",
license="MIT",
classifiers=[
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
],
keywords=["libgen search", "libgen api", "search libgen", "search library genesis"],
install_requires=["bs4", "requests", "lxml"],
)
|
all-books-on-libgen-master
|
libgen_api/setup.py
|
#example
#download books -> analyze them -> organize in a structured format[title, authors, content, metadata, published] -> each book with all data formats => json
import os
import requests
from requests.exceptions import RequestException
import time
from libgen_api import LibgenSearch
import zipfile
import ebooklib
from ebooklib import epub
from PyPDF2 import PdfReader
import json
def search_science_books(query, num_results=100):
s = LibgenSearch()
results = s.search_title(query)
print(results)
science_books = [book for book in results if "science" in book["Title"].lower()]
print(science_books)
return science_books[:num_results]
# books = search_science_books("science")
# download_directory = "downloads"
# os.makedirs(download_directory, exist_ok=True)
# for book in books:
# download_links = s.resolve_download_links(book)
# download_url = download_links.get("GET")
# if download_url:
# save_path = os.path.join(download_directory, f"{book['ID']}.{book['Extension']}")
# download_book(download_url, save_path)
# print(f"Downloaded {book['Title']} by {book['Author']} to {save_path}")
# zip_filename = "science_books.zip"
# with zipfile.ZipFile(zip_filename, 'w') as zipf:
# for root, _, files in os.walk(download_directory):
# for file in files:
# file_path = os.path.join(root, file)
# zipf.write(file_path, os.path.relpath(file_path, download_directory))
# harvestor
def download_book(url, save_path, max_retries=3, timeout=10):
retries = 0
while retries < max_retries:
try:
response = requests.get(url, timeout=timeout)
with open(save_path, 'wb') as f:
f.write(response.content)
return
except RequestException:
print(f"Download failed, retrying {retries + 1}/{max_retries}")
retries += 1
time.sleep(2) #wait for 2 seconds before retrying
print(f"Failed to download {url} after {max_retries} retries.")
def extract_epub_content(file_path):
book = epub.read_epub(file_path)
content = []
for item in book.get_items():
if item.get_type() == ebooklib.ITEM_DOCUMENT:
content.append(item.get_content().decode('utf-8'))
return ''.join(content)
def extract_pdf_content(file_path):
with open(file_path, 'rb') as f:
pdf = PdfReader(f)
content = []
for i in range(len(pdf.pages)):
content.append(pdf.pages[i].extract_text())
print(content)
return ''.join(content)
print(extract_pdf_content)
def extract_epub_metadata(file_path):
book = epub.read_epub(file_path)
return {
'title': book.get_metadata('DC', 'title')[0][0],
'authors': [author[0] for author in book.get_metadata('DC', 'creator')],
'language': book.get_metadata('DC', 'language')[0][0],
'publisher': book.get_metadata('DC', 'publisher')[0][0] if book.get_metadata('DC', 'publisher') else '',
'published': book.get_metadata('DC', 'date')[0][0] if book.get_metadata('DC', 'date') else '',
'content': extract_epub_content(file_path),
}
print(extract_epub_metadata)
def extract_pdf_metadata(file_path):
with open(file_path, 'rb') as f:
pdf = PdfReader(f)
info = pdf.metadata
return {
'title': info.title,
'authors': [info.author] if info.author else [],
'language': '',
'publisher': info.producer if info.producer else '',
'published': info.creationDate if info.creationDate else '',
'content': extract_pdf_content(file_path),
}
print(extract_pdf_metadata)
def extract_metadata(file_path, extension):
if extension == 'epub':
return extract_epub_metadata(file_path)
elif extension == 'pdf':
return extract_pdf_metadata(file_path)
else:
return {}
s = LibgenSearch()
books = search_science_books("science")
download_directory = "downloads"
os.makedirs(download_directory, exist_ok=True)
structured_data = []
structured_data_directory = "structured_data"
os.makedirs(structured_data_directory, exist_ok=True)
for book in books:
download_links = s.resolve_download_links(book)
download_url = download_links.get("GET")
if download_url:
save_path = os.path.join(download_directory, f"{book['ID']}.{book['Extension']}")
download_book(download_url, save_path)
print(f"Downloaded {book['Title']} by {book['Author']} to {save_path}")
metadata = extract_metadata(save_path, book["Extension"])
structured_data = {
"ID": book["ID"],
"Title": book["Title"],
"Authors": book["Author"].split(', '),
"Publisher": book["Publisher"],
"Year": book["Year"],
"Pages": book["Pages"],
"Language": book["Language"],
"Size": book["Size"],
"Extension": book["Extension"],
**metadata
}
print(structured_data)
structured_data_file = os.path.join(structured_data_directory, f"{book['ID']}.json")
with open(structured_data_file, 'w', encoding='utf-8') as f:
json.dump(structured_data, f, ensure_ascii=False, indent=4)
print(f"Saved structured data for {book['Title']} by {book['Author']} to {structured_data_file}")
|
all-books-on-libgen-master
|
libgen_api/harvest.py
|
import pytest
from libgen_api.libgen_search import LibgenSearch
title = "Pride and Prejudice"
author = "Agatha Christie"
ls = LibgenSearch()
class TestBasicSearching:
def test_title_search(self):
titles = ls.search_title(title)
first_result = titles[0]
assert title in first_result["Title"]
def test_author_search(self):
titles = ls.search_author(author)
first_result = titles[0]
assert author in first_result["Author"]
def test_title_filtering(self):
title_filters = {"Year": "2007", "Extension": "epub"}
titles = ls.search_title_filtered(title, title_filters, exact_match=True)
first_result = titles[0]
assert (title in first_result["Title"]) & fields_match(
title_filters, first_result
)
def test_author_filtering(self):
author_filters = {"Language": "German", "Year": "2009"}
titles = ls.search_author_filtered(author, author_filters, exact_match=True)
first_result = titles[0]
assert (author in first_result["Author"]) & fields_match(
author_filters, first_result
)
# explicit test of exact filtering
# should return no results as they will all get filtered out
def test_exact_filtering(self):
exact_filters = {"Extension": "PDF"}
# if exact_match = True, this will filter out all results as
# "pdf" is always written lower case on Library Genesis
titles = ls.search_author_filtered(author, exact_filters, exact_match=True)
assert len(titles) == 0
def test_non_exact_filtering(self):
non_exact_filters = {"Extension": "PDF"}
titles = ls.search_author_filtered(author, non_exact_filters, exact_match=False)
first_result = titles[0]
assert (author in first_result["Author"]) & fields_match(
non_exact_filters, first_result, exact=False
)
def test_non_exact_partial_filtering(self):
partial_filters = {"Extension": "p", "Year": "200"}
titles = ls.search_title_filtered(title, partial_filters, exact_match=False)
first_result = titles[0]
assert (title in first_result["Title"]) & fields_match(
partial_filters, first_result, exact=False
)
def test_exact_partial_filtering(self):
exact_partial_filters = {"Extension": "p"}
titles = ls.search_title_filtered(
title, exact_partial_filters, exact_match=True
)
assert len(titles) == 0
def test_resolve_download_links(self):
titles = ls.search_author(author)
title_to_download = titles[0]
dl_links = ls.resolve_download_links(title_to_download)
# ensure each host is in the results and that they each have a url
assert (["GET", "Cloudflare", "IPFS.io", "Infura"] == list(dl_links.keys())) & (
False not in [len(link) > 0 for key, link in dl_links.items()]
)
# should return an error if search query is less than 3 characters long
def test_raise_error_on_short_search(self):
with pytest.raises(Exception):
titles = ls.search_title(title[0:2])
####################
# Helper Functions #
####################
# Check object fields for equality -
# -> Returns True if they match.
# -> Returns False otherwise.
#
# when exact=True, fields are checked strictly (==).
#
# when exact=False, fields are normalized to lower case,
# and checked whether filter value is a subset of the response.
def fields_match(filter_obj, response_obj, exact=True):
for key, value in filter_obj.items():
if exact is False:
value = value.lower()
response_obj[key] = response_obj[key].lower()
if value not in response_obj[key]:
return False
elif response_obj[key] != value:
return False
return True
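# A small illustrative check of fields_match (hypothetical response dict, not real Libgen data):
# exact matching is case-sensitive, non-exact matching lower-cases and allows partial values.
if __name__ == "__main__":
    response = {"Extension": "pdf", "Year": "2007"}
    print(fields_match({"Extension": "pdf"}, dict(response)))               # True
    print(fields_match({"Extension": "PDF"}, dict(response)))               # False
    print(fields_match({"Extension": "PDF"}, dict(response), exact=False))  # True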
|
all-books-on-libgen-master
|
libgen_api/test/test_pytest.py
|
"""
Basic testing script for libgen-api.
Runs through a number of searches using different parameters, outputs results to terminal.
Run -
python3 test.py
"""
from libgen_api.libgen_search import LibgenSearch
import json
title = "Pride and Prejudice"
author = "Agatha Christie"
# helper function to print first title if it exists.
def print_results(titles_array):
print(json.dumps(titles_array[0], indent=1) if len(titles_array) else "No results.")
print("\n\n--- END OF OUTPUT ---\n\n")
# test title search
# should print a result for the book specified at the top of the file.
t = LibgenSearch()
print("\n>>>\tSearching for title: " + title)
titles = t.search_title(title)
print_results(titles)
# test author search
# should print a result for the author specified at the top of the file.
a = LibgenSearch()
print("\n>>>\tSearching for author: " + author)
titles = a.search_author(author)
print_results(titles)
# test title filtering
# should print a result for the book specified at the top of the file,
# conforming to the title_filters below.
tf = LibgenSearch()
title_filters = {"Year": "2007", "Extension": "epub"}
print(
"\n>>>\tSearching for title: "
+ title
+ " with filters --- "
+ ", ".join([":".join(i) for i in title_filters.items()])
)
titles = tf.search_title_filtered(title, title_filters, exact_match=True)
print_results(titles)
# test author filtering
# should print a result for the author specified at the top of the file,
# conforming to the title_filters below.
af = LibgenSearch()
author_filters = {"Language": "German", "Year": "2009"}
print(
"\n>>>\tSearching for author: "
+ author
+ " with filters --- "
+ ", ".join([":".join(i) for i in author_filters.items()])
)
titles = af.search_author_filtered(author, author_filters, exact_match=True)
print_results(titles)
# test exact filtering explicitly (using an Author search)
# should print no results as the filters exclude all results.
afe = LibgenSearch()
exact_filters = {
"Extension": "PDF"
} # if exact_match = True, all results get filtered as "pdf" is always written lower case
print(
"\n>>>\tSearching for author: "
+ author
+ " with filters --- "
+ ", ".join([":".join(i) for i in exact_filters.items()])
+ " & exact_match == True"
)
titles = afe.search_author_filtered(author, exact_filters, exact_match=True)
print_results(titles)
# test non-exact filtering (using an Author search)
# should print a result for the author specified at the top of the file,
# conforming to the title_filters below.
afne = LibgenSearch()
non_exact_filters = {
"Extension": "PDF"
} # if exact_match = True, all results get filtered as "pdf" is always written lower case
print(
"\n>>>\tSearching for author: "
+ author
+ " with filters --- "
+ ", ".join([":".join(i) for i in non_exact_filters.items()])
+ " & exact_match == FALSE"
)
titles = afne.search_author_filtered(author, non_exact_filters, exact_match=False)
print_results(titles)
# test partial filtering (using a Title)
# should print a result for the title specified at the top of the file,
# conforming to the non_exact_filter below, with non-exact matching.
tfpne = LibgenSearch()
partial_filters = {"Extension": "p", "Year": "200"}
print(
"\n>>>\tSearching for title: "
+ title
+ " with filters --- "
+ ", ".join([":".join(i) for i in partial_filters.items()])
+ " & exact_match == False"
)
titles = tfpne.search_title_filtered(title, partial_filters, exact_match=False)
print_results(titles)
# test partial filtering (using a Title)
# should return nothing as the extension is not an exact match to an existing one (ie. "pdf")
tfpe = LibgenSearch()
exact_partial_filters = {"Extension": "p"}
print(
"\n>>>\tSearching for title: "
+ title
+ " with filters --- "
+ ", ".join([":".join(i) for i in exact_partial_filters.items()])
+ " & exact_match == True"
)
titles = tfpe.search_title_filtered(title, exact_partial_filters, exact_match=True)
print_results(titles)
# test resolving of mirror links
# should print a populated hash of source:download_link pairs
arml = LibgenSearch()
print("\n>>>\tSearching for title: " + title + " and resolving download links")
# Author hard-coded so that it pairs with title (currently pride and prejudice)
titles = arml.search_author("Jane Austen")
item_to_download = titles[0]
download_links = arml.resolve_download_links(item_to_download)
print_results([download_links])
|
all-books-on-libgen-master
|
libgen_api/test/manualtesting.py
|
from .search_request import SearchRequest
import requests
from bs4 import BeautifulSoup
MIRROR_SOURCES = ["GET", "Cloudflare", "IPFS.io", "Infura"]
class LibgenSearch:
def search_title(self, query):
search_request = SearchRequest(query, search_type="title")
return search_request.aggregate_request_data()
def search_author(self, query):
search_request = SearchRequest(query, search_type="author")
return search_request.aggregate_request_data()
def search_title_filtered(self, query, filters, exact_match=True):
search_request = SearchRequest(query, search_type="title")
results = search_request.aggregate_request_data()
filtered_results = filter_results(
results=results, filters=filters, exact_match=exact_match
)
return filtered_results
def search_author_filtered(self, query, filters, exact_match=True):
search_request = SearchRequest(query, search_type="author")
results = search_request.aggregate_request_data()
filtered_results = filter_results(
results=results, filters=filters, exact_match=exact_match
)
return filtered_results
def resolve_download_links(self, item):
mirror_1 = item["Mirror_1"]
page = requests.get(mirror_1)
soup = BeautifulSoup(page.text, "html.parser")
links = soup.find_all("a", string=MIRROR_SOURCES)
download_links = {link.string: link["href"] for link in links}
return download_links
def filter_results(results, filters, exact_match):
"""
Returns a list of results that match the given filter criteria.
When exact_match = true, we only include results that exactly match
the filters (ie. the filters are an exact subset of the result).
When exact_match = false,
we run a case-insensitive check between each filter field and each result.
exact_match defaults to TRUE -
this is to maintain consistency with older versions of this library.
"""
filtered_list = []
if exact_match:
for result in results:
# check whether a candidate result matches the given filters
if filters.items() <= result.items():
filtered_list.append(result)
else:
filter_matches_result = False
for result in results:
for field, query in filters.items():
if query.casefold() in result[field].casefold():
filter_matches_result = True
else:
filter_matches_result = False
break
if filter_matches_result:
filtered_list.append(result)
return filtered_list
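# A minimal, self-contained sketch of the filtering semantics described above
# (the result dicts are fabricated placeholders, not real Libgen rows).
if __name__ == "__main__":
    sample_results = [
        {"Title": "Pride and Prejudice", "Extension": "pdf", "Year": "2007"},
        {"Title": "Pride and Prejudice", "Extension": "epub", "Year": "2007"},
    ]
    # exact matching: only the epub row survives
    print(filter_results(sample_results, {"Extension": "epub"}, exact_match=True))
    # non-exact matching: "PDF" matches "pdf" case-insensitively
    print(filter_results(sample_results, {"Extension": "PDF"}, exact_match=False))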
|
all-books-on-libgen-master
|
libgen_api/libgen_api/libgen_search.py
|
all-books-on-libgen-master
|
libgen_api/libgen_api/libgen_api_helpers.py
|
|
from .search_request import SearchRequest
from .libgen_search import LibgenSearch
|
all-books-on-libgen-master
|
libgen_api/libgen_api/__init__.py
|
import requests
from bs4 import BeautifulSoup
# WHY
# The SearchRequest module contains all the internal logic for the library.
#
# This encapsulates the logic,
# ensuring users can work at a higher level of abstraction.
# USAGE
# req = search_request.SearchRequest("[QUERY]", search_type="[title]")
class SearchRequest:
col_names = [
"ID",
"Author",
"Title",
"Publisher",
"Year",
"Pages",
"Language",
"Size",
"Extension",
"Mirror_1",
"Mirror_2",
"Mirror_3",
"Mirror_4",
"Mirror_5",
"Edit",
]
def __init__(self, query, search_type="title"):
self.query = query
self.search_type = search_type
if len(self.query) < 3:
raise Exception("Query is too short")
def strip_i_tag_from_soup(self, soup):
subheadings = soup.find_all("i")
for subheading in subheadings:
subheading.decompose()
def get_search_page(self):
query_parsed = "%20".join(self.query.split(" "))
if self.search_type.lower() == "title":
search_url = (
f"http://gen.lib.rus.ec/search.php?req={query_parsed}&column=title"
)
elif self.search_type.lower() == "author":
search_url = (
f"http://gen.lib.rus.ec/search.php?req={query_parsed}&column=author"
)
search_page = requests.get(search_url)
return search_page
def aggregate_request_data(self):
search_page = self.get_search_page()
soup = BeautifulSoup(search_page.text, "lxml")
self.strip_i_tag_from_soup(soup)
# Libgen results contain 3 tables
# Table2: Table of data to scrape.
information_table = soup.find_all("table")[2]
# Determines whether the link url (for the mirror)
# or link text (for the title) should be preserved.
# Both the book title and mirror links have a "title" attribute,
# but only the mirror links have it filled.(title vs title="libgen.io")
raw_data = [
[
td.a["href"]
if td.find("a")
and td.find("a").has_attr("title")
and td.find("a")["title"] != ""
else "".join(td.stripped_strings)
for td in row.find_all("td")
]
for row in information_table.find_all("tr")[
1:
] # Skip row 0 as it is the headings row
]
output_data = [dict(zip(self.col_names, row)) for row in raw_data]
return output_data
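# A minimal usage sketch (assumes network access and the current gen.lib.rus.ec table layout):
# each scraped row is zipped against col_names into a dict.
if __name__ == "__main__":
    req = SearchRequest("Pride and Prejudice", search_type="title")
    for row in req.aggregate_request_data()[:3]:
        print(row["Title"], row["Extension"], row["Mirror_1"])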
|
all-books-on-libgen-master
|
libgen_api/libgen_api/search_request.py
|
from setuptools import setup, find_packages
setup(
name = 'tree-of-thoughts',
packages = find_packages(exclude=[]),
version = '0.3.6',
license='MIT',
description = 'Tree of Thoughts - Pytorch',
author = 'Kye Gomez',
author_email = 'kye@apac.ai',
long_description_content_type = 'text/markdown',
url = 'https://github.com/kyegomez/tree-of-thoughts',
keywords = [
'artificial intelligence',
'deep learning',
'optimizers',
"Prompt Engineering"
],
install_requires=[
'guidance',
'openai',
'transformers',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
tree-of-thoughts-main
|
setup.py
|
import os
from tree_of_thoughts.models.openai_models import OpenAILanguageModel
from tree_of_thoughts.treeofthoughts import MonteCarloTreeofThoughts
api_model= "gpt-3.5-turbo"
api_key = os.getenv("OPENAI_API_KEY")
model = OpenAILanguageModel(api_key=api_key, api_model=api_model)
# Initialize the MonteCarloTreeofThoughts class with the model
tree_of_thoughts = MonteCarloTreeofThoughts(model)
# Note: to reproduce the same results from the tree of thoughts paper, if not better,
# craft a 1-shot chain-of-thought prompt for your task below
initial_prompt = """
Input: 2 8 8 14
Possible next steps:
2 + 8 = 10 (left: 8 10 14)
8 / 2 = 4 (left: 4 8 14)
14 + 2 = 16 (left: 8 8 16)
2 * 8 = 16 (left: 8 14 16)
8 - 2 = 6 (left: 6 8 14)
14 - 8 = 6 (left: 2 6 8)
14 / 2 = 7 (left: 7 8 8)
14 - 2 = 12 (left: 8 8 12)
Input: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation
Possible next steps:
"""
num_thoughts = 1
max_steps = 3
max_states = 4
pruning_threshold = 0.5
solution = tree_of_thoughts.solve(
initial_prompt=initial_prompt,
num_thoughts=num_thoughts,
max_steps=max_steps,
max_states=max_states,
pruning_threshold=pruning_threshold,
# sleep_time=sleep_time
)
print(f"Solution: {solution}")
|
tree-of-thoughts-main
|
example.py
|
from abc import ABC, abstractmethod
import openai
class AbstractLanguageModel(ABC):
@abstractmethod
def generate_thoughts(self, state, k):
pass
@abstractmethod
def evaluate_states(self, states):
pass
class CustomLanguageModel(AbstractLanguageModel):
def __init__(self, model):
self.model = model
def generate_thoughts(self, state, k):
#implement the thought generation logic using self.model
pass
def evaluate_states(self, states):
#implement state evaluation logic using self.model
pass
class OpenAILanguageModel(AbstractLanguageModel):
def __init__(self, api_key):
openai.api_key = api_key
def generate_thoughts(self, state, k):
#implement logic
pass
def evaluate_states(self, states):
pass
class TreeofThoughts:
"""
1. Thought Decomposition --> based on problem properties
2. Thought Generator -> create a thought generator function G(p0, s, k) with 2 strategies: (a) sample i.i.d. thoughts from a CoT prompt, (b) propose thoughts
sequentially using a propose prompt
3. Create a state evaluator function V(p0, S) with 2 strategies: (a) value each state independently, (b) vote across states
4. Choose a search algorithm based on the tree structure [BFS or DFS]
Implement the chosen search algorithm. For BFS (algo 1):
init S0 with the input x
for t = 1 to T (step limit):
generate candidate thoughts for each state in St-1
evaluate the candidate states using the state evaluator V
select the b most promising states for St
return the final output by generating the thought for the best state in St. For DFS (algo 2):
define a recursive DFS function with the current state s, step t, and other required params
if t > T, record the output by generating the thought for the current state s
for each candidate state s' in the sorted list of generated thoughts for s:
if the evaluated value of s' is greater than the threshold vth, call the dfs function recursively
with s' and t + 1
execute the chosen search algorithm with the input problem, thought generator, state evaluator, and other required params
"""
def __init__(self, model, thought_decomposition, thought_generator, state_evaluator, search_algorithm):
self.model = model
self.thought_decomposition = thought_decomposition
self.thought_generator = thought_generator
self.state_evaluator = state_evaluator
self.search_algorithm = search_algorithm
def solve(self, x, k, T, b, vth):
if self.search_algorithm == 'BFS':
return self.tot_bfs(x, k, T, b)
elif self.search_algorithm == 'DFS':
return self.tot_dfs(x, k, T, vth)
else:
raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
def tot_bfs(self, x, k, T, b):
S0 = {x}
for t in range(1, T + 1):
S0_t = {(*s, z) for s in S0 for z in self.thought_generator(self.model, s, k)}
Vt = self.state_evaluator(self.model, S0_t)
St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
S0 = set(St)
return self.thought_generator(self.model, max(St, key=lambda s: Vt[s]), 1)
def tot_dfs(self, x, k, T, vth):
output = []
def dfs(s, t):
if t > T:
output.append(self.thought_generator(self.model, s, 1))
return
for s_prime in sorted(self.thought_generator(self.model, s, k)):
if self.state_evaluator(self.model, {s_prime})[s_prime] > vth:
dfs((*s, s_prime), t + 1)
dfs(x, 1)
return output
# usage
openai_model = OpenAILanguageModel('sk')
# choose search algorithm ('BFS' or 'DFS')
search_algorithm = "BFS"
# create an instance of the tree of thoughts class
# (the thought generator and state evaluator wrap the language model to match the call signature used inside TreeofThoughts)
tree_of_thoughts = TreeofThoughts(openai_model, None, lambda m, s, k: m.generate_thoughts(s, k), lambda m, states: m.evaluate_states(states), search_algorithm)
|
tree-of-thoughts-main
|
experiements/treeofthoughts-v1.py
|
import concurrent.futures
from abc import ABC, abstractmethod
import openai
class AbstractLanguageModel(ABC):
@abstractmethod
def generate_thoughts(self, state, k):
pass
@abstractmethod
def evaluate_states(self, states):
pass
class CustomLanguageModel(AbstractLanguageModel):
def __init__(self, model):
self.model = model
def generate_thoughts(self, state, k):
#implement the thought generation logic using self.model
pass
def evaluate_states(self, states):
#implement state evaluation logic using self.model
pass
class OpenAILanguageModel(AbstractLanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value"):
openai.api_key = api_key
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
def generate_thoughts(self, state, k):
state_text = ' '.join(state)
prompt = f"Given the current state of reasoning: '{state_text}', generate {k} coherent thoughts to continue the reasoning process:"
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
n=k,
max_tokens=50,
stop=None,
temperature=0.5,
)
thoughts = [choice.text.strip() for choice in response.choices]
print(thoughts)
return thoughts
def evaluate_states(self, states):
if self.evaluation_strategy == 'value':
state_values = {}
for state in states:
state_text = ' '.join(state)
prompt = f"Given the current state of reasoning: '{state_text}', evaluate its value as a float between 0 and 1:"
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
n=1,
max_tokens=10,
stop=None,
temperature=0.5,
)
try:
# print(response.choices[0].text.strip())
value = float(response.choices[0].text.strip())
print(value)
except ValueError:
value = 0 # Assign a default value if the conversion fails
state_values[state] = value
return state_values
elif self.evaluation_strategy == 'vote':
states_text = '\n'.join([' '.join(state) for state in states])
prompt = f"Given the following states of reasoning, vote for the best state:\n{states_text}\n\nVote:"
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
n=1,
max_tokens=50,
stop=None,
temperature=0.5,
)
best_state_text = response.choices[0].text.strip()
print(best_state_text)
best_state = tuple(best_state_text.split())
return {state: 1 if state == best_state else 0 for state in states}
else:
raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
class OptimizedOpenAILanguageModel(OpenAILanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", cache_enabled=True):
super().__init__(api_key, strategy, evaluation_strategy)
self.cache_enabled = cache_enabled
self.thought_cache = {}
self.state_evaluation_cache = {}
def parallel_generate_thoughts(self, states, k):
with concurrent.futures.ThreadPoolExecutor() as executor:
thoughts = list(executor.map(lambda state: self.generate_thoughts(state, k), states))
print(thoughts)
return thoughts
def parallel_evaluate_states(self, states):
with concurrent.futures.ThreadPoolExecutor() as executor:
state_values = list(executor.map(self.evaluate_states, states))
print(state_values)
return state_values
# model = OptimizedOpenAILanguageModel('your_openai_api_key_here')
# update tree of thoughts to use the optimized model's methods
class TreeofThoughts:
"""
1. Thought Decomposition --> based on problem properties
2. Thought Generator -> create a thought generator function G(p0, s, k) with 2 strategies: (a) sample i.i.d. thoughts from a CoT prompt, (b) propose thoughts
sequentially using a propose prompt
3. Create a state evaluator function V(p0, S) with 2 strategies: (a) value each state independently, (b) vote across states
4. Choose a search algorithm based on the tree structure [BFS or DFS]
Implement the chosen search algorithm. For BFS (algo 1):
init S0 with the input x
for t = 1 to T (step limit):
generate candidate thoughts for each state in St-1
evaluate the candidate states using the state evaluator V
select the b most promising states for St
return the final output by generating the thought for the best state in St. For DFS (algo 2):
define a recursive DFS function with the current state s, step t, and other required params
if t > T, record the output by generating the thought for the current state s
for each candidate state s' in the sorted list of generated thoughts for s:
if the evaluated value of s' is greater than the threshold vth, call the dfs function recursively
with s' and t + 1
execute the chosen search algorithm with the input problem, thought generator, state evaluator, and other required params
"""
def __init__(self, model, search_algorithm):
self.model = model
self.search_algorithm = search_algorithm
def solve(self, x, k, T, b, vth):
if self.search_algorithm == 'BFS':
return self.tot_bfs(x, k, T, b)
elif self.search_algorithm == 'DFS':
return self.tot_dfs(x, k, T, vth)
else:
raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
def tot_bfs(self, x, k, T, b):
S0 = {x}
for t in range(1, T + 1):
S0_t = {(*s, z) for s in S0 for z in self.model.generate_thoughts(s, k)}
Vt = self.model.evaluate_states(S0_t)
St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
S0 = set(St)
return self.model.generate_thoughts(max(St, key=lambda s: Vt[s]), 1)
def tot_dfs(self, x, k, T, vth):
output = []
def dfs(s, t):
if t > T:
output.append(self.model.generate_thoughts(s, 1))
return
for s_prime in sorted(self.model.generate_thoughts(s, k)):
if self.model.evaluate_states({s_prime})[s_prime] > vth:
dfs((*s, s_prime), t + 1)
dfs(x, 1)
return output
#does not output state after each thought --- idk why -- needs work
class OptimizedTreeofThoughts(TreeofThoughts):
def tot_bfs(self, x, k, T, b):
S0 = {x}
for t in range(1, T + 1):
S0_t = {(*s, z) for s in S0 for z in self.model.parallel_generate_thoughts(s, k)}
Vt = self.model.parallel_evaluate_states(S0_t)
St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
S0 = set(St)
return self.model.generate_thoughts(max(St, key=lambda s: Vt[s]), 1)
def tot_dfs(self, x, k, T, vth):
output = []
def dfs(s, t):
if t > T:
output.append(self.model.generate_thoughts(s, 1))
return
for s_prime in sorted(self.model.generate_thoughts(s, k)):
if self.model.evaluate_states({s_prime})[s_prime] > vth:
dfs((*s, s_prime), t + 1)
dfs(x, 1)
return output
search_algorithm = "DFS"
strategy = "cot"
evaluation_strategy="value"
#create instance
model = OptimizedOpenAILanguageModel('')
tree_of_thoughts = OptimizedTreeofThoughts(model, search_algorithm)
input_problem = "What are the best reasoning methods to advance Large Language Models"
k = 5
T = 3
b = 5
vth = 0.5
# call the solve method with the input problem and other params
solution = tree_of_thoughts.solve(input_problem, k, T, b, vth)
#use the solution in env
print(solution)
|
tree-of-thoughts-main
|
experiements/hyperoptimized.py
|
import os
import json
import itertools
import argparse
import numpy as np
from functools import partial
from models import gpt, gpt_usage
from tasks import get_task
class CustomLanguageModel:
def __init__(self, model):
self.model = model
def generate_thoughts(self, state, k):
# Implement the thought generation logic using self.model
pass
def evaluate_states(self, states):
# Implement state evaluation logic using self.model
pass
class TreeofThoughtsV2:
def __init__(self, args, model):
self.args = args
self.model = CustomLanguageModel(model)
def get_value(self, task, x, y, n_evaluate_sample, cache_value=True):
value_prompt = task.value_prompt_wrap(x, y)
if cache_value and value_prompt in task.value_cache:
return task.value_cache[value_prompt]
value_outputs = gpt(value_prompt, n=n_evaluate_sample, stop=None)
value = task.value_outputs_unwrap(x, y, value_outputs)
if cache_value:
task.value_cache[value_prompt] = value
return value
def get_values(self, task, x, ys, n_evaluate_sample, cache_value=True):
values = []
local_value_cache = {}
for y in ys: # each partial output
if y in local_value_cache: # avoid duplicate candidates
value = 0
else:
value = self.get_value(task, x, y, n_evaluate_sample, cache_value=cache_value)
local_value_cache[y] = value
values.append(value)
return values
def get_votes(self, task, x, ys, n_evaluate_sample):
# ...
pass
def get_proposals(self, task, x, y):
# ...
pass
def get_samples(self, task, x, y, n_generate_sample, prompt_sample, stop):
# ...
pass
def solve(self, task, idx, to_print=True):
# ...
pass
def naive_solve(self, task, idx, to_print=True):
# ...
pass
def run(self):
pass
def parse_args():
args = argparse.ArgumentParser()
args.add_argument('--backend', type=str, choices=['gpt-4', 'gpt-3.5-turbo'], default='gpt-4')
args.add_argument('--temperature', type=float, default=0.7)
args.add_argument('--task', type=str, required=True, choices=['game24', 'text', 'crosswords'])
args.add_argument('--task_file_path', type=str, required=True)
args.add_argument('--task_start_index', type=int, default=900)
args.add_argument('--task_end_index', type=int, default=1000)
args.add_argument('--naive_run', action='store_true')
args.add_argument('--prompt_sample', type=str, choices=['standard', 'cot']) # only used when method_generate = sample, or naive_run
args.add_argument('--method_generate', type=str, choices=['sample', 'propose'])
args.add_argument('--method_evaluate', type=str, choices=['value', 'vote'])
args.add_argument('--method_select', type=str, choices=['sample', 'greedy'])
args.add_argument('--n_generate_sample', type=int, default=1) # only thing needed if naive_run
args.add_argument('--n_evaluate_sample', type=int, default=1)
args.add_argument('--n_select_sample', type=int, default=1)
args = args.parse_args()
return args
|
tree-of-thoughts-main
|
experiements/treeofthoughtsv2.py
|
import concurrent.futures
from abc import ABC, abstractmethod
import openai
import os
import guidance
import time
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import pipeline
import json
DATA_PATH = './data'
class AbstractLanguageModel(ABC):
@abstractmethod
def generate_thoughts(self, state, k):
pass
@abstractmethod
def evaluate_states(self, states):
pass
class CustomLanguageModel(AbstractLanguageModel):
def __init__(self, model):
self.model = model
def generate_thoughts(self, state, k):
#implement the thought generation logic using self.model
pass
def evaluate_states(self, states):
#implement state evaluation logic using self.model
pass
class HuggingLanguageModel(AbstractLanguageModel):
def __init__(self, model_name, model_tokenizer=None, verbose=False):
self.model = AutoModelForCausalLM.from_pretrained(model_name)
self.tokenizer = AutoTokenizer.from_pretrained(model_tokenizer or model_name)
self.verbose = verbose
def generate_thoughts(self, state, k, max_length=100):
if (type(state) == str):
state_text = state
else:
state_text = '\n'.join(state)
prompt = f"Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx Given the current state of reasoning: '{state_text}', generate {k} coherent solutions to achieve {state_text}"
if self.verbose:
print(f"Generating thoughts for state: {state_text}")
try:
inputs = self.tokenizer(prompt, return_tensors="pt")
outputs = self.model.generate(**inputs, max_length=max_length, num_return_sequences=k)
thoughts = [self.tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
except Exception as e:
if self.verbose:
print(f"Error generating thoughts for state: {state_text}")
print(f"Error: {e}")
thoughts = []
return thoughts
def evaluate_states(self, states, initial_prompt, max_length=10):
state_values = {}
for state in states:
state_text = '\n'.join(state)
prompt = f"Given the current state of reasoning: '{state_text}', pessimitically evaluate its value as a float between 0 and 1 based on it's potential to achieve {initial_prompt}"
if self.verbose:
print(f"Evaluating state: {state_text}")
try:
inputs = self.tokenizer(prompt, return_tensors="pt")
outputs = self.model.generate(**inputs, num_return_sequences=1, max_length=max_length)
value_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
value = float(value_text)
except ValueError:
if self.verbose:
print(f"Error converting value to float for state: {state_text}")
value = 0 # Assign a default value if the conversion fails
except Exception as e:
if self.verbose:
print(f"Error evaluating state: {state_text}")
print(f"Error: {e}")
value = 0
state_values[state] = value
return state_values
class HFPipelineModel(AbstractLanguageModel):
def __init__(self, model_name, verbose=False):
self.pipeline = pipeline("text-generation", model=model_name)
self.verbose = verbose
def generate_thoughts(self, state, k, max_length=100):
state_text = ' '.join(state)
prompt = f"Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx Given the current state of reasoning: '{state_text}', generate {k} coherent solutions to achieve"
if self.verbose:
print(f"Generating thoughts for state: {state_text}")
try:
# this class only builds a text-generation pipeline, so generate through it
generated_outputs = self.pipeline(prompt, max_length=max_length, num_return_sequences=k)
thoughts = [output["generated_text"] for output in generated_outputs]
except Exception as e:
if self.verbose:
print(f"Error generating thoughts for state: {state_text}")
print(f"Error: {e}")
thoughts = []
return thoughts
def evaluate_states(self, states, initial_prompt, max_length=10):
state_values = {}
for state in states:
state_text = ' '.join(state)
prompt = f"Given the current state of reasoning: \n\n\n'{state_text}'\n\n\n, pessimistically evaluate its value as a float between 0 and 1 based on its potential to achieve {initial_prompt}"
if self.verbose:
print(f"Evaluating state: {state_text}")
try:
generated_outputs = self.pipeline(prompt, max_length=max_length, num_return_sequences=1)
value_text = generated_outputs[0]["generated_text"]
value = float(value_text)
print(f'value {value}')
except ValueError:
if self.verbose:
print(f"Error converting value to float for state: {state_text}")
value = 0 # Assign a default value if the conversion fails
except Exception as e:
if self.verbose:
print(f"Error evaluating state: {state_text}")
print(f"Error: {e}")
value = 0
state_values[state] = value
return state_values
@staticmethod
def load(model_name, verbose=False):
return HFPipelineModel(model_name, verbose)
class OpenAILanguageModel(AbstractLanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", api_base="", api_model="", enable_ReAct_prompting=True):
if api_key == "" or api_key is None:
api_key = os.environ.get("OPENAI_API_KEY", "")
if api_key != "":
openai.api_key = api_key
else:
raise Exception("Please provide OpenAI API key")
if api_base == ""or api_base is None:
api_base = os.environ.get("OPENAI_API_BASE", "") # if not set, use the default base path of "https://api.openai.com/v1"
if api_base != "":
# e.g. https://api.openai.com/v1/ or your custom url
openai.api_base = api_base
print(f'Using custom api_base {api_base}')
if api_model == "" or api_model is None:
api_model = os.environ.get("OPENAI_API_MODEL", "")
if api_model != "":
self.api_model = api_model
else:
self.api_model = "text-davinci-003"
print(f'Using api_model {self.api_model}')
self.use_chat_api = 'gpt' in self.api_model
# reference : https://www.promptingguide.ai/techniques/react
self.ReAct_prompt = ''
if enable_ReAct_prompting:
self.ReAct_prompt = "Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx'."
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
def openai_api_call_handler(self, prompt, max_tokens, temperature, k=1, stop=None):
while True:
try:
if self.use_chat_api:
messages = [
{
"role": "user",
"content": prompt
}
]
response = openai.ChatCompletion.create(
model=self.api_model,
messages=messages,
max_tokens=400,
temperature=temperature,
)
else:
response = openai.Completion.create(
engine=self.api_model,
prompt=prompt,
n=k,
max_tokens=max_tokens,
stop=stop,
temperature=temperature,
)
with open("openai.logs", 'a') as log_file:
log_file.write("\n" + "-----------" + '\n' +"Prompt : "+ prompt+"\n")
return response
except openai.error.RateLimitError as e:
sleep_duration = int(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
print(f'{str(e)}, sleeping for {sleep_duration}s; override with env OPENAI_RATE_TIMEOUT')
time.sleep(sleep_duration)
def openai_choice2text_handler(self, choice):
if self.use_chat_api:
text = choice['message']['content']
else:
text = choice.text.strip()
with open("openai.logs", 'a') as log_file:
log_file.write("Response : "+ text+"\n\n\n")
return text
def generate_text(self, prompt, k):
if self.use_chat_api:
thoughts = []
for _ in range(k):
response = self.openai_api_call_handler(prompt, 50, 0.5, k)
text = self.openai_choice2text_handler(response.choices[0])
thoughts += [text]
print(f'thoughts: {thoughts}')
return thoughts
else:
response = self.openai_api_call_handler(prompt, 50, 0.5, k)
thoughts = [self.openai_choice2text_handler(choice) for choice in response.choices]
return thoughts
def generate_thoughts(self, state, k):
if (type(state) == str):
state_text = state
else:
state_text = '\n'.join(state)
print("We receive a state of type", type(state), "For state: ", state, "\n\n")
prompt = f"Given the current state of reasoning: \n\n\n'{state_text}'\n\n\nGenerate the next best coherent thought to achieve the reasoning process and get the solution: "
prompt += self.ReAct_prompt
print(prompt)
thoughts = self.generate_text(prompt, k)
# print(thoughts)
print(f"Generated thoughts: {thoughts}")
return thoughts
def generate_solution(self, initial_prompt, state):
if (type(state) == str):
state_text = state
else:
state_text = '\n'.join(state)
prompt = f"Given the following reasoning: \n\n\n'{state_text}'\n Give me the best solution you can think of the task : {initial_prompt}"
answer = self.generate_text(prompt, 1)
# print(thoughts)
print(f"General solution : {answer}")
return answer
def evaluate_states(self, states, initial_prompt):
if self.evaluation_strategy == 'value':
state_values = {}
for state in states:
state_text = ' '.join(state)
print("We receive a state of type", type(state), "For state: ", state, "\n\n")
prompt = f"Given the current state of reasoning: '{state_text}', evaluate its value as a float between 0 and 1, become very pessimistic think of potential adverse risks on the probability of this state of reasoning achieveing {initial_prompt} and DO NOT RESPOND WITH ANYTHING ELSE: OTHER THAN AN FLOAT"
response = self.openai_api_call_handler(prompt, 10, 1)
try:
value_text = self.openai_choice2text_handler(response.choices[0])
print(f'state: {value_text}')
value = float(value_text)
print(f"value: {value}")
except ValueError:
value = 0 # Assign a default value if the conversion fails
state_values[state] = value
return state_values
elif self.evaluation_strategy == 'vote':
states_text = '\n'.join([' '.join(state) for state in states])
prompt = f"Given the following states of reasoning, vote for the best state utilizing an scalar value 1-10:\n{states_text}\n\nVote, on the probability of this state of reasoning achieveing {initial_prompt} and become very pessimistic very NOTHING ELSE"
response = self.openai_api_call_handler(prompt, 50, 1)
print(f'state response: {response}')
best_state_text = self.openai_choice2text_handler(response.choices[0])
print(f"Best state text: {best_state_text}")
best_state = tuple(best_state_text.split())
print(f'best_state: {best_state}')
return {state: 1 if state == best_state else 0 for state in states}
else:
raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
class OptimizedOpenAILanguageModel(OpenAILanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", cache_enabled=True, api_base="", api_model="", enable_ReAct_prompting=False):
super().__init__(api_key, strategy, evaluation_strategy, api_base, api_model, enable_ReAct_prompting)
self.cache_enabled = cache_enabled
self.thought_cache = {}
self.state_evaluation_cache = {}
def parallel_generate_thoughts(self, states, k):
with concurrent.futures.ThreadPoolExecutor() as executor:
thoughts = list(executor.map(lambda state: self.generate_thoughts(state, k), states))
print(f"Parallel generated thoughts: {thoughts}")
return thoughts
def parallel_evaluate_states(self, states, initial_prompt):
with concurrent.futures.ThreadPoolExecutor() as executor:
# evaluate each state against the same initial prompt in parallel
state_values = list(executor.map(lambda state: self.evaluate_states({state}, initial_prompt), states))
print(f"Parallel evaluated state values: {state_values}")
return state_values
class GuidanceLanguageModel(AbstractLanguageModel):
def __init__(self, model, strategy="cot", evaluation_strategy="value", enable_ReAct_prompting=False):
# gpt4 = guidance.llms.OpenAI("gpt-4")
# vicuna = guidance.llms.transformers.Vicuna("your_path/vicuna_13B", device_map="auto")
self.model = model
# reference : https://www.promptingguide.ai/techniques/react
self.ReAct_prompt = ''
if enable_ReAct_prompting:
self.ReAct_prompt = '''{{#assistant~}}
{{gen 'Observation' temperature=0.5 max_tokens=50}}
{{~/assistant}}'''
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
self.thoughts_program = guidance('''
{{#system~}}
You are a logical and rational assistant.
{{~/system}}
{{#user~}}
Given the current state of reasoning:
{{state_text}}
Generate {{k}} coherent thoughts as short as possible to continue the reasoning process.
Don't answer the question yet.
{{~/user}}
%s
{{#assistant~}}
{{gen 'Thoughts' temperature=0.5 max_tokens=50}}
{{~/assistant}}
''' % self.ReAct_prompt, llm=self.model)
self.value_program = guidance('''
{{#system~}}
You are a logical and rational assistant.
{{~/system}}
{{#user~}}
Given the current state of reasoning:
{{state_text}}
Evaluate its value as a float between 0 and 1, and NOTHING ELSE
Don't answer the question yet.
{{~/user}}
{{#assistant~}}
{{gen 'Value' temperature=1 max_tokens=10}}
{{~/assistant}}
''', llm=self.model)
self.vote_program = guidance('''
{{#system~}}
You are a logical and rational assistant.
{{~/system}}
{{#user~}}
Given the following states of reasoning, vote for the best state:
{{states_text}}
Give the index of your voted best state(the 1st state has index 0), and NOTHING ELSE
Don't answer the question yet.
{{~/user}}
{{#assistant~}}
{{gen 'Vote' temperature=1 max_tokens=10}}
{{~/assistant}}
''', llm=self.model)
def model_response_handler(self, program, **kargs):
print("Calling guidance model(Modify Me to handle specific LLM response excpetions!)")
reponse = program(**kargs)
return reponse
def generate_thoughts(self, state, k):
#implement the thought generation logic using self.model
state_text = ' '.join(state)
thoughts = []
for _ in range(k):
response = self.model_response_handler(self.thoughts_program, state_text=state_text, k=1)
text = response['Thoughts']
thoughts += [text]
# print(thoughts)
print(f"Generated thoughts: {thoughts}")
return thoughts
def evaluate_states(self, states):
#implement state evaluation logic using self.model
if self.evaluation_strategy == 'value':
state_values = {}
for state in states:
state_text = ' '.join(state)
response = self.model_response_handler(self.value_program, state_text=state_text)
try:
value_text = response['Value']
print(f"Value text {value_text}")
value = float(value_text)
print(f"value: {value}")
except ValueError:
value = 0 # Assign a default value if the conversion fails
state_values[state] = value
return state_values
elif self.evaluation_strategy == 'vote':
states_text = '\n'.join([' '.join(state) for state in states])
response = self.model_response_handler(self.vote_program, states_text=states_text)
best_state_text = response['Vote']
print(f"Best state text: {best_state_text}")
best_state = int(best_state_text)
return {state: 1 if i == best_state else 0 for i, state in enumerate(states)}
else:
raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
class GuidanceOpenAILanguageModel(GuidanceLanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", api_base="", api_model="", enable_ReAct_prompting=False):
if api_key == "" or api_key is None:
api_key = os.environ.get("OPENAI_API_KEY", "")
if api_key != "":
openai.api_key = api_key
else:
raise Exception("Please provide OpenAI API key")
if api_base == ""or api_base is None:
api_base = os.environ.get("OPENAI_API_BASE", "") # if not set, use the default base path of "https://api.openai.com/v1"
if api_base != "":
# e.g. https://api.openai.com/v1/ or your custom url
openai.api_base = api_base
print(f'Using custom api_base {api_base}')
if api_model == "" or api_model is None:
api_model = os.environ.get("OPENAI_API_MODEL", "")
if api_model != "":
self.api_model = api_model
else:
self.api_model = "text-davinci-003"
print(f'Using api_model {self.api_model}')
super().__init__(guidance.llms.OpenAI(self.api_model), strategy, evaluation_strategy, enable_ReAct_prompting)
def model_response_handler(self, program, **kargs):
error_msg = ''
while True:
try:
program.llm.max_retries = 60
guidance.llms.OpenAI.cache.clear()
response = program(**kargs)
return response
except openai.error.RateLimitError as e:
sleep_duration = int(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
print(f'{str(e)}, sleeping for {sleep_duration}s; override with env OPENAI_RATE_TIMEOUT')
time.sleep(sleep_duration)
except Exception as e:
if str(e) == f'''Too many (more than {program.llm.max_retries}) OpenAI API RateLimitError's in a row!''':
sleep_duration = int(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
print(f'{str(e)}, sleeping for {sleep_duration}s; override with env OPENAI_RATE_TIMEOUT')
time.sleep(sleep_duration)
else:
error_msg = str(e)
break
raise Exception(error_msg)
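# A minimal usage sketch for the guidance-backed wrappers above (assumes the
# `guidance` package is installed and an OpenAI key is configured; the model
# name and example state are illustrative only):
# guidance_model = GuidanceOpenAILanguageModel(api_key="", api_model="gpt-3.5-turbo")
# thoughts = guidance_model.generate_thoughts(("use 4 numbers and basic arithmetic operations (+-*/) to obtain 24",), k=2)
# values = guidance_model.evaluate_states([("use 4 numbers and basic arithmetic operations (+-*/) to obtain 24", t) for t in thoughts])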
# #------------------------------------> v0 end
# class TreeofThoughts:
# """
# 1. Thought Decomposition --> based on problem properties
# 2. Thought Generator -> create a thought generator function G(p0, s, k) with 2 strategies a sample iid thoughts from a cot prompt b. propose thoughts
# sequentially using a propose prompt
# 3. create a state evaluator function V(p0, S) with 2 strategies a value each state independently b. vote across states
# 4. Choose a search algo based on tree structure [BFS or DFS]
# Implement chosen search algorithm for bfs (algo1):
# init S0 with the input x
# for t = 1 to T (step limit):
# generate candidate thoughts for each state in St-1
# eveluate the candiate states using the state evaluator V
# select the b most promising states for St
# return the final output by genertaing the thought for the best state in St for DFS(algo2)
# defien a recurseive DFS function with the current state s, step t, and other required params
# if t > T record the output by generating the thought for current state S
# for each candidate state s in the sorted list of generated thoughts for s:
# if the evaluated value of s is greater the the threshold of vth call the dfs function recursively
# with s and t + 1
# execute the chosen search algo with the input problem, thought generator, and state evaluator, and other required params
# """
# def __init__(self, model, search_algorithm):
# self.model = model
# self.search_algorithm = search_algorithm
# def solve(self, x, k=None, T=None, b=None, vth=None, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
# start_time = time.time()
# if self.search_algorithm == 'BFS':
# while timeout is None or time.time() - start_time < timeout:
# result = self.tot_bfs(x, k, T, b)
# if result:
# return result
# elif self.search_algorithm == 'DFS':
# while timeout is None or time.time() - start_time < timeout:
# result = self.tot_dfs(x, k, T, vth)
# if result:
# return result
# else:
# raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
# def tot_bfs(self, x, k, T, b):
# S0 = {x}
# for t in range(1, T + 1):
# S0_t = {(*s, z) for s in S0 for z in self.model.generate_thoughts(s, k)}
# Vt = self.model.evaluate_states(S0_t, x)
# St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
# S0 = set(St)
# print(f'S0L {S0}')
# return self.model.generate_thoughts(max(St, key=lambda s: Vt[s]), 1)
# def tot_dfs(self, x, k, T, vth, pruning_threshold=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
# output = []
# iteration_count = 0
# consecutive_convergence_count = 0
# prev_best_value = None
# def dfs(s, t):
# nonlocal consecutive_convergence_count, prev_best_value, iteration_count
# if t > T:
# thought = self.model.generate_thouhts(s, 1)
# value = self.model.evaluate_states({s}, x)[s]
# print(f'thought {thought} and value: {value}')
# output.append((thought, value))
# if confidence_threshold is not None and value >= confidence_threshold:
# return True
# if prev_best_value is not None and convergence_threshold is not None:
# if abs(value - prev_best_value) < convergence_threshold:
# consecutive_convergence_count += 1
# else:
# consecutive_convergence_count = 0
# prev_best_value = value
# iteration_count += 1
# if (max_iterations is not None and iteration_count >= max_iterations) or (convergence_count is not None and consecutive_convergence_count >= convergence_count):
# return True
# return False
# for s_prime in sorted(self.model.generate_thoughts(s, k)):
# state_value = self.model.evaluate_states({s_prime}, x)[s_prime]
# if state_value > vth and (pruning_threshold is None or state_value >= pruning_threshold):
# if dfs((*s, s_prime), t + 1):
# return True
# return False
# dfs(x, 1)
# return max(output, key=lambda x: x[1]) if output else None
# #------------------------------------> v0 end
# #v1
# #------------------------------------> v1 start
# class OptimizedTreeofThoughts(TreeofThoughts):
# # def tot_bfs(self, x, k, T, b):
# # S0 = {x}
# # for t in range(1, T + 1):
# # S0_t = {(*s, z) for s in S0 for z in self.model.parallel_generate_thoughts(s, k)}
# # Vt = self.model.parallel_evaluate_states(S0_t)
# # St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
# # S0 = set(St)
# # return self.model.parallel_generate_thoughts(max(St, key=lambda s: Vt[s]), 1)
# # def tot_dfs(self, x, k, T, vth):
# # output = []
# # def dfs(s, t):
# # if t > T:
# # output.append(self.model.parallel_generate_thoughts(s, 1))
# # return
# # for s_prime in sorted(self.model.parallel_generate_thoughts(s, k)):
# # if self.model.parallel_evaluate_states({s_prime})[s_prime] > vth:
# # dfs((*s, s_prime), t + 1)
# # dfs(x, 1)
# # return output
# def tot_bfs(self, x, k, T, b):
# S0 = {x}
# for t in range(1, T + 1):
# S0_t = {(*s, z) for s in S0 for z in self.model.parallel_generate_thoughts(s, k)}
# Vt = self.model.parallel_evaluate_states(S0_t)
# St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
# S0 = set(St)
# return self.model.generate_thoughts(max(St, key=lambda s: Vt[s]), 1)
# def tot_dfs(self, x, k, T, vth):
# output = []
# def dfs(s, t):
# if t > T:
# output.append(self.model.generate_thoughts(s, 1))
# return
# for s_prime in sorted(self.model.generate_thoughts(s, k)):
# if self.model.evaluate_states({s_prime})[s_prime] > vth:
# dfs((*s, s_prime), t + 1)
# dfs(x, 1)
# return output
# def solve(self, x, k=None, T=None, b=None, vth=None, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
# start_time = time.time()
# if self.search_algorithm == 'BFS':
# while timeout is None or time.time() - start_time < timeout:
# result = self.tot_bfs(x, k, T, b)
# print(f'result: {result}')
# if result:
# return result
# elif self.search_algorithm == 'DFS':
# while timeout is None or time.time() - start_time < timeout:
# result = self.tot_dfs(x, k, T, vth,
# confidence_threshold=confidence_threshold, max_iterations=max_iterations, convergence_threshold=convergence_threshold, convergence_count=convergence_count)
# print(f'result: {result}')
# if result:
# return result
# else:
# raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
# #------------------------------------> v1 end
# #v2
# # class OptimizedTreeofThoughts(TreeofThoughts):
# # def __init__(self, model, search_algorithm):
# # super().__init__(model, search_algorithm)
# # self.model = model
# # self.tree = {
# # "thoughts": [],
# # "states": []
# # }
# # self.metrics = {
# # "x": [],
# # "vth": [],
# # # "evaluations "
# # "b": [],
# # "k": [],
# # "new_best_value": [],
# # "result": [],
# # "search_algorithm": search_algorithm
# # }
# # def parallel_generate_thoughts(self, states, k):
# # with concurrent.futures.ThreadPoolExecutor() as executor:
# # thoughts = list(executor.map(lambda state: self.model.generate_thoughts(state, k), states))
# # print(f"Parallel generated thoughts: {thoughts}")
# # return thoughts
# # def parallel_evaluate_states(self, states, initial_prompt):
# # with concurrent.futures.ThreadPoolExecutor() as executor:
# # state_values = list(executor.map(lambda state: self.model.evaluate_states(state, initial_prompt), states))
# # print(f"Parallel evaluated state values: {state_values}")
# # return state_values
# # def tot_bfs(self, x, k, T, b):
# # S0 = {x}
# # for t in range(1, T + 1):
# # S0_t = {(*s, z) for s in S0 for z in self.model.parallel_generate_thoughts(s, k)}
# # Vt = self.model.parallel_evaluate_states(S0_t)
# # St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
# # S0 = set(St)
# # return self.model.generate_thoughts(max(St, key=lambda s: Vt[s]), 1)
# # def tot_dfs(self, x, k, T, vth):
# # output = []
# # def dfs(s, t):
# # if t > T:
# # output.append(self.model.generate_thoughts(s, 1))
# # return
# # for s_prime in sorted(self.model.generate_thoughts(s, k)):
# # if self.model.evaluate_states({s_prime})[s_prime] > vth:
# # dfs((*s, s_prime), t + 1)
# # dfs(x, 1)
# # return output
# # def save_tree_to_json(self, file_name):
# # output_data = {
# # "tree": self.tree,
# # "metrics": self.metrics
# # }
# # os.makedirs(os.path.dirname(file_name), exist_ok=True)
# # with open(file_name, 'w') as json_file:
# # json.dump(output_data, json_file, indent=4)
# # def solve(self, x, k=None, T=None, b=None, vth=None, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
# # start_time = time.time()
# # file_name = f"logs/tree_of_thoughts_output_{self.search_algorithm}.json"
# # try:
# # if self.search_algorithm == 'BFS':
# # while timeout is None or time.time() - start_time < timeout:
# # result = self.tot_bfs(x, k, T, b)
# # print(f'result: {result}')
# # if result:
# # self.metrics["result"].append(result)
# # self.metrics["x"].append(x)
# # self.metrics["k"].append(k)
# # self.metrics["T"].append(T)
# # self.metrics["b"].append(b)
# # self.metrics["vth"].append(vth)
# # self.save_tree_to_json(file_name)
# # return result
# # elif self.search_algorithm == 'DFS':
# # while timeout is None or time.time() - start_time < timeout:
# # result = self.tot_dfs(x, k, T, vth, dynamic_pruning=True, early_stopping=True, early_stopping_threshold=0.001)
# # print(f'result: {result}')
# # if result:
# # self.metrics["result"].append(result)
# # self.metrics["x"].append(x)
# # self.metrics["k"].append(k)
# # self.metrics["T"].append(T)
# # self.metrics["b"].append(b)
# # self.metrics["vth"].append(vth)
# # self.save_tree_to_json(file_name)
# # return result
# # else:
# # raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
# # except KeyboardInterrupt:
# # print("Keyboard interrupt detected.")
# # finally:
# # print("Saving the current tree and metrics.")
# # self.save_tree_to_json(file_name)
class TreeofThoughts:
"""
1. Thought Decomposition --> based on problem properties
2. Thought Generator -> create a thought generator function G(p0, s, k) with 2 strategies: (a) sample i.i.d. thoughts from a CoT prompt, (b) propose thoughts
sequentially using a propose prompt
3. Create a state evaluator function V(p0, S) with 2 strategies: (a) value each state independently, (b) vote across states
4. Choose a search algorithm based on the tree structure [BFS or DFS]
Implement the chosen search algorithm. For BFS (algo1):
init S0 with the input x
for t = 1 to T (step limit):
generate candidate thoughts for each state in St-1
evaluate the candidate states using the state evaluator V
select the b most promising states for St
return the final output by generating the thought for the best state in St. For DFS (algo2):
define a recursive DFS function with the current state s, step t, and other required params
if t > T, record the output by generating the thought for the current state s
for each candidate state s' in the sorted list of generated thoughts for s:
if the evaluated value of s' is greater than the threshold vth, call the dfs function recursively
with s' and t + 1
execute the chosen search algorithm with the input problem, thought generator, state evaluator, and other required params
"""
def __init__(self, model, search_algorithm):
self.model = model
self.search_algorithm = search_algorithm
self.tree = {
"nodes": [],
"metrics": {
"thoughts": [],
"evaluations": []
}
}
def solve(self, x, k=None, T=None, b=None, vth=None, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
start_time = time.time()
file_name = f"logs/tree_of_thoughts_output_{self.search_algorithm}.json"
try:
if self.search_algorithm == 'BFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_bfs(x, k, T, b)
if result:
self.save_tree_to_json(file_name)
return result
elif self.search_algorithm == 'DFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_dfs(x, k, T, vth)
if result:
self.save_tree_to_json(file_name)
return result
else:
raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
except KeyboardInterrupt:
print("Keyboard interrupt detected.")
finally:
print("Saving the current tree and metrics.")
self.save_tree_to_json(file_name)
def tot_bfs(self, x, k, T, b):
S0 = {x}
for t in range(1, T + 1):
S0_t = set()
for s in S0:
for z in self.model.generate_thoughts(s, k):
if (type(s) == str):
S0_t.add((s, z))
else:
S0_t.add((*s, z))
Vt = self.model.evaluate_states(S0_t, x)
St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
S0 = set(St)
# print(f'S0_t: {S0_t} Vt: {Vt} St: {St} S0: {S0}')
#store thoughts evaluations and parent nodes in a json file
for s in S0_t:
self.tree['nodes'].append(s)
self.tree["metrics"]["thoughts"].append(s[-1])
self.tree["metrics"]["evaluations"].append(Vt[s])
best_state = max(St, key=lambda s: Vt[s])
solution = self.model.generate_solution(x, best_state)
return solution
def tot_dfs(self, x, k, T, vth, pruning_threshold=0.5, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
output = []
iteration_count = 0
consecutive_convergence_count = 0
prev_best_value = None
def dfs(s, t):
nonlocal consecutive_convergence_count, prev_best_value, iteration_count
if t > T:
thought = self.model.generate_thoughts(s, 1)
value = self.model.evaluate_states({s}, x)[s]
output.append((thought, value))
if confidence_threshold is not None and value >= confidence_threshold:
return True
if prev_best_value is not None and convergence_threshold is not None:
if abs(value - prev_best_value) < convergence_threshold:
consecutive_convergence_count += 1
else:
consecutive_convergence_count = 0
prev_best_value = value
iteration_count += 1
if (max_iterations is not None and iteration_count >= max_iterations) or (convergence_count is not None and consecutive_convergence_count >= convergence_count):
return True
return False
for s_prime in sorted(self.model.generate_thoughts(s, k)):
state_value = self.model.evaluate_states({s_prime}, x)[s_prime]
if state_value > vth and (pruning_threshold is None or state_value >= pruning_threshold):
if dfs((*s, s_prime), t + 1):
return True
return False
dfs(x, 1)
return max(output, key=lambda x: x[1]) if output else None
def save_tree_to_json(self, file_name):
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with open(file_name, 'w') as json_file:
json.dump(self.tree, json_file, indent=4)
#does not output state after each thought --- idk why -- needs work
class OptimizedTreeofThoughts(TreeofThoughts):
def solve(self, x, k=None, T=None, b=None, vth=None, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
start_time = time.time()
print(f'Start time {start_time}')
if self.search_algorithm == 'BFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_bfs(x, k, T, b)
print(f'result in optimized tree of thoughts: {result}')
if result:
return result
elif self.search_algorithm == 'DFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_dfs(x, k, T, vth, confidence_threshold=confidence_threshold, max_iterations=max_iterations, convergence_threshold=convergence_threshold, convergence_count=convergence_count)
if result:
return result
else:
raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
if __name__ == '__main__':
search_algorithm = "DFS"
strategy = "cot"
evaluation_strategy="vote"
#create instance
model = OptimizedOpenAILanguageModel('', api_model="gpt-3.5-turbo")
tree_of_thoughts = TreeofThoughts(model, search_algorithm)
input_problem = "use 4 numbers and basic arithmetic operations (+-*/) to obtain 24"
k = 1 # number of thoughts to generate per state
T = 5 # maximum depth of the search tree
b = 14 # branching factor -> number of child nodes kept at each step
vth = 0.8 # pruning threshold -> any evaluated thought below this value is eliminated
timeout = 10 # 10 second timeout before stopping
confidence = 0.8 # confidence threshold for accepting a solution
max_iterations = 40 # maximum number of tree branch nodes to explore
convergence_threshold = 0.01 # for determining when the search process has converged
convergence_count = 5 # number of consecutive converged searches required
#read the documentation for more
#call the solve method with the input problem and other params
solution = tree_of_thoughts.solve(input_problem, k, T, b, vth, timeout, confidence, max_iterations, convergence_threshold, convergence_count)
#use the solution
print(f"solution: {solution}")
# # Save the tree and metrics to a JSON file
# file_name = "logs/tree_of_thoughts_output.json"
# tree_of_thoughts.save_tree_to_json(file_name)
|
tree-of-thoughts-main
|
experiements/latest.py
|
class TreeofThoughts:
def __init__(self, model, search_algorithm):
self.model = model
self.search_algorithm = search_algorithm
def solve(self, x, k, T, b, vth):
if self.search_algorithm == 'BFS':
return self.tot_bfs(x, k, T, b)
elif self.search_algorithm == 'DFS':
return self.tot_dfs(x, k, T, vth)
else:
raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
def tot_bfs(self, x, k, T, b):
S0 = {x}
for t in range(1, T + 1):
S0_t = {(*s, z) for s in S0 for z in self.model.generate_thoughts(s, k)}
Vt = self.model.evaluate_states(S0_t)
St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
S0 = set(St)
return self.model.generate_thoughts(max(St, key=lambda s: Vt[s]), 1)
def tot_dfs(self, x, k, T, vth):
output = []
def dfs(s, t):
if t > T:
output.append(self.model.generate_thoughts(s, 1))
return
for s_prime in sorted(self.model.generate_thoughts(s, k)):
if self.model.evaluate_states({s_prime})[s_prime] > vth:
dfs((*s, s_prime), t + 1)
dfs(x, 1)
return output
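# A minimal usage sketch for the TreeofThoughts class above, assuming any object
# that exposes generate_thoughts(state, k) and evaluate_states(states) can act as
# the model; DummyModel and its constant scores are illustrative only.
class DummyModel:
    def generate_thoughts(self, state, k):
        # produce k placeholder thoughts for the given state
        return [f"thought-{i}" for i in range(k)]

    def evaluate_states(self, states):
        # assign every candidate state the same placeholder value
        return {state: 0.9 for state in states}

# tot = TreeofThoughts(DummyModel(), search_algorithm='BFS')
# print(tot.solve(("use 4 numbers and basic arithmetic operations (+-*/) to obtain 24",), k=2, T=2, b=1, vth=0.5))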
|
tree-of-thoughts-main
|
experiements/first.py
|
import concurrent.futures
from abc import ABC, abstractmethod
import openai
import os
import guidance
import time
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import pipeline
import json
DATA_PATH = './data'
import logging
import argparse
from dotenv import load_dotenv
load_dotenv()
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class AbstractLanguageModel(ABC):
@abstractmethod
def generate_thoughts(self, state, k):
pass
@abstractmethod
def evaluate_states(self, states):
pass
class CustomLanguageModel(AbstractLanguageModel):
def __init__(self, model):
self.model = model
def generate_thoughts(self, state, k):
#implement the thought generation logic using self.model
pass
def evaluate_states(self, states):
#implement state evaluation logic using self.model
pass
class HuggingLanguageModel(AbstractLanguageModel):
def __init__(self, model_name, model_tokenizer=None, verbose=False):
self.model = AutoModelForCausalLM.from_pretrained(model_name)
self.tokenizer = AutoTokenizer.from_pretrained(model_tokenizer or model_name)
self.verbose = verbose
def generate_thoughts(self, state, k, max_length=100):
state_text = ' '.join(state)
prompt = f"Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx Given the current state of reasoning: '{state_text}', generate {k} coherent solutions to achieve {state_text}"
if self.verbose:
print(f"Generating thoughts for state: {state_text}")
try:
inputs = self.tokenizer(prompt, return_tensors="pt")
outputs = self.model.generate(**inputs, max_length=max_length, num_return_sequences=k)
thoughts = [self.tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
except Exception as e:
if self.verbose:
print(f"Error generating thoughts for state: {state_text}")
print(f"Error: {e}")
thoughts = []
return thoughts
def evaluate_states(self, states, initial_prompt, max_length=10):
state_values = {}
for state in states:
state_text = ' '.join(state)
prompt = f"Given the current state of reasoning: '{state_text}', pessimitically evaluate its value as a float between 0 and 1 based on it's potential to achieve {initial_prompt}"
if self.verbose:
print(f"Evaluating state: {state_text}")
try:
inputs = self.tokenizer(prompt, return_tensors="pt")
outputs = self.model.generate(**inputs, num_return_sequences=1, max_length=max_length)
value_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
value = float(value_text)
except ValueError:
if self.verbose:
print(f"Error converting value to float for state: {state_text}")
value = 0 # Assign a default value if the conversion fails
except Exception as e:
if self.verbose:
print(f"Error evaluating state: {state_text}")
print(f"Error: {e}")
value = 0
state_values[state] = value
return state_values
class HFPipelineModel(AbstractLanguageModel):
def __init__(self, model_name, verbose=False):
self.model_name = model_name
self.pipeline = pipeline("text-generation", model=model_name)
self.verbose = verbose
def generate_thoughts(self, state, k, max_length=100):
state_text = ' '.join(state)
prompt = f"Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx Given the current state of reasoning: '{state_text}', generate {k} coherent solutions to achieve"
if self.verbose:
print(f"Generating thoughts for state: {state_text}")
try:
# this class only builds a text-generation pipeline, so generate through it
generated_outputs = self.pipeline(prompt, max_length=max_length, num_return_sequences=k)
thoughts = [output["generated_text"] for output in generated_outputs]
except Exception as e:
if self.verbose:
print(f"Error generating thoughts for state: {state_text}")
print(f"Error: {e}")
thoughts = []
return thoughts
def evaluate_states(self, states, initial_prompt, max_length=10):
state_values = {}
for state in states:
state_text = ' '.join(state)
prompt = f"Given the current state of reasoning: '{state_text}', pessimistically evaluate its value as a float between 0 and 1 based on its potential to achieve {initial_prompt}"
if self.verbose:
print(f"Evaluating state: {state_text}")
try:
generated_outputs = self.pipeline(prompt, max_length=max_length, num_return_sequences=1)
value_text = generated_outputs[0]["generated_text"]
value = float(value_text)
print(f'value {value}')
except ValueError:
if self.verbose:
print(f"Error converting value to float for state: {state_text}")
value = 0 # Assign a default value if the conversion fails
except Exception as e:
if self.verbose:
print(f"Error evaluating state: {state_text}")
print(f"Error: {e}")
value = 0
state_values[state] = value
return state_values
@staticmethod
def load(model_name, verbose=False):
return HFPipelineModel(model_name, verbose)
class OpenAILanguageModel(AbstractLanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", api_base="", api_model="", enable_ReAct_prompting=True):
os.getenv("OPENAI_API_KEY")
if api_key == "" or api_key is None:
api_key = os.environ.get("OPENAI_API_KEY", "")
if api_key != "":
openai.api_key = api_key
else:
raise Exception("Please provide OpenAI API key")
if api_base == ""or api_base is None:
api_base = os.environ.get("OPENAI_API_BASE", "") # if not set, use the default base path of "https://api.openai.com/v1"
if api_base != "":
# e.g. https://api.openai.com/v1/ or your custom url
openai.api_base = api_base
print(f'Using custom api_base {api_base}')
if api_model == "" or api_model is None:
api_model = os.environ.get("OPENAI_API_MODEL", "")
if api_model != "":
self.api_model = api_model
else:
self.api_model = "text-davinci-003"
print(f'Using api_model {self.api_model}')
self.use_chat_api = 'gpt' in self.api_model
# reference : https://www.promptingguide.ai/techniques/react
self.ReAct_prompt = ''
if enable_ReAct_prompting:
self.ReAct_prompt = "Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx'."
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
def openai_api_call_handler(self, prompt, max_tokens, temperature, k=1, stop=None):
while True:
try:
if self.use_chat_api:
messages = [
{
"role": "user",
"content": prompt
}
]
response = openai.ChatCompletion.create(
model=self.api_model,
messages=messages,
max_tokens=max_tokens,
temperature=temperature,
)
else:
response = openai.Completion.create(
engine=self.api_model,
prompt=prompt,
n=k,
max_tokens=max_tokens,
stop=stop,
temperature=temperature,
)
with open("openai.logs", 'a') as log_file:
log_file.write("\n" + "-----------" + '\n' +"Prompt : "+ prompt+"\n")
return response
except openai.error.RateLimitError as e:
sleep_duration = int(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
print(f'{str(e)}, sleeping for {sleep_duration}s; override with env OPENAI_RATE_TIMEOUT')
time.sleep(sleep_duration)
def openai_choice2text_handler(self, choice):
if self.use_chat_api:
text = choice['message']['content']
else:
text = choice.text.strip()
return text
def generate_text(self, prompt, k):
if self.use_chat_api:
thoughts = []
for _ in range(k):
response = self.openai_api_call_handler(prompt, 50, 0.5, k)
text = self.openai_choice2text_handler(response.choices[0])
thoughts += [text]
print(f'thoughts: {thoughts}')
return thoughts
else:
response = self.openai_api_call_handler(prompt, 50, 0.5, k)
thoughts = [self.openai_choice2text_handler(choice) for choice in response.choices]
return thoughts
def generate_thoughts(self, state, k, initial_prompt):
if (type(state) == str):
state_text = state
else:
state_text = '\n'.join(state)
print("We receive a state of type", type(state), "For state: ", state, "\n\n")
# prompt = f"Given the current state of reasoning: \n\n\n'{state_text}'\n\n\nGenerate the next best coherent thought to achieve the reasoning process and get the solution: "
# prompt = f"Based on the current state of reasoning: \n\n\n'{state_text} Provide the next coherent thought that will help progress the reasoning process and reach an soluton "
# prompt = f"These are the thoughts you've had: \n\n\n{state_text}, provide the next coherent thought that will help advance the reasoning process and reach an solution for this problem {initial_prompt}. Think sharply, think out of the box, predict failure. Do not leave any open questions. Unleash your mind."
prompt = f"Considering the thoughts you've had until now:\n\n{state_text}\n\nDevise the next coherent thought that will aid in advancing the reasoning process and achieving a solution to {initial_prompt}. Assess various scenarios, think unconventionally, anticipate potential challenges, and resolve any outstanding queries. Tap into your mind's full potential and make certain no open questions remain."
prompt += self.ReAct_prompt
print(prompt)
thoughts = self.generate_text(prompt, k)
# print(thoughts)
print(f"Generated thoughts: {thoughts}")
return thoughts
def generate_solution(self, initial_prompt, state):
if (type(state) == str):
state_text = state
else:
state_text = '\n'.join(state)
prompt = f"Considering the reasoning provided:\n\n'{state_text}'\n\nDevise the best possible solution for the task: {initial_prompt}"
answer = self.generate_text(prompt, 1)
# print(thoughts)
print(f"General solution : {answer}")
return answer
def evaluate_states(self, states, initial_prompt):
if self.evaluation_strategy == 'value':
state_values = {}
for state in states:
state_text = ' '.join(state)
print("We receive a state of type", type(state), "For state: ", state, "\n\n")
prompt = f"Given the current state of reasoning: '{state_text}', evaluate its value as a float between 0 and 1, become very pessimistic think of potential adverse risks on the probability of this state of reasoning achieveing {initial_prompt} and DO NOT RESPOND WITH ANYTHING ELSE: OTHER THAN AN FLOAT"
response = self.openai_api_call_handler(prompt, 10, 1)
try:
value_text = self.openai_choice2text_handler(response.choices[0])
print(f'state: {value_text}')
value = float(value_text)
print(f"value: {value}")
except ValueError:
value = 0 # Assign a default value if the conversion fails
state_values[state] = value
return state_values
elif self.evaluation_strategy == 'vote':
states_text = '\n'.join([' '.join(state) for state in states])
prompt = f"Given the following states of reasoning, vote for the best state utilizing an scalar value 1-10:\n{states_text}\n\nVote, on the probability of this state of reasoning achieveing {initial_prompt} and become very pessimistic very NOTHING ELSE"
response = self.openai_api_call_handler(prompt, 50, 1)
print(f'state response: {response}')
best_state_text = self.openai_choice2text_handler(response.choices[0])
print(f"Best state text: {best_state_text}")
best_state = tuple(best_state_text.split())
print(f'best_state: {best_state}')
return {state: 1 if state == best_state else 0 for state in states}
else:
raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
# def solution(self, states, initial_prompt):
class OptimizedOpenAILanguageModel(OpenAILanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", cache_enabled=True, api_base="", api_model="", enable_ReAct_prompting=False):
super().__init__(api_key, strategy, evaluation_strategy, api_base, api_model, enable_ReAct_prompting)
self.cache_enabled = cache_enabled
self.thought_cache = {}
self.state_evaluation_cache = {}
def parallel_generate_thoughts(self, states, k):
with concurrent.futures.ThreadPoolExecutor() as executor:
thoughts = list(executor.map(lambda state: self.generate_thoughts(state, k), states))
print(f"Parallel generated thoughts: {thoughts}")
return thoughts
def parallel_evaluate_states(self, states, initial_prompt):
with concurrent.futures.ThreadPoolExecutor() as executor:
# evaluate each state against the same initial prompt in parallel
state_values = list(executor.map(lambda state: self.evaluate_states({state}, initial_prompt), states))
print(f"Parallel evaluated state values: {state_values}")
return state_values
class GuidanceLanguageModel(AbstractLanguageModel):
def __init__(self, model, strategy="cot", evaluation_strategy="value", enable_ReAct_prompting=False):
# gpt4 = guidance.llms.OpenAI("gpt-4")
# vicuna = guidance.llms.transformers.Vicuna("your_path/vicuna_13B", device_map="auto")
self.model = model
# reference : https://www.promptingguide.ai/techniques/react
self.ReAct_prompt = ''
if enable_ReAct_prompting:
self.ReAct_prompt = '''{{#assistant~}}
{{gen 'Observation' temperature=0.5 max_tokens=50}}
{{~/assistant}}'''
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
self.thoughts_program = guidance('''
{{#system~}}
You are a logical and rational assistant.
{{~/system}}
{{#user~}}
Given the current state of reasoning:
{{state_text}}
Generate {{k}} coherent thoughts as short as possible to continue the reasoning process.
Don't answer the question yet.
{{~/user}}
%s
{{#assistant~}}
{{gen 'Thoughts' temperature=0.5 max_tokens=50}}
{{~/assistant}}
''' % self.ReAct_prompt, llm=self.model)
self.value_program = guidance('''
{{#system~}}
You are a logical and rational assistant.
{{~/system}}
{{#user~}}
Given the current state of reasoning:
{{state_text}}
Evaluate its value as a float between 0 and 1, and NOTHING ELSE
Don't answer the question yet.
{{~/user}}
{{#assistant~}}
{{gen 'Value' temperature=1 max_tokens=10}}
{{~/assistant}}
''', llm=self.model)
self.vote_program = guidance('''
{{#system~}}
You are a logical and rational assistant.
{{~/system}}
{{#user~}}
Given the following states of reasoning, vote for the best state:
{{states_text}}
Give the index of your voted best state(the 1st state has index 0), and NOTHING ELSE
Don't answer the question yet.
{{~/user}}
{{#assistant~}}
{{gen 'Vote' temperature=1 max_tokens=10}}
{{~/assistant}}
''', llm=self.model)
def model_response_handler(self, program, **kargs):
print("Calling guidance model(Modify Me to handle specific LLM response excpetions!)")
reponse = program(**kargs)
return reponse
def generate_thoughts(self, state, k):
#implement the thought generation logic using self.model
state_text = ' '.join(state)
thoughts = []
for _ in range(k):
response = self.model_response_handler(self.thoughts_program, state_text=state_text, k=1)
text = response['Thoughts']
thoughts += [text]
# print(thoughts)
print(f"Generated thoughts: {thoughts}")
return thoughts
def evaluate_states(self, states):
#implement state evaluation logic using self.model
if self.evaluation_strategy == 'value':
state_values = {}
for state in states:
state_text = ' '.join(state)
response = self.model_response_handler(self.value_program, state_text=state_text)
try:
value_text = response['Value']
print(f"Value text {value_text}")
value = float(value_text)
print(f"value: {value}")
except ValueError:
value = 0 # Assign a default value if the conversion fails
state_values[state] = value
return state_values
elif self.evaluation_strategy == 'vote':
states_text = '\n'.join([' '.join(state) for state in states])
response = self.model_response_handler(self.vote_program, states_text=states_text)
best_state_text = response['Vote']
print(f"Best state text: {best_state_text}")
best_state = int(best_state_text)
return {state: 1 if i == best_state else 0 for i, state in enumerate(states)}
else:
raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
class GuidanceOpenAILanguageModel(GuidanceLanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", api_base="", api_model="", enable_ReAct_prompting=False):
if api_key == "" or api_key is None:
api_key = os.environ.get("OPENAI_API_KEY", "")
if api_key != "":
openai.api_key = api_key
else:
raise Exception("Please provide OpenAI API key")
if api_base == ""or api_base is None:
api_base = os.environ.get("OPENAI_API_BASE", "") # if not set, use the default base path of "https://api.openai.com/v1"
if api_base != "":
# e.g. https://api.openai.com/v1/ or your custom url
openai.api_base = api_base
print(f'Using custom api_base {api_base}')
if api_model == "" or api_model is None:
api_model = os.environ.get("OPENAI_API_MODEL", "")
if api_model != "":
self.api_model = api_model
else:
self.api_model = "text-davinci-003"
print(f'Using api_model {self.api_model}')
super().__init__(guidance.llms.OpenAI(self.api_model), strategy, evaluation_strategy, enable_ReAct_prompting)
def model_response_handler(self, program, **kargs):
error_msg = ''
while True:
try:
program.llm.max_retries = 60
guidance.llms.OpenAI.cache.clear()
response = program(**kargs)
return response
except openai.error.RateLimitError as e:
sleep_duration = int(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
print(f'{str(e)}, sleeping for {sleep_duration}s; override with env OPENAI_RATE_TIMEOUT')
time.sleep(sleep_duration)
except Exception as e:
if str(e) == f'''Too many (more than {program.llm.max_retries}) OpenAI API RateLimitError's in a row!''':
sleep_duration = int(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
print(f'{str(e)}, sleeping for {sleep_duration}s; override with env OPENAI_RATE_TIMEOUT')
time.sleep(sleep_duration)
else:
error_msg = str(e)
break
raise Exception(error_msg)
class TreeofThoughts:
"""
1. Thought Decomposition --> based on problem properties
2. Thought Generator -> create a thought generator function G(p0, s, k) with 2 strategies: (a) sample i.i.d. thoughts from a CoT prompt, (b) propose thoughts
sequentially using a propose prompt
3. Create a state evaluator function V(p0, S) with 2 strategies: (a) value each state independently, (b) vote across states
4. Choose a search algorithm based on the tree structure [BFS or DFS]
Implement the chosen search algorithm. For BFS (algo1):
init S0 with the input x
for t = 1 to T (step limit):
generate candidate thoughts for each state in St-1
evaluate the candidate states using the state evaluator V
select the b most promising states for St
return the final output by generating the thought for the best state in St. For DFS (algo2):
define a recursive DFS function with the current state s, step t, and other required params
if t > T, record the output by generating the thought for the current state s
for each candidate state s' in the sorted list of generated thoughts for s:
if the evaluated value of s' is greater than the threshold vth, call the dfs function recursively
with s' and t + 1
execute the chosen search algorithm with the input problem, thought generator, state evaluator, and other required params
"""
def __init__(self, model, search_algorithm):
self.model = model
self.search_algorithm = search_algorithm
self.tree = {
"nodes": [],
"metrics": {
"thoughts": [],
"evaluations": []
}
}
def solve(self, x, k=None, T=None, b=None, vth=None, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
start_time = time.time()
file_name = f"logs/tree_of_thoughts_output_{self.search_algorithm}.json"
try:
if self.search_algorithm == 'BFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_bfs(x, k, T, b)
if result:
self.save_tree_to_json(file_name)
return result
elif self.search_algorithm == 'DFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_dfs(x, k, T, vth)
if result:
self.save_tree_to_json(file_name)
return result
else:
raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
except KeyboardInterrupt:
logger.error("Keyboard interrupt detected.")
except ValueError as e:
logger.error(f"Error: {e}")
finally:
logger.info("Saving the current tree and metrics.")
self.save_tree_to_json(file_name)
def tot_bfs(self, x, k, T, b):
S0 = {x}
for t in range(1, T + 1):
S0_t = set()
for s in S0:
for z in self.model.generate_thoughts(s, k, x):
if (type(s) == str):
S0_t.add((s, z))
else:
S0_t.add((*s, z))
Vt = self.model.evaluate_states(S0_t, x)
St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
S0 = set(St)
logger.info(f'Step: {t}, S0_t: {S0_t}, Vt: {Vt}, St: {St}, S0: {S0}')
best_state = max(St, key=lambda s: Vt[s])
return best_state
def tot_dfs(self, x, k, T, vth, pruning_threshold=0.5, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
output = []
iteration_count = 0
consecutive_convergence_count = 0
prev_best_value = None
file_name = f"logs/tree_of_thoughts_output_{self.search_algorithm}.json"
def dfs(s, t):
nonlocal consecutive_convergence_count, prev_best_value, iteration_count, output
if t > T:
thought = self.model.generate_thoughts(s, 1, x)
print(f'thoughts inside dfs {thought}')
value = self.model.evaluate_states({s}, x)[s]
print(f'values inside dfs {value}')
output.append((thought, value))
print(f'output {output}')
if confidence_threshold is not None and value >= confidence_threshold:
return True
if prev_best_value is not None and convergence_threshold is not None:
if abs(value - prev_best_value) < convergence_threshold:
consecutive_convergence_count += 1
else:
consecutive_convergence_count = 0
prev_best_value = value
iteration_count += 1
if (max_iterations is not None and iteration_count >= max_iterations) or (convergence_count is not None and consecutive_convergence_count >= convergence_count):
return True
return False
for s_prime in sorted(self.model.generate_thoughts(s, k, x)):
state_value = self.model.evaluate_states({s_prime}, x)[s_prime]
logger.info(f"State: {s_prime}, Value: {state_value}")
if state_value > vth and (pruning_threshold is None or state_value >= pruning_threshold):
if (type(s) == str):
child = (s, s_prime)
else:
child = (*s, s_prime)
# self.tree['nodes'][child] = s
# self.tree["metrics"]["thoughts"][child] = s_prime
# self.tree["metrics"]["evaluations"][child] = state_value
if dfs(child, t + 1):
return True
self.save_tree_to_json(file_name)
return False
dfs(x, 4)
print(f'output {output}')
best_state = max(output, key=lambda x: x[1])
return best_state[0]
def save_tree_to_json(self, file_name):
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with open(file_name, 'w') as json_file:
json.dump(self.tree, json_file, indent=4)
def print_tree(self, x, node=None, depth=0):
if node is None:
node = self.tree["nodes"][x]
thought = self.tree["metrics"]["thoughts"][node]
evaluation = self.tree["metrics"]["evaluations"][node]
tree_info = {
"node": node,
"thought": thought,
"evaluation": evaluation,
"children": []
}
for child, parent in self.tree["nodes"].items():
if parent == node:
child_info = self.print_tree(child, depth + 1)
tree_info["children"].append(child_info)
return tree_info
#does not output state after each thought --- idk why -- needs work
class OptimizedTreeofThoughts(TreeofThoughts):
def solve(self, x, k=None, T=None, b=None, vth=None, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
start_time = time.time()
print(f'Start time {start_time}')
if self.search_algorithm == 'BFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_bfs(x, k, T, b)
print(f'result in optimized tree of thoughts: {result}')
if result:
return result
elif self.search_algorithm == 'DFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_dfs(x, k, T, vth, confidence_threshold=confidence_threshold, max_iterations=max_iterations, convergence_threshold=convergence_threshold, convergence_count=convergence_count)
if result:
return result
else:
raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
if __name__ == '__main__':
search_algorithm = "DFS"
strategy = "cot"
evaluation_strategy="vote"
#create instance
model = OptimizedOpenAILanguageModel('', api_model="gpt-3.5-turbo")
tree_of_thoughts = OptimizedTreeofThoughts(model, search_algorithm)
# input_problem = "use 4 numbers and basic arithmetic operations (+-*/) to obtain 24"
parser = argparse.ArgumentParser(description="Tree of Thoughts Solver")
parser.add_argument("--problem", type=str, required=True, help="Initial problem statement")
parser.add_argument("--search_algorithm", type=str, choices=["BFS", "DFS"], default="BFS", help="Search algorithm to use (BFS or DFS)")
parser.add_argument("--k", type=int, default=3, help="Number of thoughts to generate")
parser.add_argument("--T", type=int, default=10, help="Step limit")
parser.add_argument("--b", type=int, default=5, help="Number of most promising states")
parser.add_argument("--vth", type=float, default=0.4, help="Value threshold for DFS")
parser.add_argument("--timeout", type=int, default=10, help="Timeout in seconds before stopping")
parser.add_argument("--confidence", type=float, default=0.8, help="Model confidence threshold")
parser.add_argument("--max_iterations", type=int, default=40, help="Maximum number of tree branch nodes")
parser.add_argument("--convergence_threshold", type=float, default=0.01, help="Convergence threshold for the search process")
parser.add_argument("--convergence_count", type=int, default=5, help="Number of searches to be considered converged")
args = parser.parse_args()
#solve the problem using the tree of thoughts class
optimized_tree_of_thoughts = OptimizedTreeofThoughts(model, search_algorithm=args.search_algorithm)
#solve the problem using the tree of thoughts helper
best_state = optimized_tree_of_thoughts.solve(args.problem, k=args.k, T=args.T, b=args.b, vth=args.vth)
#generate the final solution (generate_solution expects the initial prompt first, then the state)
final_solution = optimized_tree_of_thoughts.model.generate_solution(args.problem, best_state)
#print the final solution
print(f"Final solution: {final_solution}")
trees = optimized_tree_of_thoughts.print_tree(final_solution)
# tree_of_thoughts.print_tree(final_solution)
#generate a solution prompt --> "give me a solution right now" -> a get_final_answer step that takes the best state into account and provides the response
#python tree_of_thoughts.py --problem "Design a new transportation system for a city" --search_algorithm BFS --k 5 --T 3 --b 5 --vth 0.5 --timeout 10 --confidence 0.8 --max_iterations 40 --convergence_threshold 0.01 --convergence_count 5
|
tree-of-thoughts-main
|
experiements/main.py
|
from abc import ABC, abstractmethod
import openai
class AbstractLanguageModel(ABC):
@abstractmethod
def generate_thoughts(self, state, k):
pass
@abstractmethod
def evaluate_states(self, states):
pass
class CustomLanguageModel(AbstractLanguageModel):
def __init__(self, model):
self.model = model
def generate_thoughts(self, state, k):
#implement the thought generation logic using self.model
pass
def evaluate_states(self, states):
#implement state evaluation logic using self.model
pass
class OpenAILanguageModel(AbstractLanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value"):
openai.api_key = api_key
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
def generate_thoughts(self, state, k):
"""
In this example, the generate_thoughts function takes the current state and the number of thoughts to generate (k) as input. It uses the OpenAI API to generate thoughts based on the chosen strategy ('cot' or 'propose'). The function returns a list of generated thoughts.
"""
state_text = ''.join(state)
if self.strategy == "cot":
prompt = f"{state_text} [CoT]"
response = openai.Completion.create(
engine="davinci-codex",
prompt=prompt,
n=k,
max_tokens=50,
stop=None,
temperature=0.5,
)
            thoughts = [choice.text.strip() for choice in response.choices]
elif self.strategy == 'propose':
prompt = "f{state_text} [Propose {k} thoughts]"
response = openai.Completion.create(
engine="davinci-codex",
prompt=prompt,
n=1,
max_tokens=50 * k,
stop=None,
temperature=0.5,
)
thoughts = response.choices[0].text.strip().split('\n')[:k]
else:
raise ValueError("Invalid strategy. Choose 'cot' or 'propose'")
return thoughts
def evaluate_states(self, states):
if self.evaluation_strategy == 'value':
state_values = {}
for state in states:
state_text = ' '.join(state)
prompt = f"{state_text} [Value]"
response = openai.Completion.create(
engine="davinci-codex",
prompt=prompt,
n=1,
max_tokens=10,
stop=None,
temperature=0.5,
)
value = float(response.choices[0].text.strip())
state_values[state] = value
return state_values
elif self.evaluation_strategy == 'vote':
states_text = '\n'.join([' '.join(state) for state in states])
prompt = f"Vote for the best state:\n{states_text}\n[Vote]"
response = openai.Completion.create(
engine="davinci-codex",
prompt=prompt,
n=1,
max_tokens=50,
stop=None,
temperature=0.5,
)
best_state_text = response.choices[0].text.strip()
best_state = tuple(best_state_text.split())
return {state: 1 if state == best_state else 0 for state in states}
else:
raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
class TreeofThoughts:
"""
    1. Thought Decomposition --> based on problem properties
    2. Thought Generator --> create a thought generator function G(p0, s, k) with 2 strategies:
       a) sample i.i.d. thoughts from a CoT prompt
       b) propose thoughts sequentially using a propose prompt
    3. Create a state evaluator function V(p0, S) with 2 strategies:
       a) value each state independently
       b) vote across states
    4. Choose a search algorithm based on the tree structure [BFS or DFS]
    Implement the chosen search algorithm:
    BFS (algo 1):
        init S0 with the input x
        for t = 1 to T (step limit):
            generate candidate thoughts for each state in St-1
            evaluate the candidate states using the state evaluator V
            select the b most promising states for St
        return the final output by generating the thought for the best state in ST
    DFS (algo 2):
        define a recursive DFS function with the current state s, step t, and other required params
        if t > T, record the output by generating the thought for the current state s
        for each candidate state s' in the sorted list of generated thoughts for s:
            if the evaluated value of s' is greater than the threshold vth, call the DFS function
            recursively with s' and t + 1
    Execute the chosen search algorithm with the input problem, thought generator, state evaluator,
    and other required params. (See the runnable sketch after the usage lines at the end of this file.)
"""
def __init__(self, model, search_algorithm):
self.model = model
self.search_algorithm = search_algorithm
def solve(self, x, k, T, b, vth):
if self.search_algorithm == 'BFS':
return self.tot_bfs(x, k, T, b)
elif self.search_algorithm == 'DFS':
return self.tot_dfs(x, k, T, vth)
else:
raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
def tot_bfs(self, x, k, T, b):
S0 = {x}
for t in range(1, T + 1):
S0_t = {(*s, z) for s in S0 for z in self.model.generate_thoughts(s, k)}
Vt = self.model.evaluate_states(S0_t)
St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
S0 = set(St)
return self.model.generate_thoughts(max(St, key=lambda s: Vt[s]), 1)
def tot_dfs(self, x, k, T, vth):
output = []
def dfs(s, t):
if t > T:
output.append(self.model.generate_thoughts(s, 1))
return
for s_prime in sorted(self.model.generate_thoughts(s, k)):
if self.model.evaluate_states({s_prime})[s_prime] > vth:
dfs((*s, s_prime), t + 1)
dfs(x, 1)
return output
# usage
openai_model = OpenAILanguageModel('sk')
# choose search algorithm ('BFS' or 'DFS')
search_algorithm = "BFS"
# create an instance of the tree of thoughts class
tree_of_thoughts = TreeofThoughts(openai_model, search_algorithm)
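# The usage lines above stop after constructing the solver, so the sketch below (an
# illustrative addition, not part of the original file) drives TreeofThoughts end to end
# with a hypothetical StubLanguageModel instead of the OpenAI API, exercising the BFS
# procedure described in the class docstring.
class StubLanguageModel(AbstractLanguageModel):
    def generate_thoughts(self, state, k):
        # deterministic placeholder thoughts derived from the current state length
        return [f"thought-{len(state)}-{i}" for i in range(k)]
    def evaluate_states(self, states):
        # score longer states higher so the search has something to rank
        return {state: len(state) / 10.0 for state in states}
if __name__ == '__main__':
    stub_tot = TreeofThoughts(StubLanguageModel(), "BFS")
    print(stub_tot.solve(("What is 2 + 2?",), k=2, T=2, b=2, vth=0.1))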
|
tree-of-thoughts-main
|
experiements/v2.py
|
import os
import time
import json
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
DATA_PATH = './data'
class TreeofThoughts:
def __init__(self, model, search_algorithm):
self.model = model
self.search_algorithm = search_algorithm
self.tree = {
"nodes": {}
}
def solve(self, initial_prompt, num_thoughts=None, max_steps=None, max_states=None, value_threshold=None, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
start_time = time.time()
self.file_name = f"logs/tree_of_thoughts_output_{self.search_algorithm}.json"
try:
best_thoughts = ""
if self.search_algorithm == 'BFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_bfs(initial_prompt, num_thoughts, max_steps, max_states, value_threshold)
if result:
self.save_tree_to_json(self.file_name)
best_thoughts = result
elif self.search_algorithm == 'DFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_dfs(initial_prompt, num_thoughts, max_steps, value_threshold, confidence_threshold=confidence_threshold, max_iterations=max_iterations, convergence_threshold=convergence_threshold, convergence_count=convergence_count)
if result:
self.save_tree_to_json(self.file_name)
best_thoughts = result
if best_thoughts:
solution = self.model.generate_solution(initial_prompt, best_thoughts)
if solution:
return solution
else:
raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
except KeyboardInterrupt:
logger.error("Keyboard interrupt detected.")
except ValueError as e:
logger.error(f"Error: {e}")
finally:
logger.info("Saving the current tree and metrics.")
self.save_tree_to_json(self.file_name)
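    # BFS over thought states: at each step expand every current state with num_thoughts
    # candidate thoughts, score the expanded states, drop those below pruning_threshold,
    # and keep the max_states highest-scoring states for the next step.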
def tot_bfs(self, initial_prompt, num_thoughts, max_steps, max_states, pruning_threshold):
current_states = {initial_prompt}
for step in range(1, max_steps + 1):
generated_states = set()
for state in current_states:
for thought in self.model.generate_thoughts(state=state, k=num_thoughts, initial_prompt=initial_prompt):
if (type(state) == str):
generated_states.add((state, thought))
else:
generated_states.add((*state, thought))
state_values = self.model.evaluate_states(states=generated_states, initial_prompt=initial_prompt)
for state, value in state_values.items():
if not (type(state) == str):
state = " | ".join(state)
self.tree["nodes"][state] = value
print("Saving tree")
self.save_tree_to_json(self.file_name)
pruned_generated_states = {state: value for state, value in state_values.items() if value >= pruning_threshold}
selected_states = sorted(pruned_generated_states.keys(), key=lambda state: pruned_generated_states[state], reverse=True)[:max_states]
current_states = set(selected_states)
logger.info(f'Step: {step}, Generated states: {generated_states}, State values: {state_values}, Selected states: {selected_states}, Current states: {current_states}')
best_state = max(selected_states, key=lambda state: state_values[state])
return best_state
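    # DFS over thought states: recursively extend the current state with candidate thoughts,
    # pruning branches whose value falls below value_threshold / pruning_threshold, and stop
    # early on a confidence hit, an iteration cap, or repeated convergence of the best value.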
def tot_dfs(self, initial_prompt, num_thoughts, max_steps, value_threshold, pruning_threshold=0.5, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
output = []
iteration_count = 0
consecutive_convergence_count = 0
prev_best_value = None
file_name = f"logs/tree_of_thoughts_output_{self.search_algorithm}.json"
def dfs(state, step):
nonlocal consecutive_convergence_count, prev_best_value, iteration_count, output
if step > max_steps:
thought = self.model.generate_thoughts(state, 1, initial_prompt)
value = self.model.evaluate_states({state}, initial_prompt)[state]
output.append((thought, value))
if confidence_threshold is not None and value >= confidence_threshold:
return True
if prev_best_value is not None and convergence_threshold is not None:
if abs(value - prev_best_value) < convergence_threshold:
consecutive_convergence_count += 1
else:
consecutive_convergence_count = 0
prev_best_value = value
iteration_count += 1
if (max_iterations is not None and iteration_count >= max_iterations) or (convergence_count is not None and consecutive_convergence_count >= convergence_count):
return True
return False
for next_state in sorted(self.model.generate_thoughts(state, num_thoughts, initial_prompt)):
state_value = self.model.evaluate_states({next_state}, initial_prompt)[next_state]
logger.info(f"State: {next_state}, Value: {state_value}")
if state_value > value_threshold and (pruning_threshold is None or state_value >= pruning_threshold):
if (type(state) == str):
child = (state, next_state)
else:
child = (*state, next_state)
if dfs(child, step + 1):
return True
self.save_tree_to_json(file_name)
return False
dfs(initial_prompt, 1)
best_state = max(output, key=lambda x: x[1])
return best_state[0]
def save_tree_to_json(self, file_name):
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with open(file_name, 'w') as json_file:
json.dump(self.tree, json_file, indent=4)
def print_tree(self, node, depth=0):
        # self.tree only stores "nodes", so guard against the missing "metrics" section
        metrics = self.tree.get("metrics", {})
        thought = metrics.get("thoughts", {}).get(node, "")
        evaluation = metrics.get("evaluations", {}).get(node, "")
tree_info = f"{' ' * depth}Node: {node}, Thought: {thought}, Evaluation: {evaluation}\n"
for child, parent in self.tree["nodes"].items():
if parent == node:
tree_info += self.print_tree(child, depth + 1)
print(f'tree info: {tree_info}')
return tree_info
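# Usage sketch (an illustrative addition, not part of the original file): this variant of
# TreeofThoughts expects a model exposing generate_thoughts(state, k, initial_prompt),
# evaluate_states(states, initial_prompt) and generate_solution(initial_prompt, best_thoughts).
# DummyModel below is hypothetical; tot_bfs is called directly to keep the sketch short and
# to avoid solve()'s timeout loop. Note that tot_bfs writes the tree to self.file_name.
class DummyModel:
    def generate_thoughts(self, state, k, initial_prompt=None):
        return [f"thought-{i}" for i in range(k)]
    def evaluate_states(self, states, initial_prompt=None):
        return {state: 0.9 for state in states}
    def generate_solution(self, initial_prompt, best_thoughts):
        return f"solution derived from: {best_thoughts}"
if __name__ == '__main__':
    tot = TreeofThoughts(DummyModel(), search_algorithm='BFS')
    tot.file_name = "logs/tree_of_thoughts_output_BFS.json"  # normally set inside solve()
    best = tot.tot_bfs("Design a reading plan", num_thoughts=2, max_steps=2,
                       max_states=2, pruning_threshold=0.5)
    print(best)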
|
tree-of-thoughts-main
|
experiements/optimized.py
|
from abc import ABC, abstractmethod
import openai
class AbstractLanguageModel(ABC):
@abstractmethod
def generate_thoughts(self, state, k):
pass
@abstractmethod
def evaluate_states(self, states):
pass
class CustomLanguageModel(AbstractLanguageModel):
def __init__(self, model):
self.model = model
def generate_thoughts(self, state, k):
#implement the thought generation logic using self.model
pass
def evaluate_states(self, states):
#implement state evaluation logic using self.model
pass
class OpenAILanguageModel(AbstractLanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value"):
openai.api_key = api_key
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
def generate_thoughts(self, state, k):
state_text = ' '.join(state)
if self.strategy == 'cot':
prompt = f"{state_text} [CoT]"
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
n=k,
max_tokens=50,
stop=None,
temperature=0.5,
)
thoughts = [choice.text.strip() for choice in response.choices]
print(thoughts)
elif self.strategy == 'propose':
prompt = f"{state_text} [Propose {k} thoughts]"
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
n=1,
max_tokens=50 * k,
stop=None,
temperature=0.5,
)
thoughts = response.choices[0].text.strip().split('\n')[:k]
print(thoughts)
else:
raise ValueError("Invalid strategy. Choose 'cot' or 'propose'.")
return thoughts
def evaluate_states(self, states):
if self.evaluation_strategy == 'value':
state_values = {}
for state in states:
state_text = ' '.join(state)
prompt = f"{state_text} [Value]"
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
n=1,
max_tokens=10,
stop=None,
temperature=0.5,
)
result = response.choices[0].text
print(result)
try:
value = float(response.choices[0].text.strip())
except ValueError:
value = 0 # Assign a default value if the conversion fails
state_values[state] = value
return state_values
elif self.evaluation_strategy == 'vote':
states_text = '\n'.join([' '.join(state) for state in states])
prompt = f"Vote for the best state:\n{states_text}\n[Vote]"
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
n=1,
max_tokens=50,
stop=None,
temperature=0.5,
)
best_state_text = response.choices[0].text.strip()
print(f"best state text {best_state_text}")
best_state = tuple(best_state_text.split())
return {state: 1 if state == best_state else 0 for state in states}
else:
raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
model = OpenAILanguageModel('')
class TreeofThoughts:
"""
    1. Thought Decomposition --> based on problem properties
    2. Thought Generator --> create a thought generator function G(p0, s, k) with 2 strategies:
       a) sample i.i.d. thoughts from a CoT prompt
       b) propose thoughts sequentially using a propose prompt
    3. Create a state evaluator function V(p0, S) with 2 strategies:
       a) value each state independently
       b) vote across states
    4. Choose a search algorithm based on the tree structure [BFS or DFS]
    Implement the chosen search algorithm:
    BFS (algo 1):
        init S0 with the input x
        for t = 1 to T (step limit):
            generate candidate thoughts for each state in St-1
            evaluate the candidate states using the state evaluator V
            select the b most promising states for St
        return the final output by generating the thought for the best state in ST
    DFS (algo 2):
        define a recursive DFS function with the current state s, step t, and other required params
        if t > T, record the output by generating the thought for the current state s
        for each candidate state s' in the sorted list of generated thoughts for s:
            if the evaluated value of s' is greater than the threshold vth, call the DFS function
            recursively with s' and t + 1
    Execute the chosen search algorithm with the input problem, thought generator, state evaluator,
    and other required params.
"""
def __init__(self, model, search_algorithm):
self.model = model
self.search_algorithm = search_algorithm
def solve(self, x, k, T, b, vth):
if self.search_algorithm == 'BFS':
return self.tot_bfs(x, k, T, b)
elif self.search_algorithm == 'DFS':
return self.tot_dfs(x, k, T, vth)
else:
raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
def tot_bfs(self, x, k, T, b):
S0 = {x}
for t in range(1, T + 1):
S0_t = {(*s, z) for s in S0 for z in self.model.generate_thoughts(s, k)}
Vt = self.model.evaluate_states(S0_t)
St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
S0 = set(St)
thoughts = self.model.generate_thoughts(max(St, key=lambda s: Vt[s]), 1)
print(thoughts)
return thoughts
def tot_dfs(self, x, k, T, vth):
output = []
def dfs(s, t):
if t > T:
output.append(self.model.generate_thoughts(s, 1))
return
for s_prime in sorted(self.model.generate_thoughts(s, k)):
if self.model.evaluate_states({s_prime})[s_prime] > vth:
dfs((*s, s_prime), t + 1)
dfs(x, 1)
print(output)
return output
# usage
# choose search algorithm ('BFS' or 'DFS')
search_algorithm = "BFS"
# cot or propose
strategy = "cot"
# value or vote
evaluation_strategy = "value"
# create an instance of the tree of thoughts class
tree_of_thoughts = TreeofThoughts(model, search_algorithm)
input_problem = "What is 2 + 2"
k = 5
T = 3
b = 5
vth = 0.5
# call the solve method with the input problem and other params
solution = tree_of_thoughts.solve(input_problem, k, T, b, vth)
#use the solution in your production environment
print(solution)
|
tree-of-thoughts-main
|
experiements/old-main/treeofthoughts.py
|
from abc import ABC, abstractmethod
import openai
class AbstractLanguageModel(ABC):
@abstractmethod
def generate_thoughts(self, state, k):
pass
@abstractmethod
def evaluate_states(self, states):
pass
class CustomLanguageModel(AbstractLanguageModel):
def __init__(self, model):
self.model = model
def generate_thoughts(self, state, k):
#implement the thought generation logic using self.model
pass
def evaluate_states(self, states):
#implement state evaluation logic using self.model
pass
class OpenAILanguageModel(AbstractLanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value"):
openai.api_key = api_key
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
def generate_thoughts(self, state, k):
state_text = ' '.join(state)
# Optimization 2: More sophisticated prompt engineering
if self.strategy == 'cot':
prompt = f"Given the current state of reasoning: '{state_text}', generate {k} coherent thoughts to continue the reasoning process:"
response = openai.Completion.create(
engine="text-davinci-003", #
prompt=prompt,
n=k,
max_tokens=50,
stop=None,
temperature=0.5,
)
thoughts = [choice.text.strip() for choice in response.choices]
print(thoughts)
elif self.strategy == 'propose':
prompt = f"Given the current state of reasoning: '{state_text}', propose {k} coherent thoughts to continue the reasoning process:"
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
n=1,
max_tokens=50 * k,
stop=None,
temperature=0.5,
)
thoughts = response.choices[0].text.strip().split('\n')[:k]
print(thoughts)
else:
raise ValueError("Invalid strategy. Choose 'cot' or 'propose'.")
return thoughts
def evaluate_states(self, states):
if self.evaluation_strategy == 'value':
state_values = {}
for state in states:
state_text = ' '.join(state)
prompt = f"Given the current state of reasoning: '{state_text}', evaluate its value as a float between 0 and 1:"
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
n=1,
max_tokens=10,
stop=None,
temperature=0.5,
)
result = response.choices[0].text
print(result)
try:
value = float(response.choices[0].text.strip())
except ValueError:
value = 0 # Assign a default value if the conversion fails
state_values[state] = value
return state_values
elif self.evaluation_strategy == 'vote':
states_text = '\n'.join([' '.join(state) for state in states])
prompt = f"Given the following states of reasoning, vote for the best state:\n{states_text}\n\nVote:"
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
n=1,
max_tokens=50,
stop=None,
temperature=0.5,
)
best_state_text = response.choices[0].text.strip()
print(f"best state text {best_state_text}")
best_state = tuple(best_state_text.split())
return {state: 1 if state == best_state else 0 for state in states}
else:
raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
model = OpenAILanguageModel('key')
class TreeofThoughts:
"""
    1. Thought Decomposition --> based on problem properties
    2. Thought Generator --> create a thought generator function G(p0, s, k) with 2 strategies:
       a) sample i.i.d. thoughts from a CoT prompt
       b) propose thoughts sequentially using a propose prompt
    3. Create a state evaluator function V(p0, S) with 2 strategies:
       a) value each state independently
       b) vote across states
    4. Choose a search algorithm based on the tree structure [BFS or DFS]
    Implement the chosen search algorithm:
    BFS (algo 1):
        init S0 with the input x
        for t = 1 to T (step limit):
            generate candidate thoughts for each state in St-1
            evaluate the candidate states using the state evaluator V
            select the b most promising states for St
        return the final output by generating the thought for the best state in ST
    DFS (algo 2):
        define a recursive DFS function with the current state s, step t, and other required params
        if t > T, record the output by generating the thought for the current state s
        for each candidate state s' in the sorted list of generated thoughts for s:
            if the evaluated value of s' is greater than the threshold vth, call the DFS function
            recursively with s' and t + 1
    Execute the chosen search algorithm with the input problem, thought generator, state evaluator,
    and other required params.
"""
def __init__(self, model, search_algorithm):
self.model = model
self.search_algorithm = search_algorithm
def solve(self, x, k, T, b, vth):
if self.search_algorithm == 'BFS':
return self.tot_bfs(x, k, T, b)
elif self.search_algorithm == 'DFS':
return self.tot_dfs(x, k, T, vth)
else:
raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
def tot_bfs(self, x, k, T, b):
S0 = {x}
for t in range(1, T + 1):
S0_t = {(*s, z) for s in S0 for z in self.model.generate_thoughts(s, k)}
Vt = self.model.evaluate_states(S0_t)
St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
S0 = set(St)
return self.model.generate_thoughts(max(St, key=lambda s: Vt[s]), 1)
def tot_dfs(self, x, k, T, vth):
output = []
def dfs(s, t):
if t > T:
output.append(self.model.generate_thoughts(s, 1))
return
for s_prime in sorted(self.model.generate_thoughts(s, k)):
if self.model.evaluate_states({s_prime})[s_prime] > vth:
dfs((*s, s_prime), t + 1)
dfs(x, 1)
return output
# usage
# choose search algorithm ('BFS' or 'DFS')
search_algorithm = "BFS"
strategy = "cot"
evaluation_strategy = "value"
# create an instance of the tree of thoughts class
tree_of_thoughts = TreeofThoughts(model, search_algorithm)
input_problem = "What are next generation reasoning methods to advance the reasoning of large multi-modality models"
k = 5
T = 3
b = 5
vth = 0.5
#call the solve method with the input problem and other params
solution = tree_of_thoughts.solve(input_problem, k, T, b, vth)
#use the solution in your production environment
print(solution)
|
tree-of-thoughts-main
|
experiements/old-main/treeofthoughts-v2.py
|
import os
import json
import itertools
import argparse
import numpy as np
from functools import partial
from models import gpt, gpt_usage
from tasks import get_task
def get_value(task, x, y, n_evaluate_sample, cache_value=True):
value_prompt = task.value_prompt_wrap(x, y)
if cache_value and value_prompt in task.value_cache:
return task.value_cache[value_prompt]
value_outputs = gpt(value_prompt, n=n_evaluate_sample, stop=None)
value = task.value_outputs_unwrap(x, y, value_outputs)
if cache_value:
task.value_cache[value_prompt] = value
return value
def get_values(task, x, ys, n_evaluate_sample, cache_value=True):
values = []
local_value_cache = {}
for y in ys: # each partial output
if y in local_value_cache: # avoid duplicate candidates
value = 0
else:
value = get_value(task, x, y, n_evaluate_sample, cache_value=cache_value)
local_value_cache[y] = value
values.append(value)
return values
def get_votes(task, x, ys, n_evaluate_sample):
vote_prompt = task.vote_prompt_wrap(x, ys)
vote_outputs = gpt(vote_prompt, n=n_evaluate_sample, stop=None)
values = task.vote_outputs_unwrap(vote_outputs, len(ys))
return values
def get_proposals(task, x, y):
propose_prompt = task.propose_prompt_wrap(x, y)
proposals = gpt(propose_prompt, n=1, stop=None)[0].split('\n')
return [y + _ + '\n' for _ in proposals]
def get_samples(task, x, y, n_generate_sample, prompt_sample, stop):
if prompt_sample == 'standard':
prompt = task.standard_prompt_wrap(x, y)
elif prompt_sample == 'cot':
prompt = task.cot_prompt_wrap(x, y)
else:
raise ValueError(f'prompt_sample {prompt_sample} not recognized')
samples = gpt(prompt, n=n_generate_sample, stop=stop)
return [y + _ for _ in samples]
def solve(args, task, idx, to_print=True):
print(gpt)
x = task.get_input(idx) # input
ys = [''] # current output candidates
infos = []
for step in range(task.steps):
# generation
if args.method_generate == 'sample':
new_ys = [get_samples(task, x, y, args.n_generate_sample, prompt_sample=args.prompt_sample, stop=task.stops[step]) for y in ys]
elif args.method_generate == 'propose':
new_ys = [get_proposals(task, x, y) for y in ys]
new_ys = list(itertools.chain(*new_ys))
ids = list(range(len(new_ys)))
# evaluation
if args.method_evaluate == 'vote':
values = get_votes(task, x, new_ys, args.n_evaluate_sample)
elif args.method_evaluate == 'value':
values = get_values(task, x, new_ys, args.n_evaluate_sample)
# selection
if args.method_select == 'sample':
ps = np.array(values) / sum(values)
select_ids = np.random.choice(ids, size=args.n_select_sample, p=ps).tolist()
elif args.method_select == 'greedy':
select_ids = sorted(ids, key=lambda x: values[x], reverse=True)[:args.n_select_sample]
select_new_ys = [new_ys[select_id] for select_id in select_ids]
# log
if to_print:
sorted_new_ys, sorted_values = zip(*sorted(zip(new_ys, values), key=lambda x: x[1], reverse=True))
print(f'-- new_ys --: {sorted_new_ys}\n-- sol values --: {sorted_values}\n-- choices --: {select_new_ys}\n')
infos.append({'step': step, 'x': x, 'ys': ys, 'new_ys': new_ys, 'values': values, 'select_new_ys': select_new_ys})
ys = select_new_ys
if to_print:
print(ys)
return ys, {'steps': infos}
def naive_solve(args, task, idx, to_print=True):
x = task.get_input(idx) # input
ys = get_samples(task, x, '', args.n_generate_sample, args.prompt_sample, stop=None)
return ys, {}
def run(args):
task = get_task(args.task, args.task_file_path)
logs, cnt_avg, cnt_any = [], 0, 0
global gpt
gpt = partial(gpt, model=args.backend, temperature=args.temperature)
if args.naive_run:
file = f'logs/{args.task}/{args.backend}_{args.temperature}_naive_{args.prompt_sample}_sample_{args.n_generate_sample}_start{args.task_start_index}_end{args.task_end_index}.json'
else:
file = f'logs/{args.task}/{args.backend}_{args.temperature}_{args.method_generate}{args.n_generate_sample}_{args.method_evaluate}{args.n_evaluate_sample}_{args.method_select}{args.n_select_sample}_start{args.task_start_index}_end{args.task_end_index}.json'
os.makedirs(os.path.dirname(file), exist_ok=True)
for i in range(args.task_start_index, args.task_end_index):
# solve
if args.naive_run:
ys, info = naive_solve(args, task, i)
else:
ys, info = solve(args, task, i)
# log
infos = [task.test_output(i, y) for y in ys]
info.update({'idx': i, 'ys': ys, 'infos': infos, 'usage_so_far': gpt_usage(args.backend)})
logs.append(info)
with open(file, 'w') as f:
json.dump(logs, f, indent=4)
# log main metric
accs = [info['r'] for info in infos]
cnt_avg += sum(accs) / len(accs)
cnt_any += any(accs)
print(i, 'sum(accs)', sum(accs), 'cnt_avg', cnt_avg, 'cnt_any', cnt_any, '\n')
n = args.task_end_index - args.task_start_index
print(cnt_avg / n, cnt_any / n)
print('usage_so_far', gpt_usage(args.backend))
def parse_args():
args = argparse.ArgumentParser()
args.add_argument('--backend', type=str, choices=['gpt-4', 'gpt-3.5-turbo'], default='gpt-4')
args.add_argument('--temperature', type=float, default=0.7)
args.add_argument('--task', type=str, required=True, choices=['game24', 'text', 'crosswords'])
args.add_argument('--task_file_path', type=str, required=True)
args.add_argument('--task_start_index', type=int, default=900)
args.add_argument('--task_end_index', type=int, default=1000)
args.add_argument('--naive_run', action='store_true')
args.add_argument('--prompt_sample', type=str, choices=['standard', 'cot']) # only used when method_generate = sample, or naive_run
args.add_argument('--method_generate', type=str, choices=['sample', 'propose'])
args.add_argument('--method_evaluate', type=str, choices=['value', 'vote'])
args.add_argument('--method_select', type=str, choices=['sample', 'greedy'])
args.add_argument('--n_generate_sample', type=int, default=1) # only thing needed if naive_run
args.add_argument('--n_evaluate_sample', type=int, default=1)
args.add_argument('--n_select_sample', type=int, default=1)
args = args.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print(args)
run(args)
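# Example invocation (an illustrative sketch; the script name and the task data file are
# assumptions about the local setup, and only flags defined in parse_args above are used):
# python run.py --task game24 --task_file_path ./data/24.csv \
#   --method_generate propose --method_evaluate value --method_select greedy \
#   --n_evaluate_sample 3 --n_select_sample 5 --task_start_index 900 --task_end_index 905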
|
tree-of-thoughts-main
|
experiements/tree-of-thought-llm/run.py
|
import openai
import backoff
completion_tokens = prompt_tokens = 0
@backoff.on_exception(backoff.expo, openai.error.OpenAIError)
def completions_with_backoff(**kwargs):
return openai.ChatCompletion.create(**kwargs)
def gpt(prompt, model="gpt-4", temperature=0.7, max_tokens=1000, n=1, stop=None) -> list:
messages = [{"role": "user", "content": prompt}]
return chatgpt(messages, model=model, temperature=temperature, max_tokens=max_tokens, n=n, stop=stop)
def chatgpt(messages, model="gpt-4", temperature=0.7, max_tokens=1000, n=1, stop=None) -> list:
global completion_tokens, prompt_tokens
outputs = []
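    # issue requests in batches of at most 20 completions per call until n samples are collected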
while n > 0:
cnt = min(n, 20)
n -= cnt
res = completions_with_backoff(model=model, messages=messages, temperature=temperature, max_tokens=max_tokens, n=cnt, stop=stop)
outputs.extend([choice["message"]["content"] for choice in res["choices"]])
# log completion tokens
completion_tokens += res["usage"]["completion_tokens"]
prompt_tokens += res["usage"]["prompt_tokens"]
return outputs
def gpt_usage(backend="gpt-4"):
global completion_tokens, prompt_tokens
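    # rough USD cost estimate from the cumulative token counts, using per-1K-token prices
    # hard-coded at the time this snippet was written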
if backend == "gpt-4":
cost = completion_tokens / 1000 * 0.06 + prompt_tokens / 1000 * 0.03
elif backend == "gpt-3.5-turbo":
cost = (completion_tokens + prompt_tokens) / 1000 * 0.0002
return {"completion_tokens": completion_tokens, "prompt_tokens": prompt_tokens, "cost": cost}
|
tree-of-thoughts-main
|
experiements/tree-of-thought-llm/models.py
|
def get_task(name, file=None):
if name == 'game24':
from .game24 import Game24Task
return Game24Task(file)
elif name == 'text':
from .text import TextTask
return TextTask(file)
elif name == 'crosswords':
from .crosswords import MiniCrosswordsTask
return MiniCrosswordsTask(file)
else:
raise NotImplementedError
|
tree-of-thoughts-main
|
experiements/tree-of-thought-llm/tasks/__init__.py
|