# modeling_friday.py
from __future__ import annotations
import torch
import torch.nn.functional as F
from PIL import Image

# Model Constants
IMAGE_TOKEN = "<image>"
IMG_START_TOKEN = "<img_start>"
IMG_END_TOKEN = "<img_end>"
IGNORE_INDEX = -100
PAD_FOR_EOS = -300
def mask_token_segment(
start_id: int,
end_id: int,
input_ids: torch.Tensor,
fill_value: int = -100):
"""
Replace *every* token from each `start_id` **through** its matching `end_id`
(boundaries included) with `fill_value`. Any spans that start with some
other token are left untouched.
Works on CUDA, TorchScript, batched via vmap, etc.—no Python loops.
"""
if input_ids.dim() != 1:
raise ValueError("`input_ids` must be 1-D")
device = input_ids.device
n = input_ids.size(0)
# where the *target* start-tokens and end-tokens sit
start_pos = (input_ids == start_id).nonzero(as_tuple=True)[0] # ascending
end_pos = (input_ids == end_id).nonzero(as_tuple=True)[0] # ascending
if start_pos.numel() == 0:
return input_ids.clone()
# ── pair every start with the first end that comes *after* it ────────────────
# searchsorted gives the insertion index into the (sorted) end positions
idx_in_end = torch.searchsorted(end_pos, start_pos, right=False)
have_match = idx_in_end < end_pos.size(0) # safety: drop unmatched
start_pos = start_pos[have_match]
end_pos = end_pos[idx_in_end[have_match]]
# (rare) guard against pathological orderings
keep = end_pos > start_pos
start_pos, end_pos = start_pos[keep], end_pos[keep]
    if start_pos.numel() == 0:
        return input_ids.clone()
# ── differential “scan-line” trick to build the span mask in O(N) ───────────
# +1 at each start index, -1 at the element *after* each end
delta = torch.zeros(n + 1, dtype=torch.int8, device=device)
delta[start_pos] += 1
delta[end_pos + 1] -= 1 # +1 is safe because delta is length n+1
inside = torch.cumsum(delta[:-1], dim=0) > 0 # boolean mask, incl. boundaries
# ── apply ────────────────────────────────────────────────────────────────────
out = input_ids.clone()
out[inside] = fill_value
return out
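# Illustrative usage (a minimal sketch; the token ids below are made up for the example):
#   ids = torch.tensor([5, 7, 1, 2, 8, 9])
#   mask_token_segment(start_id=7, end_id=8, input_ids=ids)
#   # -> tensor([5, -100, -100, -100, -100, 9]): the 7 ... 8 span, boundaries
#   #    included, is replaced with the fill value while 5 and 9 are untouched.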
def maybe_zero_3(param, ignore_status=False, name=None):
from deepspeed import zero
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
if hasattr(param, "ds_id"):
if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
if not ignore_status:
                print(f"{name}: ZeRO status is NOT_AVAILABLE; gathering parameter anyway")
with zero.GatheredParameters([param]):
param = param.data.detach().cpu().clone()
else:
param = param.detach().cpu().clone()
return param
# Borrowed from peft.util.get_peft_model_state_dict
def get_peft_state_maybe_zero_3(named_params, bias):
if bias == "none":
to_return = {k: t for k, t in named_params if "lora_" in k}
elif bias == "all":
to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k}
elif bias == "lora_only":
to_return = {}
maybe_lora_bias = {}
lora_bias_names = set()
for k, t in named_params:
if "lora_" in k:
to_return[k] = t
bias_name = k.split("lora_")[0] + "bias"
lora_bias_names.add(bias_name)
elif "bias" in k:
maybe_lora_bias[k] = t
        for k, t in maybe_lora_bias.items():
            if k in lora_bias_names:
                to_return[k] = t
else:
raise NotImplementedError
to_return = {k: maybe_zero_3(v, ignore_status=True) for k, v in to_return.items()}
return to_return
def get_peft_state_non_lora_maybe_zero_3(named_params, require_grad_only=True):
to_return = {k: t for k, t in named_params if "lora_" not in k}
if require_grad_only:
to_return = {k: t for k, t in to_return.items() if t.requires_grad}
to_return = {k: maybe_zero_3(v, ignore_status=True).cpu() for k, v in to_return.items()}
return to_return
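# Typical checkpointing pattern when training with LoRA under DeepSpeed ZeRO-3
# (a hedged sketch; `model` and `training_args.lora_bias` are assumed to exist
# in the caller's training script):
#   lora_state = get_peft_state_maybe_zero_3(model.named_parameters(), training_args.lora_bias)
#   non_lora_state = get_peft_state_non_lora_maybe_zero_3(model.named_parameters())
#   torch.save(non_lora_state, "non_lora_trainables.bin")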
def find_all_linear_names(modules):
    # `modules` must be a callable that yields (name, module) pairs,
    # e.g. `model.named_modules` or `some_dict_of_modules.items`.
    lora_module_names = set()
    for name, module in modules():
if isinstance(module, torch.nn.Linear):
names = name.split('.')
lora_module_names.add(names[0] if len(names) == 1 else names[-1])
if 'lm_head' in lora_module_names: # needed for 16-bit
lora_module_names.remove('lm_head')
return list(lora_module_names)
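# Example (illustrative): collecting LoRA target modules over a whole model tree.
# Note that this picks up every nn.Linear it sees except `lm_head`:
#   target_modules = find_all_linear_names(model.named_modules)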
def expand2square(pil_img, background_color):
width, height = pil_img.size
if width == height:
return pil_img
elif width > height:
result = Image.new(pil_img.mode, (width, width), background_color)
result.paste(pil_img, (0, (width - height) // 2))
return result
else:
result = Image.new(pil_img.mode, (height, height), background_color)
result.paste(pil_img, ((height - width) // 2, 0))
return result
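# Illustrative example: a 640x480 RGB image becomes a 640x640 canvas with the
# original pasted vertically centred and the border filled with `background_color`
# (the values below are only for illustration):
#   img = Image.new("RGB", (640, 480))
#   square = expand2square(img, (127, 127, 127))
#   square.size  # -> (640, 640)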
def pad_and_stack(img_list, pad_value=0.0):
"""
img_list : list[Tensor] each (C, H, W) already *normalised*
pad_value: float or tuple/list of 3 floats (one per channel)
Use 0.0 if your processor has already centred to mean 0.
Returns
-------
batch : Tensor (B, C, H_max, W_max)
"""
# 1. target square size ---------------------------------------------------
h_max = max(t.shape[1] for t in img_list)
w_max = max(t.shape[2] for t in img_list)
H, W = max(h_max, w_max), max(h_max, w_max)
# 2. create padded copies -------------------------------------------------
padded = []
for img in img_list:
c, h, w = img.shape
canvas = img.new_full((c, H, W), pad_value) # filled with mean/zeros
canvas[:, :h, :w] = img # top-left corner
padded.append(canvas)
return torch.stack(padded, 0) # (B,C,H,W)
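# Minimal sketch of the padding behaviour (shapes are illustrative):
#   a = torch.zeros(3, 512, 384)
#   b = torch.zeros(3, 448, 512)
#   batch = pad_and_stack([a, b])   # -> shape (2, 3, 512, 512)
#   # each image sits in the top-left corner of a square canvas filled with pad_value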
# ------------------------------------------------------------------------------------------
# Copyright (c) 2024 Baifeng Shi.
# All rights reserved.
#
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import torch
def split_chessboard(x, num_split):
"""
x: b * c * h * w
    Divide x into num_split**2 sub-squares and concatenate them along the batch dimension.
"""
B, C, H, W = x.shape
assert H % num_split == 0 and W % num_split == 0
h, w = H // num_split, W // num_split
x_split = torch.cat([x[:, :, i*h:(i+1)*h, j*w:(j+1)*w] for i in range(num_split) for j in range(num_split)], dim=0)
return x_split
def merge_chessboard(x, num_split):
"""
x: b * c * h * w
Assuming x contains num_split**2 sub-squares concatenated along batch dimension, merge the sub-squares back to the original whole square.
(inverse of split_chessboard)
"""
B, C, H, W = x.shape
assert B % (num_split**2) == 0
b = B // (num_split**2)
x_merge = torch.cat([torch.cat([x[(i*num_split + j)*b:(i*num_split + j + 1)*b] for j in range(num_split)], dim=-1)
for i in range(num_split)], dim=-2)
return x_merge
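# split_chessboard and merge_chessboard are inverses of each other, e.g.:
#   x = torch.randn(2, 3, 8, 8)
#   tiles = split_chessboard(x, num_split=2)     # -> (8, 3, 4, 4): 4 tiles per image
#   assert torch.equal(merge_chessboard(tiles, num_split=2), x)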
def batched_forward(model, x, batch_size=-1):
if batch_size == -1:
return model(x)
else:
x_batched = x.split(batch_size)
outs = [model(x) for x in x_batched]
return torch.cat(outs, dim=0)
# ------------------------------------------------------------------------------------------
# Copyright (c) 2024 Baifeng Shi.
# All rights reserved.
#
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import math
import torch
import torch.nn.functional as F
from einops import rearrange
def multiscale_forward(model, input, scales=None, img_sizes=None, max_split_size=None, resize_output_to_idx=0, num_prefix_token=0,
output_shape='bnc', split_forward=False):
# print(f"Input shape: {input.shape}")
assert input.dim() == 4, "Input image must be in the shape of BxCxHxW."
assert input.shape[2] == input.shape[3], "Currently only square images are supported."
assert output_shape in ['bnc', 'bchw'], "Output shape should be either BxNxC (e.g., ViT) or BxCxHxW (e.g., ConvNet)."
assert output_shape == 'bnc' or num_prefix_token == 0, "For ConvNet there shouldn't be any prefix token."
b, c, input_size, _ = input.shape
# image size for each scale
assert scales is not None or img_sizes is not None, "Please assign either scales or img_sizes."
img_sizes = img_sizes or [int(input_size * scale) for scale in scales]
# prepare multiscale inputs
max_split_size = max_split_size or input_size # The maximum size of each split of image. Set as the input size by default
num_splits = [math.ceil(size / max_split_size) for size in img_sizes] # number of splits each scale
input_multiscale = []
for size, num_split in zip(img_sizes, num_splits):
x = F.interpolate(input.to(torch.float32), size=size, mode='bicubic').to(input.dtype)
x = split_chessboard(x, num_split=num_split)
input_multiscale.append(x)
# run feedforward on each scale
outs_multiscale = [batched_forward(model, x, b) if split_forward else model(x) for x in input_multiscale]
if num_prefix_token > 0:
outs_prefix_multiscale = [out[:, :num_prefix_token] for out in outs_multiscale]
outs_multiscale = [out[:, num_prefix_token:] for out in outs_multiscale]
if output_shape == 'bnc':
outs_multiscale = [rearrange(out, 'b (h w) c -> b c h w', h=int(out.shape[1] ** 0.5), w=int(out.shape[1] ** 0.5))
for out in outs_multiscale]
# merge outputs of different splits for each scale separately
outs_multiscale = [merge_chessboard(out, num_split=num_split) for num_split, out in zip(num_splits, outs_multiscale)]
# interpolate outputs from different scales and concat together
output_size = outs_multiscale[resize_output_to_idx].shape[-2]
out = torch.cat([F.interpolate(outs_multiscale[i].to(torch.float32), size=output_size,
mode='area').to(outs_multiscale[i].dtype)
for i in range(len(outs_multiscale))], dim=1)
if output_shape == 'bnc':
out = rearrange(out, 'b c h w -> b (h w) c')
if num_prefix_token > 0:
# take the mean of prefix tokens from different splits for each scale
outs_prefix_multiscale = [torch.stack(out.split(b, dim=0), dim=0).mean(dim=0) for out in outs_prefix_multiscale]
out_prefix_multiscale = torch.cat(outs_prefix_multiscale, dim=-1)
out = torch.cat([out_prefix_multiscale, out], dim=1)
return out
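# Hedged usage sketch for S2-style multiscale encoding (names are illustrative;
# `vit` is any callable mapping (B, C, H, W) images to (B, N, C) tokens with N a
# perfect square):
#   feats = multiscale_forward(vit, images, img_sizes=[512, 1024],
#                              max_split_size=512, output_shape='bnc')
#   # The 1024px scale is split into 512px tiles, each scale is encoded separately,
#   # tiles are stitched back together, resized to a common grid, and the per-scale
#   # features are concatenated along the channel dimension.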
import torch
import torch.nn as nn
class MLPAdapter(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, num_layers=2, activation='gelu', checkpoint_path=None, device=None, **kwargs):
"""
Initialize the MLPAdapter with the given dimensions and activation function.
Args:
input_dim (int): Input dimension.
hidden_dim (int): Hidden dimension.
output_dim (int): Output dimension.
            num_layers (int): Number of hidden layers before the final output projection.
activation (str): Activation function to use ('gelu' or 'relu').
"""
super().__init__()
self.num_layers = num_layers
self.activation = activation
self.output_dim = output_dim
# Define the first layer
layers_list = [nn.Linear(input_dim, hidden_dim, device=device)]
if activation == 'gelu':
layers_list.append(nn.GELU())
elif activation == 'relu':
layers_list.append(nn.ReLU())
else:
raise ValueError("Unsupported activation function. Use 'gelu' or 'relu'.")
# Define the subsequent layers
for _ in range(1, num_layers):
layers_list.append(nn.Linear(hidden_dim, hidden_dim, device=device))
if activation == 'gelu':
layers_list.append(nn.GELU())
elif activation == 'relu':
layers_list.append(nn.ReLU())
# Define the final output layer
layers_list.append(nn.Linear(hidden_dim, output_dim, device=device))
self.mlp = nn.Sequential(*layers_list)
# Load checkpoint if provided
if checkpoint_path:
self.load_state_dict(torch.load(checkpoint_path, map_location=device), strict=False)
print(f"Loaded MLPAdapter from {checkpoint_path}")
if device:
self.to(device)
def forward(self, x):
"""
Forward pass through the MLPAdapter.
Args:
x (torch.Tensor): Input tensor.
Returns:
torch.Tensor: Output tensor after passing through the MLP.
"""
return self.mlp(x)
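# Example: the projector used by the default Friday config maps the concatenated
# two-scale vision features (2 x 3072 = 6144 dims) down to the LLM hidden size,
# matching DEFAULT_CFG_VISION_ADAPTER defined further below (the token count 64
# is purely illustrative):
#   adapter = MLPAdapter(input_dim=6144, hidden_dim=3072, output_dim=3072)
#   adapter(torch.randn(1, 64, 6144)).shape  # -> torch.Size([1, 64, 3072])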
import torch
import torch.nn as nn
import torch.nn.functional as F
import PIL.Image
from typing import List
from transformers import AutoModel, AutoImageProcessor
class FastVitVisionTower(nn.Module):
def __init__(self, pretrained_model_name_or_path, model_params={}, pad_to_square=True, **kwargs):
super().__init__()
self.is_loaded = False
self.pretrained_model_name_or_path = pretrained_model_name_or_path
self.model_params = model_params
self.pad_to_square = pad_to_square
self.load_model()
@property
def output_dim(self):
return self.vision_tower.config.embed_dim if self.vision_tower else None
def load_model(self):
if self.is_loaded:
return
self.image_processor = AutoImageProcessor.from_pretrained(self.pretrained_model_name_or_path)
self.image_processor.crop_size = self.image_processor.size
self.vision_tower = AutoModel.from_pretrained(
self.pretrained_model_name_or_path,
**self.model_params,
)
self.vision_tower.requires_grad_(False)
self.is_loaded = True
def preprocess_images(self, imgs: List[PIL.Image.Image], pad_and_stack_tensors=True) -> torch.Tensor:
img_mean = tuple(int(x * 255) for x in self.image_processor.image_mean)
if self.pad_to_square:
imgs = [expand2square(img, img_mean) for img in imgs]
imgs = [self.image_processor(img, do_resize=True, do_center_crop=False, return_tensors="pt")['pixel_values'][0] for img in imgs]
if pad_and_stack_tensors:
imgs = pad_and_stack(imgs, pad_value=0.0)
imgs = imgs.to(dtype=torch.float32, device=self.device)
return imgs
def forward(self, images):
if type(images) is list:
image_features = []
for image in images:
image_feature = self.vision_tower(
image.to(device=self.device, dtype=self.dtype).unsqueeze(0)
)
image_features.append(image_feature)
else:
image_features = self.vision_tower(
images.to(device=self.device, dtype=self.dtype),
)
return image_features
@property
def dummy_feature(self):
        return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)
@property
def dtype(self):
return self.vision_tower.dtype
@property
def device(self):
return self.vision_tower.device
@property
def config(self):
if self.is_loaded:
return self.vision_tower.config
else:
return self.cfg_only
@property
def hidden_size(self):
return self.config.embed_dim
@property
def num_patches(self):
return (self.config.image_size // self.config.patch_size) ** 2
class FastVitVisionTowerS2(FastVitVisionTower):
def __init__(self, pretrained_model_name_or_path, s2_scales, model_params={}, **kwargs):
self.s2_scales = list(map(int, s2_scales.split(',')))
self.s2_scales.sort()
self.s2_split_size = self.s2_scales[0]
self.s2_image_size = self.s2_scales[-1]
super().__init__(pretrained_model_name_or_path, model_params)
self.multiscale_forward = multiscale_forward
@property
def output_dim(self):
        return (len(self.s2_scales) * self.vision_tower.config.embed_dim) if self.vision_tower else None
def load_model(self):
if self.is_loaded:
return
super().load_model()
self.image_processor.size = self.image_processor.crop_size = {
"height": self.s2_image_size,
"width": self.s2_image_size
}
def forward_feature(self, images):
image_size = self.vision_tower.config.image_size
if images.shape[2] != image_size or images.shape[3] != image_size:
images = F.interpolate(
images,
size=(image_size, image_size),
mode="bilinear",
align_corners=False,
antialias=True
)
return self.vision_tower(
images.to(device=self.device, dtype=self.dtype),
)
def forward(self, images):
if type(images) is list:
image_features = []
for image in images:
image_feature = self.multiscale_forward(
self.forward_feature,
image.unsqueeze(0),
img_sizes=self.s2_scales,
max_split_size=self.s2_split_size
)
image_features.append(image_feature)
else:
image_features = self.multiscale_forward(
self.forward_feature,
images,
img_sizes=self.s2_scales,
max_split_size=self.s2_split_size
)
return image_features
@property
def hidden_size(self):
return self.config.embed_dim * len(self.s2_scales)
import torch
import torch.nn as nn
import PIL.Image
from typing import List
from transformers import SiglipVisionModel, SiglipImageProcessor, SiglipVisionConfig
class SiglipVisionTower(nn.Module):
def __init__(self, pretrained_model_name_or_path, model_params={}, pad_to_square=True, **kwargs):
super().__init__()
self.is_loaded = False
self.pretrained_model_name_or_path = pretrained_model_name_or_path
self.model_params = model_params
self.pad_to_square = pad_to_square
self.select_layer = -2
self.load_model()
@property
def output_dim(self):
return self.vision_tower.config.hidden_size if self.vision_tower else None
def load_model(self):
if self.is_loaded:
return
self.image_processor = SiglipImageProcessor.from_pretrained(self.pretrained_model_name_or_path)
self.image_processor.crop_size = self.image_processor.size
self.vision_tower = SiglipVisionModel.from_pretrained(
self.pretrained_model_name_or_path,
**self.model_params,
)
self.vision_tower.requires_grad_(False)
self.is_loaded = True
def preprocess_images(self, imgs: List[PIL.Image.Image], pad_and_stack_tensors=True) -> torch.Tensor:
img_mean = tuple(int(x * 255) for x in self.image_processor.image_mean)
if self.pad_to_square:
imgs = [expand2square(img, img_mean) for img in imgs]
imgs = [self.image_processor(img, return_tensors="pt")['pixel_values'][0] for img in imgs]
if pad_and_stack_tensors:
imgs = pad_and_stack(imgs, pad_value=0.0)
imgs = imgs.to(dtype=torch.float32, device=self.device)
return imgs
def feature_select(self, image_forward_outs):
image_features = image_forward_outs.hidden_states[self.select_layer]
return image_features
def forward(self, images):
if type(images) is list:
image_features = []
for image in images:
image_forward_out = self.vision_tower(image.to(device=self.device, dtype=self.dtype).unsqueeze(0),
output_hidden_states=True)
image_feature = self.feature_select(image_forward_out).to(image.dtype)
image_features.append(image_feature)
else:
image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype),
output_hidden_states=True)
image_features = self.feature_select(image_forward_outs).to(images.dtype)
return image_features
@property
def dummy_feature(self):
return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)
@property
def dtype(self):
return self.vision_tower.dtype
@property
def device(self):
return self.vision_tower.device
@property
def config(self):
if self.is_loaded:
return self.vision_tower.config
else:
return self.cfg_only
@property
def hidden_size(self):
return self.config.hidden_size
@property
def num_patches(self):
return (self.config.image_size // self.config.patch_size) ** 2
class SiglipVisionTowerS2(SiglipVisionTower):
def __init__(self, pretrained_model_name_or_path, s2_scales, model_params={}, **kwargs):
self.s2_scales = list(map(int, s2_scales.split(',')))
self.s2_scales.sort()
self.s2_split_size = self.s2_scales[0]
self.s2_image_size = self.s2_scales[-1]
super().__init__(pretrained_model_name_or_path, model_params)
self.multiscale_forward = multiscale_forward
self.image_processor.size['height'] = self.image_processor.size['width'] = self.s2_image_size
self.image_processor.crop_size['height'] = self.image_processor.crop_size['width'] = self.s2_image_size
@property
def output_dim(self):
        return (len(self.s2_scales) * self.vision_tower.config.hidden_size) if self.vision_tower else None
def load_model(self):
if self.is_loaded:
return
super().load_model()
self.image_processor.size['height'] = self.image_processor.size['width'] = self.s2_image_size
self.image_processor.crop_size['height'] = self.image_processor.crop_size['width'] = self.s2_image_size
def forward_feature(self, images):
image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype),
output_hidden_states=True)
image_features = self.feature_select(image_forward_outs).to(images.dtype)
return image_features
def forward(self, images):
if type(images) is list:
image_features = []
for image in images:
image_feature = self.multiscale_forward(
self.forward_feature,
image.unsqueeze(0),
img_sizes=self.s2_scales,
max_split_size=self.s2_split_size
)
image_features.append(image_feature)
else:
image_features = self.multiscale_forward(
self.forward_feature,
images,
img_sizes=self.s2_scales,
max_split_size=self.s2_split_size
)
return image_features
@property
def hidden_size(self):
return self.config.hidden_size * len(self.s2_scales)
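# Sketch of the shared vision-tower interface (values mirror DEFAULT_CFG_VISION_TOWER
# defined further below; treat this as an illustration, not a required call sequence):
#   tower = FastVitVisionTowerS2("kevin510/fast-vit-hd", s2_scales="512,1024",
#                                model_params={"trust_remote_code": True})
#   pixel_values = tower.preprocess_images([pil_image])   # (1, 3, 1024, 1024)
#   features = tower(pixel_values)                         # (1, N, tower.hidden_size)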
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from typing import List, Tuple, Optional, Union
import PIL
from transformers import AutoTokenizer, AutoConfig
from transformers.cache_utils import Cache
from transformers.modeling_outputs import CausalLMOutputWithPast
from .configuration_phi3 import Phi3Config
from .modeling_phi3 import Phi3Model, Phi3ForCausalLM
DEFAULT_CFG_SPECIAL_TOKENS = {
"image_token_id": 200029,
"image_start_token_id": 200030,
"image_end_token_id": 200031,
}
DEFAULT_CFG_VISION_TOWER = {
"pretrained_model_name_or_path": "kevin510/fast-vit-hd",
"type": "fastvit",
"s2_scales": "512,1024",
"use_s2": True,
"pad_to_square": True,
"freeze": False,
"model_params": { "trust_remote_code": True }
}
DEFAULT_CFG_VISION_ADAPTER = {
"input_dim": 6144,
"hidden_dim": 3072,
"output_dim": 3072,
"layers": 2,
"activation": "gelu",
"freeze": False,
}
class FridayConfig(Phi3Config):
model_type = "friday"
def __init__(self,
base_model_name_or_path: str | None = "microsoft/Phi-4-mini-reasoning",
delay_load=False,
tokenizer_model_max_length=None,
**kwargs
):
base_kwargs = {}
if base_model_name_or_path is not None:
base_cfg = Phi3Config.from_pretrained(
base_model_name_or_path,
                trust_remote_code=True,  # Phi-4 uses custom code in the repo
)
base_kwargs = base_cfg.to_dict()
merged = {**base_kwargs, **kwargs}
self.delay_load = delay_load
self.tokenizer_model_max_length = tokenizer_model_max_length
self._cfg_vision_tower = DEFAULT_CFG_VISION_TOWER.copy()
if "cfg_vision_tower" in kwargs:
self._cfg_vision_tower.update(kwargs["cfg_vision_tower"])
self._cfg_vision_adapter = DEFAULT_CFG_VISION_ADAPTER.copy()
if "cfg_vision_adapter" in kwargs:
self._cfg_vision_adapter.update(kwargs["cfg_vision_adapter"])
self._cfg_special_tokens = DEFAULT_CFG_SPECIAL_TOKENS.copy()
if "cfg_special_tokens" in kwargs:
self._cfg_special_tokens.update(kwargs["cfg_special_tokens"])
super().__init__(**merged)
@property
def cfg_vision_tower(self):
return self._cfg_vision_tower
@cfg_vision_tower.setter
def cfg_vision_tower(self, value):
if not value:
            raise ValueError("cfg_vision_tower cannot be empty")
self._cfg_vision_tower.update(value)
@property
def cfg_vision_adapter(self):
return self._cfg_vision_adapter
@cfg_vision_adapter.setter
def cfg_vision_adapter(self, value):
if not value:
            raise ValueError("cfg_vision_adapter cannot be empty")
self._cfg_vision_adapter.update(value)
@property
def cfg_special_tokens(self):
return self._cfg_special_tokens
@cfg_special_tokens.setter
def cfg_special_tokens(self, value):
if not value:
            raise ValueError("cfg_special_tokens cannot be empty")
self._cfg_special_tokens.update(value)
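# A configuration can be built from the Phi-4-mini base and selectively overridden,
# e.g. to freeze the vision tower during a projector-only warm-up stage
# (a hedged sketch of the intended usage):
#   cfg = FridayConfig(
#       base_model_name_or_path="microsoft/Phi-4-mini-reasoning",
#       cfg_vision_tower={"freeze": True},
#       tokenizer_model_max_length=4096,
#   )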
class FridayModel(Phi3Model):
config_class = FridayConfig
def __init__(self, config: FridayConfig):
super().__init__(config)
self.cfg_vision_adapter = config.cfg_vision_adapter
self.cfg_vision_tower = config.cfg_vision_tower
self.vision_tower = None
self.mm_projector = None
if not config.delay_load:
self.initialize_vision_modules()
def get_vision_tower(self):
return self.vision_tower
def initialize_vision_modules(self):
if self.vision_tower is not None:
return
if self.cfg_vision_tower.get("type", "siglip").lower() == "siglip":
if self.cfg_vision_tower.get("use_s2", True):
self.vision_tower = SiglipVisionTowerS2(**self.cfg_vision_tower)
else:
self.vision_tower = SiglipVisionTower(**self.cfg_vision_tower)
elif self.cfg_vision_tower.get("type", "siglip").lower() == "fastvit":
if self.cfg_vision_tower.get("use_s2", True):
self.vision_tower = FastVitVisionTowerS2(**self.cfg_vision_tower)
else:
self.vision_tower = FastVitVisionTower(**self.cfg_vision_tower)
else:
raise ValueError(f"Unsupported vision tower type: {self.cfg_vision_tower.get('type', 'siglip')}. Supported types are 'siglip' and 'fastvit'.")
self.vision_tower.load_model()
self.mm_projector = MLPAdapter(**self.cfg_vision_adapter)
if self.cfg_vision_tower.get("freeze", False):
self.set_vision_tower_requires_grad(False)
if self.cfg_vision_adapter.get("freeze", False):
self.set_vision_adapter_requires_grad(False)
def compute_image_features(self, imgs: torch.Tensor) -> torch.Tensor:
features = self.vision_tower(imgs)
if isinstance(features, list):
features = torch.stack(features, dim=1)
return self.mm_projector(features)
def set_vision_tower_requires_grad(self, requires_grad: bool):
if self.vision_tower is not None:
for param in self.vision_tower.parameters():
param.requires_grad = requires_grad
else:
raise ValueError("Vision tower is not initialized. Please call initialize_vision_modules() first.")
def set_vision_adapter_requires_grad(self, requires_grad: bool):
if self.mm_projector is not None:
for param in self.mm_projector.parameters():
param.requires_grad = requires_grad
else:
raise ValueError("Vision adapter is not initialized. Please call initialize_vision_modules() first.")
def set_vision_tower_dtype(self, dtype: torch.dtype):
if self.vision_tower is not None:
for p in self.vision_tower.parameters():
p.data = p.data.to(dtype)
else:
raise ValueError("Vision tower is not initialized. Please call initialize_vision_modules() first.")
def set_vision_adapter_dtype(self, dtype: torch.dtype):
if self.mm_projector is not None:
for p in self.mm_projector.parameters():
p.data = p.data.to(dtype)
else:
raise ValueError("Vision adapter is not initialized. Please call initialize_vision_modules() first.")
def is_vision_tower_frozen(self):
if self.vision_tower is not None:
return all(not p.requires_grad for p in self.vision_tower.parameters())
else:
raise ValueError("Vision tower is not initialized. Please call initialize_vision_modules() first.")
def is_vision_adapter_frozen(self):
if self.mm_projector is not None:
return all(not p.requires_grad for p in self.mm_projector.parameters())
else:
raise ValueError("Vision adapter is not initialized. Please call initialize_vision_modules() first.")
class FridayForCausalLM(Phi3ForCausalLM):
config_class = FridayConfig
def __init__(self, config: FridayConfig):
super().__init__(config)
self.config = config
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.image_token_id = config.cfg_special_tokens["image_token_id"]
self.image_start_id = config.cfg_special_tokens["image_start_token_id"]
self.image_end_id = config.cfg_special_tokens["image_end_token_id"]
self.model = FridayModel(config)
self.post_init()
def get_model(self) -> FridayModel:
return self.model
def get_vision_tower(self) -> SiglipVisionTower:
return self.model.get_vision_tower()
def get_vision_adapter(self) -> MLPAdapter:
return self.model.mm_projector
def get_llm_parameters(self, exclude_lora: bool = False):
return [
p for n, p in self.named_parameters()
if "vision_tower" not in n and "mm_projector" not in n and (not exclude_lora or ("lora_" not in n))
]
def get_llm_named_modules(self):
return {n: m for n, m in self.named_modules() if "vision_tower" not in n and "mm_projector" not in n}
def set_llm_requires_grad(self, requires_grad: bool, exclude_lora: bool = True):
for n, p in self.named_parameters():
if exclude_lora and ("lora_A" in n or "lora_B" in n):
continue
if "vision_tower" in n or "mm_projector" in n:
continue
p.requires_grad = requires_grad
def set_vision_tower_requires_grad(self, requires_grad: bool):
self.model.set_vision_tower_requires_grad(requires_grad)
def set_vision_adapter_requires_grad(self, requires_grad: bool):
self.model.set_vision_adapter_requires_grad(requires_grad)
def set_llm_dtype(self, dtype: torch.dtype):
for p in self.get_llm_parameters():
p.data = p.data.to(dtype)
def set_vision_tower_dtype(self, dtype: torch.dtype):
self.model.set_vision_tower_dtype(dtype)
def set_vision_adapter_dtype(self, dtype: torch.dtype):
self.model.set_vision_adapter_dtype(dtype)
def is_llm_frozen(self):
return all(not p.requires_grad for p in self.get_llm_parameters())
def is_vision_tower_frozen(self):
return self.model.is_vision_tower_frozen()
def is_vision_adapter_frozen(self):
return self.model.is_vision_adapter_frozen()
def initialize_vision_modules(self):
self.model.initialize_vision_modules()
def get_multimodal_input_embeddings(self, input_ids, image_features, return_labels=True) -> torch.Tensor:
emb_start_image_id = self.model.embed_tokens(torch.tensor([self.image_start_id], device=self.device))
emb_end_image_id = self.model.embed_tokens(torch.tensor([self.image_end_id], device=self.device))
id_ignore = torch.tensor([IGNORE_INDEX], device=self.device)
        # Optional safeguard: zero out image-token ids so they cannot affect
        # repetition penalties during generation (left disabled by default).
        # input_ids[input_ids == self.image_token_id] = 0
# Iterate over each batch item
embeds_list, labels_list = [], []
for batch_id, item_ids in enumerate(input_ids):
image_token_positions = (item_ids == self.image_token_id).nonzero(as_tuple=True)[0]
if len(image_token_positions) != image_features[batch_id].shape[0]:
raise ValueError(
f"Mismatch between number of image tokens ({len(image_token_positions)}) and number of image features ({image_features[batch_id].shape[0]})"
)
cursor = 0
emb_parts, lbl_parts = [], []
for indx_image, image_token_pos in enumerate(image_token_positions):
if image_token_pos > cursor:
span = item_ids[cursor:image_token_pos]
emb_parts.append(self.model.embed_tokens(span))
lbl_parts.append(span)
# <image_start>
emb_parts.append(emb_start_image_id)
lbl_parts.append(id_ignore)
# vision embeddings
image_tokens = image_features[batch_id][indx_image]
if image_tokens.shape[0] == 1 and image_tokens.ndim == 3:
image_tokens = image_tokens.squeeze(0)
emb_parts.append(image_tokens)
lbl_parts.append(id_ignore.repeat(image_tokens.shape[0]))
# <image_end>
emb_parts.append(emb_end_image_id)
lbl_parts.append(id_ignore)
cursor = image_token_pos + 1
# tail text
if cursor < item_ids.shape[0]:
tail = item_ids[cursor:]
emb_parts.append(self.model.embed_tokens(tail))
lbl_parts.append(tail)
embeds_list.append(torch.cat(emb_parts, dim=0))
labels_list.append(torch.cat(lbl_parts, dim=0))
return (embeds_list, labels_list) if return_labels else embeds_list
def prepare_inputs_for_multimodal(
self,
input_ids: torch.LongTensor,
images: List[List[PIL.Image.Image]], # B x N
position_ids: Optional[torch.LongTensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[List[torch.FloatTensor]],
labels: Optional[torch.LongTensor],
) -> Tuple[Optional[torch.Tensor], Optional[torch.LongTensor], Optional[torch.Tensor], Optional[List[torch.FloatTensor]], torch.Tensor, Optional[torch.Tensor]]:
# ─────────────────── early return (no image / streaming step) ───────────────────
# if we have already processed images and are in a streaming step we can skip the multimodal processing
# but we need to ensure the attention mask and position ids are correct
if past_key_values is not None and attention_mask is not None and input_ids.shape[1] == 1:
tgt = past_key_values[-1][-1].shape[-2] + 1
attention_mask = torch.cat(
[attention_mask,
torch.ones((attention_mask.size(0),
tgt - attention_mask.size(1)),
dtype=attention_mask.dtype,
device=attention_mask.device)],
dim=1,
)
position_ids = (attention_mask.sum(dim=1, keepdim=True) - 1).long()
return input_ids, position_ids, attention_mask, past_key_values, None, labels
# ─────────────────────────── images: (B, N) ───────────────────────────
if isinstance(images, list) and isinstance(images[0], list):
# images is a list of lists, each containing multiple images, B x N
# e.g. [[img1, img2], [img3, img4]]
assert len(images) == input_ids.shape[0], f"Batch size mismatch: {len(images)} vs {input_ids.shape[0]}"
image_features = []
for sublst_images in images:
if len(sublst_images) == 0:
image_features.append(torch.zeros((0, self.get_model().mm_projector.output_dim), device=self.device))
else:
if isinstance(sublst_images[0], PIL.Image.Image):
image_features.append(
self.model.compute_image_features(
self.model.vision_tower.preprocess_images(sublst_images, pad_and_stack_tensors=True)
)
)
elif isinstance(sublst_images[0], torch.Tensor):
# This should be a list of tensors of pre-processed images, [(N X 3 X W x H), ...]
image_features.append(
self.model.compute_image_features(sublst_images)
)
elif isinstance(images, list) and isinstance(images[0], PIL.Image.Image):
# images is a list of images for a single batch item, 1 x N
# e.g. [img1, img2, img3]
assert input_ids.shape[0] == 1, f"Batch size mismatch: {len(images)} vs {input_ids.shape[0]}"
image_features = [
self.model.compute_image_features(
self.model.vision_tower.preprocess_images(images, pad_and_stack_tensors=True)
)
]
elif isinstance(images, list) and isinstance(images[0], torch.Tensor):
# This should be a list of tensors of pre-processed images, [(N X 3 X W x H), ...]
# The list length should match the batch size
assert input_ids.shape[0] == len(images), f"Batch size mismatch: {len(images)} vs {input_ids.shape[0]}"
image_features = [
self.model.compute_image_features(imgs) for imgs in images
]
elif isinstance(images, PIL.Image.Image):
# images is a single image, 1 x 1
# e.g. img1
assert input_ids.shape[0] == 1, f"Batch size mismatch: {len(images)} vs {input_ids.shape[0]}"
image_features = [
self.model.compute_image_features(
self.model.vision_tower.preprocess_images([images])
)
]
else:
raise ValueError(f"Unsupported images format: {type(images)}. Expected list of PIL images, a single PIL image or a Tensor of pre-processed images")
# ─────────────────────────── image_features: (B x N x D) ───────────────────────────
if isinstance(image_features, list):
assert input_ids.shape[0] == len(image_features), f"Incorrectly formatted image_features: list length should match batch size"
assert isinstance(image_features[0], torch.Tensor), f"Incorrectly formatted image_features: list items should be tensors"
elif isinstance(image_features, torch.Tensor):
assert input_ids.shape[0] == image_features.shape[0], f"Incorrectly formatted image_features: tensor should match batch size"
# ───────────────────────────── pad handling prelims ──────────────────────────────
if attention_mask is None:
attention_mask = torch.ones_like(input_ids, dtype=torch.bool)
else:
attention_mask = attention_mask.bool()
if position_ids is None:
position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device)
input_ids_nopad = [ids[mask] for ids, mask in zip(input_ids, attention_mask)]
embeds_list, labels_list = self.get_multimodal_input_embeddings(
input_ids_nopad,
image_features,
return_labels=True
)
        # ───────────────────── pad back to a rectangle, then truncate ──────────────────────
new_input_embeds = torch.nn.utils.rnn.pad_sequence(
embeds_list,
batch_first=True,
padding_value=0.0
).to(dtype=self.dtype)
new_labels = torch.nn.utils.rnn.pad_sequence(
labels_list,
batch_first=True,
padding_value=IGNORE_INDEX
).long()
if self.config.tokenizer_model_max_length is not None:
new_input_embeds = new_input_embeds[:, :self.config.tokenizer_model_max_length]
new_labels = new_labels[:, :self.config.tokenizer_model_max_length]
# ────────────────────────────── attention mask and position ids ────────────────
attention_mask = (
torch.arange(new_input_embeds.size(1), device=input_ids.device)
.unsqueeze(0)
< torch.tensor([e.size(0) for e in embeds_list],
device=input_ids.device).unsqueeze(1)
)
raw_pos = attention_mask.cumsum(dim=1) - 1
position_ids = raw_pos.masked_fill(~attention_mask, 0).long()
if not self.training:
new_labels = None
return None, position_ids, attention_mask, past_key_values, new_input_embeds, new_labels
# ------------------------------------------------------------------
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
images: Optional[PIL.Image.Image] = None,
        **kwargs,
) -> Union[Tuple, CausalLMOutputWithPast]:
is_multi_modal = images is not None and not (
(
isinstance(images, list) and (len(images) == 0 or all(i == [] for i in images))
)
)
if inputs_embeds is None and is_multi_modal:
(
input_ids,
position_ids,
attention_mask,
past_key_values,
inputs_embeds,
labels
) = self.prepare_inputs_for_multimodal(
input_ids=input_ids,
images=images,
position_ids=position_ids,
attention_mask=attention_mask,
past_key_values=past_key_values,
labels=labels,
)
if cache_position is not None and inputs_embeds is not None and cache_position.shape[0] != inputs_embeds.shape[1]:
cache_position = torch.arange(inputs_embeds.shape[1], device=self.device)
return Phi3ForCausalLM.forward(
self,
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
labels=labels,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
**kwargs
)
def print_device_configuration(self):
print("*************Device Configuration*********")
if len(self.get_llm_parameters()) > 0:
llm_device = set({str(p.device) for p in self.get_llm_parameters()})
llm_dtype = set({p.dtype for p in self.get_llm_parameters()})
print(f"LLM Parameters:\t\t\tdevice: {llm_device}\tdtype: {llm_dtype}\tfrozen: {self.is_llm_frozen()}")
else:
print("LLM parameters have not been initialized")
if self.get_model().vision_tower is not None:
vt_device = set({str(p.device) for p in self.get_model().vision_tower.parameters()})
vt_dtype = set({p.dtype for p in self.get_model().vision_tower.parameters()})
print(f"Vision Tower Parameters:\tdevice: {vt_device}\tdtype: {vt_dtype}\tfrozen: {self.is_vision_tower_frozen()}")
else:
print("Vision tower parameters have not been initialized")
if self.get_model().mm_projector is not None:
mm_device = set({str(p.device) for p in self.get_model().mm_projector.parameters()})
mm_dtype = set({p.dtype for p in self.get_model().mm_projector.parameters()})
print(f"MM Projector Parameters:\tdevice: {mm_device}\tdtype: {mm_dtype}\tfrozen: {self.is_vision_adapter_frozen()}")
else:
print("MM Projector parameters have not been initialized")
print("******************************************")
def build_tokenizer(base_model_id: str) -> Tuple[AutoTokenizer, dict]:
    tok = AutoTokenizer.from_pretrained(base_model_id, padding_side="right")
    missing = [t for t in (IMAGE_TOKEN, IMG_START_TOKEN, IMG_END_TOKEN) if t not in tok.get_vocab()]
    if missing:
        tok.add_tokens(missing, special_tokens=True)
        tok.pad_token = tok.eos_token
    specials = {
        "image": tok.convert_tokens_to_ids(IMAGE_TOKEN),
        "start": tok.convert_tokens_to_ids(IMG_START_TOKEN),
        "end": tok.convert_tokens_to_ids(IMG_END_TOKEN),
    }
    return tok, specials
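# End-to-end sketch (illustrative; the repo id, file name, and prompt format are
# assumptions, and the tokenizer's added ids must line up with cfg_special_tokens):
#   tok, specials = build_tokenizer("microsoft/Phi-4-mini-reasoning")
#   model = FridayForCausalLM.from_pretrained("kevin510/friday", trust_remote_code=True)
#   prompt = f"Describe this image: {IMAGE_TOKEN}"
#   inputs = tok(prompt, return_tensors="pt")
#   out = model(input_ids=inputs.input_ids, images=[[PIL.Image.open("cat.png")]])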