# SMARTIES-v1-ViT-L / smarties_config.py
from transformers import PretrainedConfig
import os
import yaml
import requests
from functools import partial
import torch.nn as nn
class SMARTIESConfig(PretrainedConfig):
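    """Hugging Face configuration for the SMARTIES ViT model.

    Stores the vision-transformer encoder and decoder hyperparameters that the
    SMARTIES model implementation reads from this config. Every argument of
    `__init__` is kept as an attribute of the same name; any extra keyword
    arguments are forwarded to `PretrainedConfig`.
    """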
model_type = "SMARTIES-v1-ViT-B"
    def __init__(
        self,
        img_size=224,
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        norm_eps=1e-6,
        spectrum_specs=None,
        global_pool=False,
        norm_layer_eps=1e-6,
        mixed_precision='no',
        decoder_embed_dim=512,
        decoder_depth=8,
        decoder_num_heads=16,
        pos_drop_rate=0.0,
        **kwargs
    ):
        super().__init__(**kwargs)
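        # Input size and ViT encoder hyperparameters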
        self.img_size = img_size
        self.patch_size = patch_size
        self.embed_dim = embed_dim
        self.depth = depth
        self.num_heads = num_heads
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.norm_eps = norm_eps
        self.spectrum_specs = spectrum_specs
        self.global_pool = global_pool
        self.pos_drop_rate = pos_drop_rate
        self.norm_layer_eps = norm_layer_eps
        self.mixed_precision = mixed_precision
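        # Decoder hyperparameters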
        self.decoder_embed_dim = decoder_embed_dim
        self.decoder_depth = decoder_depth
        self.decoder_num_heads = decoder_num_heads
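

# Example usage (a minimal sketch; the directory name below is illustrative):
# build the config with its default values, save it to disk, and reload it via
# the standard PretrainedConfig serialization methods.
if __name__ == "__main__":
    config = SMARTIESConfig(img_size=224, patch_size=16, embed_dim=768, depth=12)
    config.save_pretrained("./smarties_config_demo")  # writes config.json
    reloaded = SMARTIESConfig.from_pretrained("./smarties_config_demo")
    print(reloaded.model_type, reloaded.embed_dim, reloaded.decoder_depth)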