LPX55 committed
Commit 1d54127 · 1 Parent(s): 98cf7ca
Files changed (1)
  1. flux/transformer_flux.py +2 -2
flux/transformer_flux.py CHANGED
@@ -22,7 +22,7 @@ import torch.nn.functional as F
 from diffusers.configuration_utils import ConfigMixin, register_to_config
 from .lora.peft import PeftAdapterMixin
 from diffusers.models.attention import FeedForward
-from .attention_processor import Attention, FluxAttnProcessor2_0, FluxSingleAttnProcessor2_0
+from .attention_processor import Attention, FluxAttnProcessor2_0
 from diffusers.models.modeling_utils import ModelMixin
 from .normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNormZeroSingle
 from diffusers.utils import USE_PEFT_BACKEND, is_torch_version, logging, scale_lora_layers, unscale_lora_layers
@@ -178,7 +178,7 @@ class FluxSingleTransformerBlock(nn.Module):
         self.act_mlp = nn.GELU(approximate="tanh")
         self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim)
 
-        processor = FluxSingleAttnProcessor2_0()
+        processor = FluxAttnProcessor2_0()
         self.attn = Attention(
             query_dim=dim,
             cross_attention_dim=None,
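
For context, a minimal sketch of why the swap works, assuming the vendored flux/attention_processor.py mirrors upstream diffusers, where FluxAttnProcessor2_0 also covers the self-attention-only path (encoder_hidden_states=None) that FluxSingleAttnProcessor2_0 used to handle. The Attention kwargs beyond query_dim and cross_attention_dim are illustrative guesses, not taken from this diff.

# Hedged sketch, not the repository's code: exercises FluxAttnProcessor2_0 on the
# self-attention-only path used by FluxSingleTransformerBlock. Assumes the vendored
# module keeps upstream diffusers signatures; values here are illustrative.
import torch

from flux.attention_processor import Attention, FluxAttnProcessor2_0

dim, heads, head_dim = 64, 4, 16
attn = Attention(
    query_dim=dim,
    cross_attention_dim=None,          # single block: no separate text/context stream
    dim_head=head_dim,                 # illustrative value, not from this diff
    heads=heads,                       # illustrative value, not from this diff
    out_dim=dim,
    bias=True,
    processor=FluxAttnProcessor2_0(),  # the class this commit switches to
    qk_norm="rms_norm",
    eps=1e-6,
    pre_only=True,                     # the single block applies its own proj_out afterwards
)

hidden_states = torch.randn(1, 32, dim)
out = attn(hidden_states=hidden_states)  # rotary embeddings omitted in this sketch
print(out.shape)                         # expected: torch.Size([1, 32, 64])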