
[bugfix] Initialize attention bias on the same device as Query/Key/Value

#1
Files changed (1)
  1. modeling.py +1 -1
modeling.py CHANGED
@@ -910,7 +910,7 @@ class NewModel(NewPreTrainedModel):
 
         batch_size, seq_length = input_shape
         if unpad_inputs and self.config.use_memory_efficient_attention:
-            attention_bias = xops.fmha.attn_bias.BlockDiagonalMask.from_seqlens(length)
+            attention_bias = xops.fmha.attn_bias.BlockDiagonalMask.from_seqlens(length, device=embedding_output.device)
         else:
             # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
             # ourselves in which case we just need to make it broadcastable to all heads.
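
For context, a minimal sketch of the kind of device mismatch this patch avoids. It uses plain torch tensors rather than xformers' BlockDiagonalMask, and the shapes and the CUDA device are illustrative assumptions, not values taken from the model:

import torch

# Hypothetical shapes; assumes a CUDA device is available.
# The attention scores live on the model's device, while a tensor created
# without an explicit `device` argument defaults to CPU.
scores = torch.randn(2, 8, 16, 16, device="cuda")  # [batch, heads, seq, seq] on GPU
bias = torch.zeros(16, 16)                          # created on CPU by default

try:
    _ = scores + bias  # RuntimeError: tensors are on different devices
except RuntimeError as err:
    print(err)

# Passing the device explicitly, as the patch does via
# `device=embedding_output.device`, keeps the bias co-located with the scores.
bias = torch.zeros(16, 16, device=scores.device)
_ = scores + bias  # no device mismatch

Passing `device=embedding_output.device` when building the block-diagonal mask keeps the attention bias on the same device as the Query/Key/Value tensors, so memory-efficient attention does not fail with a cross-device error when the model runs on GPU.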