from typing import Callable, List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
from torch import nn

from diffusers.models.attention_processor import Attention


class JointAttnProcessor2_0:
    """Attention processor typically used for SD3-like joint (sample + context) self-attention projections."""

    def __init__(self):
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError("JointAttnProcessor2_0 requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0.")

    def __call__(
        self,
        attn: Attention,
        hidden_states: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        *args,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
        residual = hidden_states
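
        # Flatten 4D (batch, channel, height, width) inputs to the (batch, sequence, channels) layout.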
        input_ndim = hidden_states.ndim
        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
        context_input_ndim = encoder_hidden_states.ndim
        if context_input_ndim == 4:
            batch_size, channel, height, width = encoder_hidden_states.shape
            encoder_hidden_states = encoder_hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size = encoder_hidden_states.shape[0]
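
        # `sample` projections.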
        query = attn.to_q(hidden_states)
        key = attn.to_k(hidden_states)
        value = attn.to_v(hidden_states)
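
        # `context` projections.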
        encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states)
        encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states)
        encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states)
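
        # Joint attention: concatenate the sample and context tokens along the sequence dimension.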
        query = torch.cat([query, encoder_hidden_states_query_proj], dim=1)
        key = torch.cat([key, encoder_hidden_states_key_proj], dim=1)
        value = torch.cat([value, encoder_hidden_states_value_proj], dim=1)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads
        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
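
        # Scaled dot-product attention over the combined sample + context sequence.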
        hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False)
        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)
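
        # Split the joint attention output back into the sample and context streams.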
        hidden_states, encoder_hidden_states = (
            hidden_states[:, : residual.shape[1]],
            hidden_states[:, residual.shape[1] :],
        )
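
        # linear proj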
        hidden_states = attn.to_out[0](hidden_states)
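        # dropout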
        hidden_states = attn.to_out[1](hidden_states)
        if not attn.context_pre_only:
            encoder_hidden_states = attn.to_add_out(encoder_hidden_states)
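
        # Restore the original 4D shape if the inputs were spatial.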
        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
        if context_input_ndim == 4:
            encoder_hidden_states = encoder_hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        return hidden_states, encoder_hidden_states


class IPJointAttnProcessor2_0(torch.nn.Module):
    """Attention processor for SD3-like joint self-attention with an additional IP-Adapter image-prompt branch.

    Args:
        context_dim: Channel dimension of the incoming image-prompt (`ip_hidden_states`) embeddings.
        hidden_dim: Inner dimension of the attention block this processor is attached to.
        scale: Weight of the image-prompt attention output when it is added to the sample stream.
    """

    def __init__(self, context_dim, hidden_dim, scale=1.0):
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError("IPJointAttnProcessor2_0 requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0.")
        super().__init__()
        self.scale = scale
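
        # Extra key/value projections for the image-prompt (IP) tokens.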
        self.add_k_proj_ip = nn.Linear(context_dim, hidden_dim)
        self.add_v_proj_ip = nn.Linear(context_dim, hidden_dim)

    def __call__(
        self,
        attn: Attention,
        hidden_states: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        ip_hidden_states: Optional[torch.FloatTensor] = None,
        *args,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
        residual = hidden_states
        input_ndim = hidden_states.ndim
        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
        context_input_ndim = encoder_hidden_states.ndim
        if context_input_ndim == 4:
            batch_size, channel, height, width = encoder_hidden_states.shape
            encoder_hidden_states = encoder_hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size = encoder_hidden_states.shape[0]
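
        # `sample` projections.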
        query = attn.to_q(hidden_states)
        key = attn.to_k(hidden_states)
        value = attn.to_v(hidden_states)
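
        # Keep the un-concatenated sample query for the IP-adapter branch below.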
        sample_query = query
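
        # `context` projections.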
        encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states)
        encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states)
        encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states)
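
        # Joint attention: concatenate the sample and context tokens along the sequence dimension.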
        query = torch.cat([query, encoder_hidden_states_query_proj], dim=1)
        key = torch.cat([key, encoder_hidden_states_key_proj], dim=1)
        value = torch.cat([value, encoder_hidden_states_value_proj], dim=1)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads
        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
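
        # Scaled dot-product attention over the combined sample + context sequence.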
        hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False)
        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)
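
        # Split the joint attention output back into the sample and context streams.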
        hidden_states, encoder_hidden_states = (
            hidden_states[:, : residual.shape[1]],
            hidden_states[:, residual.shape[1] :],
        )
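
        # IP-adapter branch: the sample query attends to the projected image-prompt tokens.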
        ip_key = self.add_k_proj_ip(ip_hidden_states)
        ip_value = self.add_v_proj_ip(ip_hidden_states)
        ip_query = sample_query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        ip_hidden_states = F.scaled_dot_product_attention(ip_query, ip_key, ip_value, dropout_p=0.0, is_causal=False)
        ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        ip_hidden_states = ip_hidden_states.to(ip_query.dtype)
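
        # Blend the image-prompt attention output into the sample stream, weighted by `scale`.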
        hidden_states = hidden_states + self.scale * ip_hidden_states
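
        # linear proj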
        hidden_states = attn.to_out[0](hidden_states)
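        # dropout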
        hidden_states = attn.to_out[1](hidden_states)
        if not attn.context_pre_only:
            encoder_hidden_states = attn.to_add_out(encoder_hidden_states)
        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
        if context_input_ndim == 4:
            encoder_hidden_states = encoder_hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        return hidden_states, encoder_hidden_states