from collections import OrderedDict
import math
import requests
from io import BytesIO
from functools import partial
from PIL import Image
from typing import Callable, Optional, Sequence, Tuple, List, Union
import numpy as np

import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.init import trunc_normal_
from torchvision import transforms
from torchvision.transforms import InterpolationMode
import warnings
from torch import Tensor
from torch.nn.functional import *
from torch.nn.modules.activation import *
from torch.nn.init import constant_, xavier_normal_, xavier_uniform_
from transformers import PreTrainedModel
from transformers.integrations import is_deepspeed_zero3_enabled


def get_abs_pos(abs_pos, tgt_size):
    # abs_pos: (L, C) positional embedding laid out on a square grid, L = src_size ** 2
    # tgt_size: (H, W) target grid size
    # returns: (H * W, C) embedding bicubically resized to the target grid
    src_size = int(math.sqrt(abs_pos.size(0)))

    dtype = abs_pos.dtype

    return F.interpolate(
        abs_pos.float().reshape(1, src_size, src_size, -1).permute(0, 3, 1, 2),
        size=(tgt_size[0], tgt_size[1]),
        mode="bicubic",
        align_corners=False,
    ).permute(0, 2, 3, 1).flatten(0, 2).to(dtype=dtype)
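

# Shape sketch (illustrative only; the sizes below are made-up): a square grid of
# positional embeddings is bicubically resampled to an arbitrary (H, W) target grid,
# keeping the channel dimension.
def _example_get_abs_pos():
    pos = torch.randn(24 * 24, 1152)      # (L, C) with L a perfect square
    resized = get_abs_pos(pos, (16, 20))  # -> (16 * 20, 1152)
    assert resized.shape == (16 * 20, 1152)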


def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """
    grid_size: int of the grid height and width, or a (height, width) tuple
    return:
        pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
    """
    if isinstance(grid_size, int):
        grid_h_size, grid_w_size = grid_size, grid_size
    else:
        grid_h_size, grid_w_size = grid_size[0], grid_size[1]

    grid_h = np.arange(grid_h_size, dtype=np.float32)
    grid_w = np.arange(grid_w_size, dtype=np.float32)
    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
    grid = np.stack(grid, axis=0)

    grid = grid.reshape([2, 1, grid_h_size, grid_w_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed
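

# Quick shape check (illustrative only): a 4x6 grid with a 64-dim embedding yields one
# 64-d sin/cos vector per grid cell, ordered row by row.
def _example_get_2d_sincos_pos_embed():
    emb = get_2d_sincos_pos_embed(64, (4, 6))
    assert emb.shape == (4 * 6, 64)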


def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    assert embed_dim % 2 == 0

    # use half of the dimensions to encode grid_h, the other half for grid_w
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)

    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
    return emb


def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D)
    """
    assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=np.float32)
    omega /= embed_dim / 2.
    omega = 1. / 10000 ** omega  # (D/2,)

    pos = pos.reshape(-1)  # (M,)
    out = np.einsum('m,d->md', pos, omega)  # (M, D/2), outer product

    emb_sin = np.sin(out)  # (M, D/2)
    emb_cos = np.cos(out)  # (M, D/2)

    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
    return emb
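

# Worked example (illustrative only): with embed_dim=8, column d of the sin half is
# sin(pos / 10000**(2*d/8)) and the cos half mirrors it with cos.
def _example_get_1d_sincos_pos_embed():
    emb = get_1d_sincos_pos_embed_from_grid(8, np.array([0.0, 1.0, 2.0]))
    assert emb.shape == (3, 8)
    assert np.allclose(emb[:, 1], np.sin(np.array([0.0, 1.0, 2.0]) / 10000 ** (2 * 1 / 8)))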


class Resampler(nn.Module):
    """
    A 2D perceiver-resampler network with one cross-attention layer between
    (grid_size**2) learnable queries and the input features, using a 2d sincos pos_emb.
    Outputs:
        A tensor with the shape of (batch_size, grid_size**2, embed_dim)
    """

    def __init__(
            self,
            grid_size,
            embed_dim,
            num_heads,
            kv_dim=None,
            norm_layer=partial(nn.LayerNorm, eps=1e-6),
            adaptive=False
    ):
        super().__init__()
        self.num_queries = grid_size ** 2
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.adaptive = adaptive

        self.pos_embed = nn.Parameter(
            torch.from_numpy(get_2d_sincos_pos_embed(embed_dim, grid_size)).float()
        ).requires_grad_(False)
        self.query = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
        trunc_normal_(self.query, std=.02)  # truncated-normal init for the learnable queries

        if kv_dim is not None and kv_dim != embed_dim:
            self.kv_proj = nn.Linear(kv_dim, embed_dim, bias=False)
        else:
            self.kv_proj = nn.Identity()

        self.attn = MultiheadAttention(embed_dim, num_heads)
        self.ln_q = norm_layer(embed_dim)
        self.ln_kv = norm_layer(embed_dim)

        self.ln_post = norm_layer(embed_dim)
        self.proj = nn.Parameter((embed_dim ** -0.5) * torch.randn(embed_dim, embed_dim))

        # initialize Linear and LayerNorm weights; without this call _init_weights is never used
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x, tgt_size=None, attn_mask=None):
        # x: (batch_size, seq_len, kv_dim) input features
        # tgt_size: (H, W) grid the input sequence was flattened from (H * W == seq_len)
        if self.adaptive:
            pos_embed = torch.Tensor(get_2d_sincos_pos_embed(self.embed_dim, tgt_size)).float().to(device=x.device, dtype=x.dtype)
        else:
            pos_embed = get_abs_pos(self.pos_embed, tgt_size)

        x = self.kv_proj(x)
        x = self.ln_kv(x).permute(1, 0, 2)

        N = x.shape[1]
        q = self.ln_q(self.query)
        out = self.attn(
            self._repeat(q, N) + self.pos_embed.unsqueeze(1),
            x + pos_embed.unsqueeze(1),
            x,
            attn_mask=attn_mask)[0]
        x = out.permute(1, 0, 2)
        x = self.ln_post(x)
        x = x @ self.proj
        return x

    def _repeat(self, query, N: int):
        return query.unsqueeze(1).repeat(1, N, 1)
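

# Usage sketch (illustrative only; all sizes are made-up): pool a flattened grid of
# visual features down to grid_size**2 query tokens. tgt_size is the (H, W) grid the
# input sequence was flattened from, so H * W must equal the sequence length.
def _example_resampler():
    resampler = Resampler(grid_size=8, embed_dim=128, num_heads=8, kv_dim=256)
    feats = torch.randn(2, 24 * 24, 256)          # (batch, seq_len, kv_dim)
    pooled = resampler(feats, tgt_size=(24, 24))  # -> (batch, 8 * 8, 128)
    assert pooled.shape == (2, 64, 128)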


class MultiheadAttention(nn.MultiheadAttention):
    def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False,
                 add_zero_attn=False, kdim=None, vdim=None, batch_first=False, device=None, dtype=None):
        super().__init__(embed_dim, num_heads, dropout, bias, add_bias_kv, add_zero_attn, kdim, vdim, batch_first, device, dtype)

        # replace out_proj with a plain nn.Linear (the parent uses NonDynamicallyQuantizableLinear)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def forward(
            self,
            query: Tensor,
            key: Tensor,
            value: Tensor,
            key_padding_mask: Optional[Tensor] = None,
            need_weights: bool = True,
            attn_mask: Optional[Tensor] = None,
            average_attn_weights: bool = True,
            is_causal: bool = False) -> Tuple[Tensor, Optional[Tensor]]:
        why_not_fast_path = ''
        if ((attn_mask is not None and torch.is_floating_point(attn_mask))
                or (key_padding_mask is not None) and torch.is_floating_point(key_padding_mask)):
            why_not_fast_path = "floating-point masks are not supported for fast path."

        is_batched = query.dim() == 3

        key_padding_mask = F._canonical_mask(
            mask=key_padding_mask,
            mask_name="key_padding_mask",
            other_type=F._none_or_dtype(attn_mask),
            other_name="attn_mask",
            target_type=query.dtype
        )

        attn_mask = F._canonical_mask(
            mask=attn_mask,
            mask_name="attn_mask",
            other_type=None,
            other_name="",
            target_type=query.dtype,
            check_other=False,
        )

        if not is_batched:
            why_not_fast_path = f"input not batched; expected query.dim() of 3 but got {query.dim()}"
        elif query is not key or key is not value:
            why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
        elif self.in_proj_bias is not None and query.dtype != self.in_proj_bias.dtype:
            why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
        elif self.in_proj_weight is None:
            why_not_fast_path = "in_proj_weight was None"
        elif query.dtype != self.in_proj_weight.dtype:
            why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
        elif self.training:
            why_not_fast_path = "training is enabled"
        elif (self.num_heads % 2) != 0:
            why_not_fast_path = "self.num_heads is not even"
        elif not self.batch_first:
            why_not_fast_path = "batch_first was not True"
        elif self.bias_k is not None:
            why_not_fast_path = "self.bias_k was not None"
        elif self.bias_v is not None:
            why_not_fast_path = "self.bias_v was not None"
        elif self.add_zero_attn:
            why_not_fast_path = "add_zero_attn was enabled"
        elif not self._qkv_same_embed_dim:
            why_not_fast_path = "_qkv_same_embed_dim was not True"
        elif query.is_nested and (key_padding_mask is not None or attn_mask is not None):
            why_not_fast_path = "supplying both src_key_padding_mask and src_mask at the same time \
                                 is not supported with NestedTensor input"
        elif torch.is_autocast_enabled():
            why_not_fast_path = "autocast is enabled"

        if not why_not_fast_path:
            tensor_args = (
                query,
                key,
                value,
                self.in_proj_weight,
                self.in_proj_bias,
                self.out_proj.weight,
                self.out_proj.bias,
            )
            if torch.overrides.has_torch_function(tensor_args):
                why_not_fast_path = "some Tensor argument has_torch_function"
            elif _is_make_fx_tracing():
                why_not_fast_path = "we are running make_fx tracing"
            elif not all(_check_arg_device(x) for x in tensor_args):
                why_not_fast_path = ("some Tensor argument's device is neither one of "
                                     f"cpu, cuda or {torch.utils.backend_registration._privateuse1_backend_name}")
            elif torch.is_grad_enabled() and any(_arg_requires_grad(x) for x in tensor_args):
                why_not_fast_path = ("grad is enabled and at least one of query or the "
                                     "input/output projection weights or biases requires_grad")
            if not why_not_fast_path:
                merged_mask, mask_type = self.merge_masks(attn_mask, key_padding_mask, query)

                if self.in_proj_bias is not None and self.in_proj_weight is not None:
                    return torch._native_multi_head_attention(
                        query,
                        key,
                        value,
                        self.embed_dim,
                        self.num_heads,
                        self.in_proj_weight,
                        self.in_proj_bias,
                        self.out_proj.weight,
                        self.out_proj.bias,
                        merged_mask,
                        need_weights,
                        average_attn_weights,
                        mask_type)

        any_nested = query.is_nested or key.is_nested or value.is_nested
        assert not any_nested, ("MultiheadAttention does not support NestedTensor outside of its fast path. " +
                                f"The fast path was not hit because {why_not_fast_path}")

        if self.batch_first and is_batched:
            # make sure that the transpose op does not affect the "is" property
            if key is value:
                if query is key:
                    query = key = value = query.transpose(1, 0)
                else:
                    query, key = (x.transpose(1, 0) for x in (query, key))
                    value = key
            else:
                query, key, value = (x.transpose(1, 0) for x in (query, key, value))

        if not self._qkv_same_embed_dim:
            attn_output, attn_output_weights = self.multi_head_attention_forward(
                query, key, value, self.embed_dim, self.num_heads,
                self.in_proj_weight, self.in_proj_bias,
                self.bias_k, self.bias_v, self.add_zero_attn,
                self.dropout, self.out_proj.weight, self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask, need_weights=need_weights,
                attn_mask=attn_mask,
                use_separate_proj_weight=True,
                q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
                v_proj_weight=self.v_proj_weight,
                average_attn_weights=average_attn_weights,
                is_causal=is_causal)
        else:
            attn_output, attn_output_weights = self.multi_head_attention_forward(
                query, key, value, self.embed_dim, self.num_heads,
                self.in_proj_weight, self.in_proj_bias,
                self.bias_k, self.bias_v, self.add_zero_attn,
                self.dropout, self.out_proj.weight, self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask,
                need_weights=need_weights,
                attn_mask=attn_mask,
                average_attn_weights=average_attn_weights,
                is_causal=is_causal)
        if self.batch_first and is_batched:
            return attn_output.transpose(1, 0), attn_output_weights
        else:
            return attn_output, attn_output_weights

    def multi_head_attention_forward(
            self,
            query: Tensor,
            key: Tensor,
            value: Tensor,
            embed_dim_to_check: int,
            num_heads: int,
            in_proj_weight: Optional[Tensor],
            in_proj_bias: Optional[Tensor],
            bias_k: Optional[Tensor],
            bias_v: Optional[Tensor],
            add_zero_attn: bool,
            dropout_p: float,
            out_proj_weight: Tensor,
            out_proj_bias: Optional[Tensor],
            training: bool = True,
            key_padding_mask: Optional[Tensor] = None,
            need_weights: bool = True,
            attn_mask: Optional[Tensor] = None,
            use_separate_proj_weight: bool = False,
            q_proj_weight: Optional[Tensor] = None,
            k_proj_weight: Optional[Tensor] = None,
            v_proj_weight: Optional[Tensor] = None,
            static_k: Optional[Tensor] = None,
            static_v: Optional[Tensor] = None,
            average_attn_weights: bool = True,
            is_causal: bool = False,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias)
        if has_torch_function(tens_ops):
            return handle_torch_function(
                multi_head_attention_forward,
                tens_ops,
                query,
                key,
                value,
                embed_dim_to_check,
                num_heads,
                in_proj_weight,
                in_proj_bias,
                bias_k,
                bias_v,
                add_zero_attn,
                dropout_p,
                out_proj_weight,
                out_proj_bias,
                training=training,
                key_padding_mask=key_padding_mask,
                need_weights=need_weights,
                attn_mask=attn_mask,
                is_causal=is_causal,
                use_separate_proj_weight=use_separate_proj_weight,
                q_proj_weight=q_proj_weight,
                k_proj_weight=k_proj_weight,
                v_proj_weight=v_proj_weight,
                static_k=static_k,
                static_v=static_v,
                average_attn_weights=average_attn_weights,
            )

        is_batched = _mha_shape_check(query, key, value, key_padding_mask, attn_mask, num_heads)

        # For unbatched input, we unsqueeze at the expected batch-dim to pretend that the input
        # is batched, run the computation, and squeeze the batch dimension so that the
        # output doesn't carry this temporary batch dimension.
        if not is_batched:
            # unsqueeze if the input is unbatched
            query = query.unsqueeze(1)
            key = key.unsqueeze(1)
            value = value.unsqueeze(1)
            if key_padding_mask is not None:
                key_padding_mask = key_padding_mask.unsqueeze(0)

        # set up shape vars
        tgt_len, bsz, embed_dim = query.shape
        src_len, _, _ = key.shape

        key_padding_mask = _canonical_mask(
            mask=key_padding_mask,
            mask_name="key_padding_mask",
            other_type=_none_or_dtype(attn_mask),
            other_name="attn_mask",
            target_type=query.dtype
        )

        if is_causal and attn_mask is None:
            raise RuntimeError(
                "Need attn_mask if specifying the is_causal hint. "
                "You may use the Transformer module method "
                "`generate_square_subsequent_mask` to create this mask."
            )

        if is_causal and key_padding_mask is None and not need_weights:
            # with no key_padding_mask and no need for weights, the is_causal hint can be
            # passed straight to SDPA instead of materialising an attn_mask
            attn_mask = None
        else:
            attn_mask = _canonical_mask(
                mask=attn_mask,
                mask_name="attn_mask",
                other_type=None,
                other_name="",
                target_type=query.dtype,
                check_other=False,
            )

            if key_padding_mask is not None:
                # the key_padding_mask is merged into attn_mask below, so the result is
                # no longer purely causal; turn off the is_causal hint
                is_causal = False

        assert embed_dim == embed_dim_to_check, \
            f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
        if isinstance(embed_dim, torch.Tensor):
            # embed_dim can be a tensor when JIT tracing
            head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
        else:
            head_dim = embed_dim // num_heads
        assert head_dim * num_heads == embed_dim, f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
        if use_separate_proj_weight:
            # allow MHA to have different embedding dimensions when separate projection weights are used
            assert key.shape[:2] == value.shape[:2], \
                f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
        else:
            assert key.shape == value.shape, f"key shape {key.shape} does not match value shape {value.shape}"

        #
        # compute in-projection
        #
        if not use_separate_proj_weight:
            assert in_proj_weight is not None, "use_separate_proj_weight is False but in_proj_weight is None"
            q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
        else:
            assert q_proj_weight is not None, "use_separate_proj_weight is True but q_proj_weight is None"
            assert k_proj_weight is not None, "use_separate_proj_weight is True but k_proj_weight is None"
            assert v_proj_weight is not None, "use_separate_proj_weight is True but v_proj_weight is None"
            if in_proj_bias is None:
                b_q = b_k = b_v = None
            else:
                b_q, b_k, b_v = in_proj_bias.chunk(3)
            q, k, v = _in_projection(query, key, value, q_proj_weight, k_proj_weight, v_proj_weight, b_q, b_k, b_v)

        # prep attention mask
        if attn_mask is not None:
            # ensure attn_mask's dim is 3
            if attn_mask.dim() == 2:
                correct_2d_size = (tgt_len, src_len)
                if attn_mask.shape != correct_2d_size:
                    raise RuntimeError(f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.")
                attn_mask = attn_mask.unsqueeze(0)
            elif attn_mask.dim() == 3:
                correct_3d_size = (bsz * num_heads, tgt_len, src_len)
                if attn_mask.shape != correct_3d_size:
                    raise RuntimeError(f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.")
            else:
                raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported")

        # add bias along batch dimension (currently second)
        if bias_k is not None and bias_v is not None:
            assert static_k is None, "bias cannot be added to static key."
            assert static_v is None, "bias cannot be added to static value."
            k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = pad(attn_mask, (0, 1))
            if key_padding_mask is not None:
                key_padding_mask = pad(key_padding_mask, (0, 1))
        else:
            assert bias_k is None
            assert bias_v is None

        #
        # reshape q, k, v for multi-head attention and make them batch first
        #
        q = q.view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
        if static_k is None:
            k = k.view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
        else:
            assert static_k.size(0) == bsz * num_heads, \
                f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}"
            assert static_k.size(2) == head_dim, \
                f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
            k = static_k
        if static_v is None:
            v = v.view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
        else:
            assert static_v.size(0) == bsz * num_heads, \
                f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}"
            assert static_v.size(2) == head_dim, \
                f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
            v = static_v

        # add zero attention along batch dimension (now first)
        if add_zero_attn:
            zero_attn_shape = (bsz * num_heads, 1, head_dim)
            k = torch.cat([k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1)
            v = torch.cat([v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1)
            if attn_mask is not None:
                attn_mask = pad(attn_mask, (0, 1))
            if key_padding_mask is not None:
                key_padding_mask = pad(key_padding_mask, (0, 1))

        # update source sequence length after adjustments
        src_len = k.size(1)

        # merge key padding and attention masks
        if key_padding_mask is not None:
            assert key_padding_mask.shape == (bsz, src_len), \
                f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
            key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len). \
                expand(-1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len)
            if attn_mask is None:
                attn_mask = key_padding_mask
            else:
                attn_mask = attn_mask + key_padding_mask

        # no dropout at evaluation time
        if not training:
            dropout_p = 0.0

        #
        # calculate attention and out projection
        #
        if need_weights:
            B, Nt, E = q.shape
            q_scaled = q / math.sqrt(E)

            assert not (is_causal and attn_mask is None), "FIXME: is_causal not implemented for need_weights"

            if attn_mask is not None:
                attn_output_weights = torch.baddbmm(attn_mask, q_scaled, k.transpose(-2, -1))
            else:
                attn_output_weights = torch.bmm(q_scaled, k.transpose(-2, -1))
            attn_output_weights = softmax(attn_output_weights, dim=-1)
            if dropout_p > 0.0:
                attn_output_weights = dropout(attn_output_weights, p=dropout_p)

            attn_output = torch.bmm(attn_output_weights, v)

            attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len * bsz, embed_dim)
            attn_output = self.out_proj(attn_output)
            attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))

            # optionally average attention weights over heads
            attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
            if average_attn_weights:
                attn_output_weights = attn_output_weights.mean(dim=1)

            if not is_batched:
                # squeeze the output if input was unbatched
                attn_output = attn_output.squeeze(1)
                attn_output_weights = attn_output_weights.squeeze(0)
            return attn_output, attn_output_weights
        else:
            # attn_mask can be either (L, S) or (N * num_heads, L, S); if it is (1, L, S),
            # unsqueeze to (1, 1, L, S) to match scaled_dot_product_attention's expected input
            if attn_mask is not None:
                if attn_mask.size(0) == 1 and attn_mask.dim() == 3:
                    attn_mask = attn_mask.unsqueeze(0)
                else:
                    attn_mask = attn_mask.view(bsz, num_heads, -1, src_len)

            q = q.view(bsz, num_heads, tgt_len, head_dim)
            k = k.view(bsz, num_heads, src_len, head_dim)
            v = v.view(bsz, num_heads, src_len, head_dim)

            attn_output = F.scaled_dot_product_attention(q, k, v, attn_mask, dropout_p, is_causal)
            attn_output = attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim)

            attn_output = self.out_proj(attn_output)
            attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
            if not is_batched:
                # squeeze the output if input was unbatched
                attn_output = attn_output.squeeze(1)
            return attn_output, None


def _mha_shape_check(query: Tensor, key: Tensor, value: Tensor,
                     key_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor], num_heads: int):
    # Verifies the expected shapes of `query`, `key`, `value`, `key_padding_mask` and
    # `attn_mask`, and returns whether the input is batched (3-D) or unbatched (2-D).

    if query.dim() == 3:
        # batched inputs
        is_batched = True
        assert key.dim() == 3 and value.dim() == 3, \
            ("For batched (3-D) `query`, expected `key` and `value` to be 3-D"
             f" but found {key.dim()}-D and {value.dim()}-D tensors respectively")
        if key_padding_mask is not None:
            assert key_padding_mask.dim() == 2, \
                ("For batched (3-D) `query`, expected `key_padding_mask` to be `None` or 2-D"
                 f" but found {key_padding_mask.dim()}-D tensor instead")
        if attn_mask is not None:
            assert attn_mask.dim() in (2, 3), \
                ("For batched (3-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
                 f" but found {attn_mask.dim()}-D tensor instead")
    elif query.dim() == 2:
        # unbatched inputs
        is_batched = False
        assert key.dim() == 2 and value.dim() == 2, \
            ("For unbatched (2-D) `query`, expected `key` and `value` to be 2-D"
             f" but found {key.dim()}-D and {value.dim()}-D tensors respectively")

        if key_padding_mask is not None:
            assert key_padding_mask.dim() == 1, \
                ("For unbatched (2-D) `query`, expected `key_padding_mask` to be `None` or 1-D"
                 f" but found {key_padding_mask.dim()}-D tensor instead")

        if attn_mask is not None:
            assert attn_mask.dim() in (2, 3), \
                ("For unbatched (2-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
                 f" but found {attn_mask.dim()}-D tensor instead")
            if attn_mask.dim() == 3:
                expected_shape = (num_heads, query.shape[0], key.shape[0])
                assert attn_mask.shape == expected_shape, \
                    (f"Expected `attn_mask` shape to be {expected_shape} but got {attn_mask.shape}")
    else:
        raise AssertionError(
            f"query should be unbatched 2D or batched 3D tensor but received {query.dim()}-D query tensor")

    return is_batched


def _canonical_mask(
        mask: Optional[Tensor],
        mask_name: str,
        other_type: Optional[DType],
        other_name: str,
        target_type: DType,
        check_other: bool = True,
) -> Optional[Tensor]:
    if mask is not None:
        _mask_dtype = mask.dtype
        _mask_is_float = torch.is_floating_point(mask)
        if _mask_dtype != torch.bool and not _mask_is_float:
            raise AssertionError(
                f"only bool and floating types of {mask_name} are supported")
        if check_other and other_type is not None:
            if _mask_dtype != other_type:
                warnings.warn(
                    f"Support for mismatched {mask_name} and {other_name} "
                    "is deprecated. Use same type for both instead."
                )
        if not _mask_is_float:
            # convert a boolean mask to an additive float mask with -inf at masked positions
            mask = (
                torch.zeros_like(mask, dtype=target_type)
                .masked_fill_(mask, float("-inf"))
            )
    return mask
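

# Example (illustrative only): a boolean mask is converted into an additive float mask,
# with masked (True) positions set to -inf and the rest to 0.
def _example_canonical_mask():
    bool_mask = torch.tensor([[False, True]])
    float_mask = _canonical_mask(bool_mask, "key_padding_mask", None, "", torch.float32)
    assert torch.equal(float_mask, torch.tensor([[0.0, float("-inf")]]))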


def _none_or_dtype(input: Optional[Tensor]) -> Optional[DType]:
    if input is None:
        return None
    elif isinstance(input, torch.Tensor):
        return input.dtype
    raise RuntimeError("input to _none_or_dtype() must be None or torch.Tensor")


def _in_projection_packed(
    q: Tensor,
    k: Tensor,
    v: Tensor,
    w: Tensor,
    b: Optional[Tensor] = None,
) -> List[Tensor]:
    r"""
    Performs the in-projection step of the attention operation, using packed weights.
    Output is a triple containing projection tensors for query, key and value.
    Args:
        q, k, v: query, key and value tensors to be projected. For self-attention,
            these are typically the same tensor; for encoder-decoder attention,
            k and v are typically the same tensor. (We take advantage of these
            identities for performance if they are present.) Regardless, q, k and v
            must share a common embedding dimension; otherwise their shapes may vary.
        w: projection weights for q, k and v, packed into a single tensor. Weights
            are packed along dimension 0, in q, k, v order.
        b: optional projection biases for q, k and v, packed into a single tensor
            in q, k, v order.
    Shape:
        Inputs:
        - q: :math:`(..., E)` where E is the embedding dimension
        - k: :math:`(..., E)` where E is the embedding dimension
        - v: :math:`(..., E)` where E is the embedding dimension
        - w: :math:`(E * 3, E)` where E is the embedding dimension
        - b: :math:`E * 3` where E is the embedding dimension
        Output:
        - in output list :math:`[q', k', v']`, each output tensor will have the
            same shape as the corresponding input tensor.
    """
    E = q.size(-1)
    if k is v:
        if q is k:
            # self-attention: one packed projection, then split into q, k, v
            proj = linear(q, w, b)
            proj = proj.unflatten(-1, (3, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
            return proj[0], proj[1], proj[2]
        else:
            # encoder-decoder attention: project q separately from the shared k/v tensor
            w_q, w_kv = w.split([E, E * 2])
            if b is None:
                b_q = b_kv = None
            else:
                b_q, b_kv = b.split([E, E * 2])
            q_proj = linear(q, w_q, b_q)
            kv_proj = linear(k, w_kv, b_kv)
            kv_proj = kv_proj.unflatten(-1, (2, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
            return (q_proj, kv_proj[0], kv_proj[1])
    else:
        # q, k and v are all distinct: three separate projections
        w_q, w_k, w_v = w.chunk(3)
        if b is None:
            b_q = b_k = b_v = None
        else:
            b_q, b_k, b_v = b.chunk(3)
        return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
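

# Sanity sketch (illustrative only): with a packed (3E, E) weight, the packed projection
# matches three separate linear layers built from its row chunks.
def _example_in_projection_packed():
    E = 4
    x = torch.randn(5, 2, E)
    w, b = torch.randn(3 * E, E), torch.randn(3 * E)
    q, k, v = _in_projection_packed(x, x, x, w, b)
    w_q, w_k, w_v = w.chunk(3)
    b_q, b_k, b_v = b.chunk(3)
    assert torch.allclose(q, F.linear(x, w_q, b_q), atol=1e-6)
    assert torch.allclose(v, F.linear(x, w_v, b_v), atol=1e-6)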


def _in_projection(
    q: Tensor,
    k: Tensor,
    v: Tensor,
    w_q: Tensor,
    w_k: Tensor,
    w_v: Tensor,
    b_q: Optional[Tensor] = None,
    b_k: Optional[Tensor] = None,
    b_v: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
    r"""
    Performs the in-projection step of the attention operation. This is simply
    a triple of linear projections, with shape constraints on the weights which
    ensure embedding dimension uniformity in the projected outputs.
    Output is a triple containing projection tensors for query, key and value.
    Args:
        q, k, v: query, key and value tensors to be projected.
        w_q, w_k, w_v: weights for q, k and v, respectively.
        b_q, b_k, b_v: optional biases for q, k and v, respectively.
    Shape:
        Inputs:
        - q: :math:`(Qdims..., Eq)` where Eq is the query embedding dimension and Qdims are any
            number of leading dimensions.
        - k: :math:`(Kdims..., Ek)` where Ek is the key embedding dimension and Kdims are any
            number of leading dimensions.
        - v: :math:`(Vdims..., Ev)` where Ev is the value embedding dimension and Vdims are any
            number of leading dimensions.
        - w_q: :math:`(Eq, Eq)`
        - w_k: :math:`(Eq, Ek)`
        - w_v: :math:`(Eq, Ev)`
        - b_q: :math:`(Eq)`
        - b_k: :math:`(Eq)`
        - b_v: :math:`(Eq)`
        Output: in output triple :math:`(q', k', v')`,
        - q': :math:`[Qdims..., Eq]`
        - k': :math:`[Kdims..., Eq]`
        - v': :math:`[Vdims..., Eq]`
    """
    Eq, Ek, Ev = q.size(-1), k.size(-1), v.size(-1)
    assert w_q.shape == (Eq, Eq), f"expecting query weights shape of {(Eq, Eq)}, but got {w_q.shape}"
    assert w_k.shape == (Eq, Ek), f"expecting key weights shape of {(Eq, Ek)}, but got {w_k.shape}"
    assert w_v.shape == (Eq, Ev), f"expecting value weights shape of {(Eq, Ev)}, but got {w_v.shape}"
    assert b_q is None or b_q.shape == (Eq,), f"expecting query bias shape of {(Eq,)}, but got {b_q.shape}"
    assert b_k is None or b_k.shape == (Eq,), f"expecting key bias shape of {(Eq,)}, but got {b_k.shape}"
    assert b_v is None or b_v.shape == (Eq,), f"expecting value bias shape of {(Eq,)}, but got {b_v.shape}"
    return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)