Files changed:
- README.md (+1, -1)
- app.py (+6, -6)
- optimization.py (+9, -22)
- qwenimage/__init__.py (deleted, empty)
- qwenimage/qwen_fa3_processor.py (deleted, -142)
README.md
CHANGED
@@ -1,5 +1,5 @@
 ---
-title:
+title: Flux.1-Dev Compiled Graph
 emoji: 🖼️
 colorFrom: yellow
 colorTo: green
app.py
CHANGED
@@ -1,8 +1,7 @@
 import gradio as gr
 import spaces
 import torch
-from diffusers import
-from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
+from diffusers import DiffusionPipeline
 from optimization import compile_transformer
 from hub_utils import _push_compiled_graph_to_hub
 from huggingface_hub import whoami
@@ -12,8 +11,9 @@ dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"

 # Load the model pipeline
-pipe =
-
+pipe = DiffusionPipeline.from_pretrained(
+    "black-forest-labs/Flux.1-Dev", torch_dtype=dtype
+).to(device)

 @spaces.GPU(duration=120)
 def push_to_hub(repo_id, filename, oauth_token: gr.OAuthToken):
@@ -50,8 +50,8 @@ css="""
 """
 with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="col-container"):
-        gr.Markdown("## Compile
-        gr.Markdown("Enter a **repo_id** and **filename**. This repo automatically compiles the [
+        gr.Markdown("## Compile [Flux.1-Dev](https://hf.co/black-forest-labs/Flux.1-Dev) graph ahead of time & push to the Hub")
+        gr.Markdown("Enter a **repo_id** and **filename**. This repo automatically compiles the Flux.1-Dev model ahead of time. Read more about this in [this post](https://huggingface.co/blog/zerogpu-aoti).")

         repo_id = gr.Textbox(label="repo_id", placeholder="e.g. sayakpaul/qwen-aot")
         filename = gr.Textbox(label="filename", placeholder="e.g. compiled.pt2")
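Taken together, the new app.py flow is: load the Flux pipeline, AOT-compile its transformer (see optimization.py below), and push the compiled artifact to the Hub. A minimal sketch of that flow outside the Gradio UI; the return value of `compile_transformer` and the exact argument order of `_push_compiled_graph_to_hub` are not shown in this diff and are assumed here:

```python
import torch
from diffusers import DiffusionPipeline

from optimization import compile_transformer
from hub_utils import _push_compiled_graph_to_hub  # repo helper; signature assumed

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/Flux.1-Dev", torch_dtype=dtype
).to(device)

# Run one real prompt so the transformer call can be captured, exported
# with dynamic shapes, and AOT-compiled. Assumes compile_transformer
# returns the compiled graph produced by spaces.aoti_compile.
compiled = compile_transformer(pipe, prompt="a cat", num_inference_steps=2)

# Upload the serialized graph so other Spaces can download it instead of
# recompiling (repo_id/filename come from the UI textboxes in app.py).
_push_compiled_graph_to_hub(compiled, repo_id="user/flux-aot", filename="compiled.pt2")
```

Per the linked zerogpu-aoti post, a consumer can later patch such a compiled graph back into a pipeline with `spaces.aoti_apply(compiled, pipe.transformer)`.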
optimization.py
CHANGED
@@ -7,27 +7,15 @@ from torch.utils._pytree import tree_map
 
 P = ParamSpec('P')
 
-TRANSFORMER_IMAGE_SEQ_LENGTH_DIM = torch.export.Dim('image_seq_length')
-TRANSFORMER_TEXT_SEQ_LENGTH_DIM = torch.export.Dim('text_seq_length')
+TRANSFORMER_HIDDEN_DIM = torch.export.Dim('hidden', min=4096, max=8212)
 
+# Specific to Flux. More about this is available in
+# https://huggingface.co/blog/zerogpu-aoti
 TRANSFORMER_DYNAMIC_SHAPES = {
-    'hidden_states': {
-        1: TRANSFORMER_IMAGE_SEQ_LENGTH_DIM,
-    },
-    'encoder_hidden_states': {
-        1: TRANSFORMER_TEXT_SEQ_LENGTH_DIM,
-    },
-    'encoder_hidden_states_mask': {
-        1: TRANSFORMER_TEXT_SEQ_LENGTH_DIM,
-    },
-    'image_rotary_emb': ({
-        0: TRANSFORMER_IMAGE_SEQ_LENGTH_DIM,
-    }, {
-        0: TRANSFORMER_TEXT_SEQ_LENGTH_DIM,
-    }),
+    'hidden_states': {1: TRANSFORMER_HIDDEN_DIM},
+    'img_ids': {0: TRANSFORMER_HIDDEN_DIM},
 }
 
-
 INDUCTOR_CONFIGS = {
     'conv_1x1_as_mm': True,
     'epilogue_fusion': False,
@@ -37,21 +25,20 @@ INDUCTOR_CONFIGS = {
     'triton.cudagraphs': True,
 }
 
-
 def compile_transformer(pipeline: Callable[P, Any], *args: P.args, **kwargs: P.kwargs):
     @spaces.GPU(duration=1500)
     def f():
         with spaces.aoti_capture(pipeline.transformer) as call:
             pipeline(*args, **kwargs)
 
-        dynamic_shapes = tree_map(lambda
+        dynamic_shapes = tree_map(lambda v: None, call.kwargs)
         dynamic_shapes |= TRANSFORMER_DYNAMIC_SHAPES
 
         exported = torch.export.export(
-            mod=pipeline.transformer,
-            args=call.args,
+            mod=pipeline.transformer,
+            args=call.args,
             kwargs=call.kwargs,
-            dynamic_shapes=dynamic_shapes
+            dynamic_shapes=dynamic_shapes
         )
         return spaces.aoti_compile(exported, INDUCTOR_CONFIGS)
 
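The dynamic-shapes dict is the heart of this file: any dimension bound to a `torch.export.Dim` stays symbolic in the exported graph, so a single compiled artifact serves inputs of varying size along that axis. For Flux, `hidden_states` dim 1 and `img_ids` dim 0 are both the packed latent sequence length, which is why they share one `Dim`. A toy, self-contained illustration of the mechanism (the module and names here are illustrative, not from the repo):

```python
import torch

class Toy(torch.nn.Module):
    def forward(self, hidden_states):
        return hidden_states * 2

# Mark dim 1 of `hidden_states` as symbolic, bounded like the repo's Dim.
seq = torch.export.Dim("seq", min=16, max=8192)
exported = torch.export.export(
    Toy(),
    args=(torch.randn(1, 64, 8),),
    dynamic_shapes={"hidden_states": {1: seq}},
)

# One exported graph now accepts any length in [16, 8192] along dim 1:
print(exported.module()(torch.randn(1, 128, 8)).shape)  # torch.Size([1, 128, 8])
```

The `tree_map(lambda v: None, call.kwargs)` line builds a spec marking every captured kwarg as fully static, and the `|=` then overrides just the entries that should be dynamic.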
qwenimage/__init__.py
DELETED
File without changes
qwenimage/qwen_fa3_processor.py
DELETED
@@ -1,142 +0,0 @@
-"""
-Paired with a good language model. Thanks!
-"""
-
-import torch
-from typing import Optional, Tuple
-from diffusers.models.transformers.transformer_qwenimage import apply_rotary_emb_qwen
-
-try:
-    from kernels import get_kernel
-    _k = get_kernel("kernels-community/vllm-flash-attn3")
-    _flash_attn_func = _k.flash_attn_func
-except Exception as e:
-    _flash_attn_func = None
-    _kernels_err = e
-
-
-def _ensure_fa3_available():
-    if _flash_attn_func is None:
-        raise ImportError(
-            "FlashAttention-3 via Hugging Face `kernels` is required. "
-            "Tried `get_kernel('kernels-community/vllm-flash-attn3')` and failed with:\n"
-            f"{_kernels_err}"
-        )
-
-@torch.library.custom_op("flash::flash_attn_func", mutates_args=())
-def flash_attn_func(
-    q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, causal: bool = False
-) -> torch.Tensor:
-    outputs, lse = _flash_attn_func(q, k, v, causal=causal)
-    return outputs
-
-@flash_attn_func.register_fake
-def _(q, k, v, **kwargs):
-    # two outputs:
-    # 1. output: (batch, seq_len, num_heads, head_dim)
-    # 2. softmax_lse: (batch, num_heads, seq_len) with dtype=torch.float32
-    meta_q = torch.empty_like(q).contiguous()
-    return meta_q  # , q.new_empty((q.size(0), q.size(2), q.size(1)), dtype=torch.float32)
-
-
-class QwenDoubleStreamAttnProcessorFA3:
-    """
-    FA3-based attention processor for Qwen double-stream architecture.
-    Computes joint attention over concatenated [text, image] streams using vLLM FlashAttention-3
-    accessed via Hugging Face `kernels`.
-
-    Notes / limitations:
-    - General attention masks are not supported here (FA3 path). `is_causal=False` and no arbitrary mask.
-    - Optional windowed attention / sink tokens / softcap can be plumbed through if you use those features.
-    - Expects an available `apply_rotary_emb_qwen` in scope (same as your non-FA3 processor).
-    """
-
-    _attention_backend = "fa3"  # for parity with your other processors, not used internally
-
-    def __init__(self):
-        _ensure_fa3_available()
-
-    @torch.no_grad()
-    def __call__(
-        self,
-        attn,  # Attention module with to_q/to_k/to_v/add_*_proj, norms, to_out, to_add_out, and .heads
-        hidden_states: torch.FloatTensor,  # (B, S_img, D_model) image stream
-        encoder_hidden_states: torch.FloatTensor = None,  # (B, S_txt, D_model) text stream
-        encoder_hidden_states_mask: torch.FloatTensor = None,  # unused in FA3 path
-        attention_mask: Optional[torch.FloatTensor] = None,  # unused in FA3 path
-        image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # (img_freqs, txt_freqs)
-    ) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
-        if encoder_hidden_states is None:
-            raise ValueError("QwenDoubleStreamAttnProcessorFA3 requires encoder_hidden_states (text stream).")
-        if attention_mask is not None:
-            # FA3 kernel path here does not consume arbitrary masks; fail fast to avoid silent correctness issues.
-            raise NotImplementedError("attention_mask is not supported in this FA3 implementation.")
-
-        _ensure_fa3_available()
-
-        B, S_img, _ = hidden_states.shape
-        S_txt = encoder_hidden_states.shape[1]
-
-        # ---- QKV projections (image/sample stream) ----
-        img_q = attn.to_q(hidden_states)  # (B, S_img, D)
-        img_k = attn.to_k(hidden_states)
-        img_v = attn.to_v(hidden_states)
-
-        # ---- QKV projections (text/context stream) ----
-        txt_q = attn.add_q_proj(encoder_hidden_states)  # (B, S_txt, D)
-        txt_k = attn.add_k_proj(encoder_hidden_states)
-        txt_v = attn.add_v_proj(encoder_hidden_states)
-
-        # ---- Reshape to (B, S, H, D_h) ----
-        H = attn.heads
-        img_q = img_q.unflatten(-1, (H, -1))
-        img_k = img_k.unflatten(-1, (H, -1))
-        img_v = img_v.unflatten(-1, (H, -1))
-
-        txt_q = txt_q.unflatten(-1, (H, -1))
-        txt_k = txt_k.unflatten(-1, (H, -1))
-        txt_v = txt_v.unflatten(-1, (H, -1))
-
-        # ---- Q/K normalization (per your module contract) ----
-        if getattr(attn, "norm_q", None) is not None:
-            img_q = attn.norm_q(img_q)
-        if getattr(attn, "norm_k", None) is not None:
-            img_k = attn.norm_k(img_k)
-        if getattr(attn, "norm_added_q", None) is not None:
-            txt_q = attn.norm_added_q(txt_q)
-        if getattr(attn, "norm_added_k", None) is not None:
-            txt_k = attn.norm_added_k(txt_k)
-
-        # ---- RoPE (Qwen variant) ----
-        if image_rotary_emb is not None:
-            img_freqs, txt_freqs = image_rotary_emb
-            # expects tensors shaped (B, S, H, D_h)
-            img_q = apply_rotary_emb_qwen(img_q, img_freqs, use_real=False)
-            img_k = apply_rotary_emb_qwen(img_k, img_freqs, use_real=False)
-            txt_q = apply_rotary_emb_qwen(txt_q, txt_freqs, use_real=False)
-            txt_k = apply_rotary_emb_qwen(txt_k, txt_freqs, use_real=False)
-
-        # ---- Joint attention over [text, image] along sequence axis ----
-        # Shapes: (B, S_total, H, D_h)
-        q = torch.cat([txt_q, img_q], dim=1)
-        k = torch.cat([txt_k, img_k], dim=1)
-        v = torch.cat([txt_v, img_v], dim=1)
-
-        # FlashAttention-3 path expects (B, S, H, D_h) and returns (out, softmax_lse)
-        out = flash_attn_func(q, k, v, causal=False)  # out: (B, S_total, H, D_h)
-
-        # ---- Back to (B, S, D_model) ----
-        out = out.flatten(2, 3).to(q.dtype)
-
-        # Split back to text / image segments
-        txt_attn_out = out[:, :S_txt, :]
-        img_attn_out = out[:, S_txt:, :]
-
-        # ---- Output projections ----
-        img_attn_out = attn.to_out[0](img_attn_out)
-        if len(attn.to_out) > 1:
-            img_attn_out = attn.to_out[1](img_attn_out)  # dropout if present
-
-        txt_attn_out = attn.to_add_out(txt_attn_out)
-
-        return img_attn_out, txt_attn_out