small changes by Sara
app.py CHANGED
@@ -248,13 +248,13 @@ def process_and_display(
     use_concpet_from_file_3 = False
 ):
     if base_image is None:
-        raise gr.Error("
+        raise gr.Error("Please upload a base image")

     if concept_image1 is None:
-        raise gr.Error("
+        raise gr.Error("Choose at least one concept image")

     if concept_image1 is None:
-        raise gr.Error("
+        raise gr.Error("Choose at least one concept type")

     modified_images = process_images(
         base_image,
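Note on the hunk above: the new messages rely on Gradio's `gr.Error`, which aborts the event handler and shows the text to the user. The third check still tests `concept_image1` even though its message talks about the concept type, so it was presumably meant to look at the selected type instead. Below is a minimal, self-contained sketch of the same validation pattern; the component names and the tiny demo layout are illustrative only, not the app's actual code.

```python
import gradio as gr

def process_and_display(base_image, concept_image1, concept_name1):
    # Same pattern as in the diff: raising gr.Error stops the handler
    # and surfaces the message in the UI.
    if base_image is None:
        raise gr.Error("Please upload a base image")
    if concept_image1 is None:
        raise gr.Error("Choose at least one concept image")
    if concept_name1 is None:  # illustrative: checks the concept type, not the image again
        raise gr.Error("Choose at least one concept type")
    return base_image  # placeholder for the real generation step

with gr.Blocks() as demo:
    base_image = gr.Image(label="Base Image (Required)", type="numpy")
    concept_image1 = gr.Image(label="Concept Image 1", type="numpy")
    concept_name1 = gr.Dropdown(["pattern", "emotion", "pose"], label="Concept 1", value=None)
    output_image = gr.Image(label="Output")
    gr.Button("Generate").click(
        process_and_display,
        inputs=[base_image, concept_image1, concept_name1],
        outputs=output_image,
    )

if __name__ == "__main__":
    demo.launch()
```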
@@ -300,14 +300,19 @@ a photo of a person feeling terrified
 """
 with gr.Blocks(css=css) as demo:
     gr.Markdown(f"""# IP Composer
-###
-
-
-
-####
-####
-
-
+### Compose new images with visual concepts
+Following the algorithm proposed in [*IP-Composer: Semantic Composition of Visual Concepts* by Dorfman et al.](https://arxiv.org/pdf/2502.13951)
+(Built on IP-Adapter)
+
+#### 🛠️ How to Use:
+#### 1. Upload a base image
+#### 2. Upload 1–3 concept images
+#### 3. Select a concept type to extract from each concept image:
+- Choose a **predefined concept type** from the dropdown (e.g. pattern, emotion, pose), **or**
+- Upload a **file with text variations of your concept** (e.g. prompts from an LLM).
+- If you're uploading a **new concept**, don't forget to **adjust the "rank" value** under **Advanced Options** for better results.
+
+[[Project page](https://ip-composer.github.io/IP-Composer/)] [[arxiv](https://arxiv.org/pdf/2502.13951)]
 """)
     concpet_from_file_1 = gr.State()
     concpet_from_file_2 = gr.State()
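The new instructions ask for a file with text variations of the concept, preferably more than 100 of them. The exact file format is not visible in this diff; the hunk context above ("a photo of a person feeling terrified") suggests one short caption per line. As a rough sketch under that assumption (the caption wording and the filename are placeholders), such a file could be written like this, or the list could simply be requested from an LLM as the UI text suggests.

```python
# Hypothetical helper for producing a concept-variations file.
# Assumes one caption per line, in the style of the 'emotions' example
# visible in the hunk context ("a photo of a person feeling terrified").
emotions = ["happy", "sad", "angry", "terrified", "surprised", "calm", "bored"]

variations = [f"a photo of a person feeling {emotion}" for emotion in emotions]

with open("emotion_variations.txt", "w", encoding="utf-8") as f:
    f.write("\n".join(variations))
```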
@@ -318,61 +323,61 @@ following the algorithm proposed in [*IP-Composer: Semantic Composition of Visua
     with gr.Row():
         with gr.Column():
             base_image = gr.Image(label="Base Image (Required)", type="numpy")
-            with gr.Tab("
+            with gr.Tab("Concept 1"):
                 with gr.Group():
                     concept_image1 = gr.Image(label="Concept Image 1", type="numpy")
                     with gr.Row():
-                        concept_name1 = gr.Dropdown(concept_options, label="
-                    with gr.Accordion("💡
-                        gr.Markdown("1.
-                        gr.Markdown("2.
-                        with gr.Accordion("
+                        concept_name1 = gr.Dropdown(concept_options, label="Concept 1", value=None, info="Pick concept type")
+                    with gr.Accordion("💡 Or use a new concept 👇", open=False):
+                        gr.Markdown("1. Upload a file with text variations of your concept (e.g. ask an LLM)")
+                        gr.Markdown("2. Preferably with > 100 variations.")
+                        with gr.Accordion("File example for the concept 'emotions'", open=False):
                             gr.Markdown(example)
-                        concept_file_1 = gr.File(label="
+                        concept_file_1 = gr.File(label="Concept variations", file_types=["text"])

-            with gr.Tab("
+            with gr.Tab("Concept 2 (Optional)"):
                 with gr.Group():
                     concept_image2 = gr.Image(label="Concept Image 2", type="numpy")
                     with gr.Row():
-                        concept_name2 = gr.Dropdown(concept_options, label="
-                    with gr.Accordion("💡
-                        gr.Markdown("1.
-                        gr.Markdown("2.
-                        with gr.Accordion("
+                        concept_name2 = gr.Dropdown(concept_options, label="Concept 2", value=None, info="Pick concept type")
+                    with gr.Accordion("💡 Or use a new concept 👇", open=False):
+                        gr.Markdown("1. Upload a file with text variations of your concept (e.g. ask an LLM)")
+                        gr.Markdown("2. Preferably with > 100 variations.")
+                        with gr.Accordion("File example for the concept 'emotions'", open=False):
                             gr.Markdown(example)
-                        concept_file_2 = gr.File(label="
+                        concept_file_2 = gr.File(label="Concept variations", file_types=["text"])


-            with gr.Tab("
+            with gr.Tab("Concept 3 (Optional)"):
                 with gr.Group():
                     concept_image3 = gr.Image(label="Concept Image 3", type="numpy")
                     with gr.Row():
-                        concept_name3 = gr.Dropdown(concept_options, label="
-                    with gr.Accordion("💡
-                        gr.Markdown("1.
-                        gr.Markdown("2.
-                        with gr.Accordion("
+                        concept_name3 = gr.Dropdown(concept_options, label="Concept 3", value=None, info="Pick concept type")
+                    with gr.Accordion("💡 Or use a new concept 👇", open=False):
+                        gr.Markdown("1. Upload a file with text variations of your concept (e.g. ask an LLM)")
+                        gr.Markdown("2. Preferably with > 100 variations.")
+                        with gr.Accordion("File example for the concept 'emotions'", open=False):
                             gr.Markdown(example)
-                        concept_file_3 = gr.File(label="
+                        concept_file_3 = gr.File(label="Concept variations", file_types=["text"])



             with gr.Accordion("Advanced options", open=False):
                 prompt = gr.Textbox(label="Guidance Prompt (Optional)", placeholder="Optional text prompt to guide generation")
-                num_inference_steps = gr.Slider(minimum=1, maximum=50, value=30, step=1, label="
+                num_inference_steps = gr.Slider(minimum=1, maximum=50, value=30, step=1, label="Num steps")
                 with gr.Row():
                     scale = gr.Slider(minimum=0.1, maximum=2.0, value=1.0, step=0.1, label="Scale")
                     randomize_seed = gr.Checkbox(value=True, label="Randomize seed")
                     seed = gr.Number(value=0, label="Seed", precision=0)
                 with gr.Column():
-                    gr.Markdown("
+                    gr.Markdown("If a concept is not showing enough, try to increase the rank")
                     with gr.Row():
-                        rank1 = gr.Slider(minimum=1, maximum=150, value=30, step=1, label="
-                        rank2 = gr.Slider(minimum=1, maximum=150, value=30, step=1, label="
-                        rank3 = gr.Slider(minimum=1, maximum=150, value=30, step=1, label="
+                        rank1 = gr.Slider(minimum=1, maximum=150, value=30, step=1, label="Rank concept 1")
+                        rank2 = gr.Slider(minimum=1, maximum=150, value=30, step=1, label="Rank concept 2")
+                        rank3 = gr.Slider(minimum=1, maximum=150, value=30, step=1, label="Rank concept 3")

         with gr.Column():
-            output_image = gr.Image(label="
+            output_image = gr.Image(label="Composed output", show_label=True)
             submit_btn = gr.Button("Generate")

     gr.Examples(
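The rank sliders and the hint "If a concept is not showing enough, try to increase the rank" refer to the dimensionality of the concept subspace used by IP-Composer. As described in the linked paper, to the best of my reading, the text variations are embedded, the top singular vectors of that embedding matrix span the concept subspace, and the base image's component inside that subspace is swapped for the concept image's. The sketch below only illustrates that idea; the function names, shapes, and the exact composition rule are assumptions, not the app's implementation.

```python
import numpy as np

def concept_projection(text_embeds: np.ndarray, rank: int) -> np.ndarray:
    """Projection onto the span of the top-`rank` right singular vectors
    of the stacked concept-text embeddings (shape: n_variations x dim)."""
    _, _, vt = np.linalg.svd(text_embeds, full_matrices=False)
    v_r = vt[:rank]          # (rank, dim) basis of the concept subspace
    return v_r.T @ v_r       # (dim, dim) projection matrix

def compose(base_embed: np.ndarray, concept_embed: np.ndarray, proj: np.ndarray) -> np.ndarray:
    # Replace the base image's component inside the concept subspace with the
    # concept image's component. A larger rank keeps more singular directions,
    # which is why raising the slider makes the concept more visible.
    return base_embed - base_embed @ proj + concept_embed @ proj
```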