Spaces: Running on Zero
Update app_local.py

app_local.py CHANGED (+30 -19)
@@ -15,7 +15,7 @@ import math
 import json # Added json import
 from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 import logging
-
+import copy
 
 #############################
 os.environ.setdefault('GRADIO_ANALYTICS_ENABLED', 'False')
@@ -46,14 +46,16 @@ rewriter_model = AutoModelForCausalLM.from_pretrained(
     device_map="auto",
     quantization_config=bnb_config,
 )
-
 def get_fresh_presets():
-
-
+    return copy.deepcopy(PRESETS)
+
+preset_state = gr.State(value=get_fresh_presets())
 
+def reset_presets():
+    return get_fresh_presets()
+
 # Store original presets for reference
 ORIGINAL_PRESETS = deepcopy(PRESETS)
-session_presets = get_fresh_presets()
 # Preload enhancement model at startup
 logger.info("🔄 Loading prompt enhancement model...")
 rewriter_tokenizer = AutoTokenizer.from_pretrained(REWRITER_MODEL)
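The new helpers exist so each session works on its own copy of the preset table rather than on the module-level PRESETS dict. A minimal sketch of why the deep copy matters, using an invented stand-in for PRESETS rather than the app's real data:

import copy

PRESETS = {"vintage": {"prompts": ["sepia tone", "", "", ""]}}  # stand-in defaults, not the app's real presets

def get_fresh_presets():
    # copy.deepcopy duplicates nested lists/dicts too, so later edits stay local
    return copy.deepcopy(PRESETS)

alias = PRESETS                  # just another name for the shared module-level dict
fresh = get_fresh_presets()      # an independent copy

fresh["vintage"]["prompts"][0] = "edited"
assert PRESETS["vintage"]["prompts"][0] == "sepia tone"   # defaults untouched

alias["vintage"]["prompts"][0] = "edited"
assert PRESETS["vintage"]["prompts"][0] == "edited"       # aliasing mutated the defaults

gr.State also deep-copies the default value it is given, so seeding it from get_fresh_presets() keeps one visitor's edits from leaking into another session.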
@@ -300,11 +302,15 @@ def update_prompt_preview(preset_type, base_prompt):
 
 def update_preset_prompt_textbox(preset_type, p1, p2, p3, p4):
     if preset_type and preset_type in preset_state.value:
-
-
+        # Build new preset instead of mutating in place
+        new_preset = {
+            **preset_state.value[preset_type],
+            "prompts": [p1, p2, p3, p4]
+        }
+        preset_state.value[preset_type] = new_preset
         return update_prompt_preview_with_presets(preset_type, prompt.value, preset_state.value)
     return "Select a preset first."
-
+
 def update_prompt_preview_with_presets(preset_type, base_prompt, custom_presets):
     if preset_type and preset_type in custom_presets:
         preset = custom_presets[preset_type]
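The handler now rebuilds the preset entry with dict unpacking instead of writing into the existing dict. A small illustration of the difference, using a hypothetical preset entry (the real schema beyond the "prompts" key is not shown in this diff):

preset = {"label": "Vintage", "prompts": ["old", "", "", ""]}  # hypothetical entry
presets = {"vintage": preset}
elsewhere = presets["vintage"]   # some other reference to the same object

# Rebuild with unpacking: the original dict object is never touched
new_preset = {**presets["vintage"], "prompts": ["p1", "p2", "p3", "p4"]}
presets["vintage"] = new_preset

assert elsewhere["prompts"] == ["old", "", "", ""]               # old reference unchanged
assert presets["vintage"]["prompts"] == ["p1", "p2", "p3", "p4"]

Note that {**old, ...} is only a shallow copy: keys other than "prompts" still point at the same nested objects, so this protects against rebinding "prompts" but not against mutating other nested values.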
@@ -499,14 +505,7 @@ with gr.Blocks(title="'Qwen Image Edit' Model Playground & Showcase [4-Step Ligh
     preset_prompts_state = gr.State(value=[])
     # preset_prompts_state = gr.State(value=["", "", "", ""])
     preset_state = gr.State(value=get_fresh_presets())
-
-    gr.Markdown("""
-    <div style="text-align: center; background: linear-gradient(to right, #3a7bd5, #00d2ff); color: white; padding: 20px; border-radius: 8px;">
-    <h1 style="margin-bottom: 5px;">⚡️ Qwen-Image-Edit Lightning</h1>
-    <p>✨ 4-step inferencing with lightx2v's LoRA.</p>
-    <p>📝 Local Prompt Enhancement, Batched Multi-image Generation, 🎨 Preset Batches</p>
-    </div>
-    """)
+    gr.Markdown("## ⚡️ Qwen-Image-Edit Lightning Presets")
 
     with gr.Row():
         # Input Column
@@ -540,7 +539,8 @@ with gr.Blocks(title="'Qwen Image Edit' Model Playground & Showcase [4-Step Ligh
             preset_prompt_3 = gr.Textbox(label="Prompt 3", lines=1, value="")
             preset_prompt_4 = gr.Textbox(label="Prompt 4", lines=1, value="")
             update_preset_button = gr.Button("Update Preset", variant="secondary")
-
+            reset_button = gr.Button("Reset Presets", variant="stop")
+
 
 
             # Add prompt preview component
@@ -609,14 +609,21 @@ with gr.Blocks(title="'Qwen Image Edit' Model Playground & Showcase [4-Step Ligh
                 columns=2,
                 height="60vh",
                 object_fit="contain",
-                preview=True,
                 container=True
             )
             prompt_info = gr.HTML(
                 value="<div style='padding:15px; margin-top:15px'>"
                 "Hint: depending on the original image, prompt quality, and complexity, you can often get away with 3 steps, even 2 steps without much loss in quality. </div>"
             )
-
+
+
+    gr.Markdown("""
+    <div style="text-align: center; background: linear-gradient(to right, #3a7bd5, #00d2ff); color: white; padding: 20px; border-radius: 8px;">
+    <p>✨ 4-step inferencing with lightx2v's LoRA.</p>
+    <p>📝 Local Prompt Enhancement, Batched Multi-image Generation, 🎨 Preset Batches</p>
+    </div>
+    """)
+
     def show_preset_editor(preset_type):
         if preset_type and preset_type in preset_state.value:
             preset = preset_state.value[preset_type]
@@ -718,10 +725,14 @@ with gr.Blocks(title="'Qwen Image Edit' Model Playground & Showcase [4-Step Ligh
         inputs=inputs,
         outputs=outputs
     )
+    # .then(
+    #     fn=reset_presets, outputs=preset_state
+    # )
     prompt.submit(
         fn=infer,
         inputs=inputs,
         outputs=outputs
     )
+    reset_button.click(fn=reset_presets, outputs=preset_state)
 
 demo.queue(max_size=5).launch()
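For context on the new wiring, a minimal, self-contained sketch of the same pattern (the PRESETS value and the status textbox are invented for illustration; only reset_button, preset_state, and reset_presets follow the diff): clicking the button calls reset_presets, and its return value replaces the session's preset_state. The commented-out .then(...) block in the diff hints at the same kind of event chaining shown here.

import copy
import gradio as gr

PRESETS = {"vintage": {"prompts": ["sepia tone", "", "", ""]}}  # stand-in defaults

def get_fresh_presets():
    return copy.deepcopy(PRESETS)

def reset_presets():
    # Takes no inputs; the return value becomes the new preset_state for this session
    return get_fresh_presets()

with gr.Blocks() as demo:
    preset_state = gr.State(value=get_fresh_presets())
    reset_button = gr.Button("Reset Presets", variant="stop")
    status = gr.Textbox(label="Status")

    # Clicking restores the defaults; .then() chains a follow-up step afterwards
    reset_button.click(fn=reset_presets, outputs=preset_state).then(
        fn=lambda presets: f"{len(presets)} preset(s) restored",
        inputs=preset_state,
        outputs=status,
    )

demo.launch()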