Update Gradio app with multiple files
Files changed:
- app.py (+36, −37)
- requirements.txt (+1, −1)
app.py
CHANGED

@@ -1,4 +1,5 @@
 import gradio as gr
+from gradio_image_slider import ImageSlider
 import numpy as np
 import random
 import torch
@@ -29,11 +30,11 @@ pipe = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2509",
     torch_dtype=dtype,
     device_map='cuda'),torch_dtype=dtype).to(device)

-pipe.load_lora_weights("
-    weight_name="
-    adapter_name="
-pipe.set_adapters(["
-pipe.fuse_lora(adapter_names=["
+pipe.load_lora_weights("vafipas663/Qwen-Edit-2509-Upscale-LoRA",
+                       weight_name="qwen-edit-enhance_64-v3_000001500.safetensors",
+                       adapter_name="upscale")
+pipe.set_adapters(["upscale"], adapter_weights=[1.])
+pipe.fuse_lora(adapter_names=["upscale"], lora_scale=1.0)
 pipe.unload_lora_weights()

 pipe.transformer.__class__ = QwenImageTransformer2DModel
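For readers unfamiliar with this diffusers idiom: loading a LoRA under a named adapter, fusing it, and then unloading it bakes the adapter's effect directly into the base weights, so inference carries no per-call LoRA overhead. A minimal sketch of the same pattern; the model and repo identifiers below are placeholders, not values from this Space:

```python
import torch
from diffusers import DiffusionPipeline

# Illustrative identifiers -- substitute a real base model and LoRA repo.
pipe = DiffusionPipeline.from_pretrained("some/base-model", torch_dtype=torch.bfloat16)

# 1. Load the LoRA weights under a named adapter.
pipe.load_lora_weights("some/lora-repo",
                       weight_name="adapter.safetensors",
                       adapter_name="upscale")

# 2. Activate the adapter at full strength.
pipe.set_adapters(["upscale"], adapter_weights=[1.0])

# 3. Fuse: fold the low-rank update into the base weights in place.
pipe.fuse_lora(adapter_names=["upscale"], lora_scale=1.0)

# 4. Unload: drop the now-redundant LoRA layers; the fused weights
#    keep the adapter's behavior with zero runtime bookkeeping.
pipe.unload_lora_weights()
```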
@@ -44,7 +45,7 @@ optimize_pipeline_(pipe, image=[Image.new("RGB", (1024, 1024)), Image.new("RGB",
 MAX_SEED = np.iinfo(np.int32).max

 @spaces.GPU
-def convert_to_anime(
+def upscale_image(
     image,
     seed,
     randomize_seed,
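The `@spaces.GPU` decorator is what makes this Space run on ZeroGPU: a GPU is attached only for the duration of each decorated call. A hedged sketch of the mechanism; the `duration` argument is optional and shown here only as an illustration of typical usage:

```python
import spaces  # provided in the Hugging Face Spaces runtime

@spaces.GPU(duration=60)  # request up to ~60 s of GPU time per call
def run_on_gpu(prompt: str) -> str:
    # CUDA is only guaranteed to be available inside the decorated call.
    import torch
    return f"{prompt} (cuda available: {torch.cuda.is_available()})"
```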
@@ -54,7 +55,7 @@ def convert_to_anime(
     width,
     progress=gr.Progress(track_tqdm=True)
 ):
-    prompt = "
+    prompt = "Upscale and enhance this image with high quality details"

     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
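The seed plumbing follows the standard Gradio pattern: when the randomize checkbox is set, a fresh seed is drawn below the int32 maximum, and the seed actually used is returned so the UI can display it. A sketch of that logic; the `torch.Generator` step is an assumption about the elided function body, not code visible in this diff:

```python
import random
import numpy as np
import torch

MAX_SEED = np.iinfo(np.int32).max  # 2**31 - 1

def resolve_seed(seed: int, randomize_seed: bool) -> tuple[int, torch.Generator]:
    # Draw a fresh seed when requested, then build a deterministic
    # generator so the same seed reproduces the same output.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device="cpu").manual_seed(seed)
    return seed, generator
```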
@@ -81,7 +82,7 @@ def convert_to_anime(
         num_images_per_prompt=1,
     ).images[0]

-    return result, seed
+    return (image, result), seed


 # --- UI ---
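Returning `(image, result)` as a pair is what drives the before/after comparison: the `ImageSlider` custom component renders two images behind a draggable divider. A minimal sketch, assuming the component accepts a (before, after) tuple of PIL images as its value, as the diff implies; the resize step is a hypothetical stand-in for the real pipeline call:

```python
import gradio as gr
from gradio_image_slider import ImageSlider
from PIL import Image

def compare(img: Image.Image):
    # Hypothetical enhancement; the real app runs the Qwen edit pipeline here.
    enhanced = img.resize((img.width * 2, img.height * 2))
    return (img, enhanced)  # (before, after) pair for the slider

with gr.Blocks() as demo:
    inp = gr.Image(type="pil", label="Upload")
    out = ImageSlider(label="Before / After", interactive=False)
    inp.change(fn=compare, inputs=inp, outputs=out)

demo.launch()
```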
@@ -153,10 +154,10 @@ def update_dimensions_on_upload(image):

 with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
     with gr.Column(elem_id="col-container"):
-        gr.Markdown("#
+        gr.Markdown("# 🔍 Image Upscaler", elem_id="title")
         gr.Markdown(
             """
-
+            Upscale and enhance your images with AI-powered quality improvement ✨
             <br>
             <div style='text-align: center; margin-top: 1rem;'>
             <a href='https://huggingface.co/spaces/akhaliq/anycoder' target='_blank' style='color: #0071e3; text-decoration: none; font-weight: 500;'>Built with anycoder</a>
@@ -165,30 +166,28 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
         elem_id="description"
     )

-        with gr.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            elem_classes="image-container"
-        )
+        with gr.Column():
+            image = gr.Image(
+                label="Upload Image",
+                type="pil",
+                elem_classes="image-container"
+            )
+
+            with gr.Accordion("⚙️ Advanced Settings", open=False):
+                seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
+                randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
+                true_guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
+                num_inference_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=40, step=1, value=4)
+                height = gr.Slider(label="Height", minimum=256, maximum=2048, step=8, value=1024, visible=False)
+                width = gr.Slider(label="Width", minimum=256, maximum=2048, step=8, value=1024, visible=False)
+
+            upscale_btn = gr.Button("Upscale Image", variant="primary", elem_id="convert-btn", size="lg")
+
+            result = ImageSlider(
+                label="Before / After",
+                interactive=False,
+                elem_classes="image-container"
+            )

         inputs = [
             image, seed, randomize_seed, true_guidance_scale,
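Note the `visible=False` height and width sliders: they never render, but they still act as event inputs, so the handler receives their current values. The hunk context above names an `update_dimensions_on_upload(image)` helper, which presumably fills them from the uploaded image. A hedged sketch of that pattern; the helper body below is an assumption, not code from this Space:

```python
import gradio as gr

def update_dimensions_on_upload(image):
    # Assumed behavior: mirror the uploaded image's size into the
    # hidden sliders, snapping to their step of 8 pixels.
    if image is None:
        return gr.update(), gr.update()
    w = (image.width // 8) * 8
    h = (image.height // 8) * 8
    return gr.update(value=w), gr.update(value=h)

with gr.Blocks() as demo:
    image = gr.Image(type="pil")
    width = gr.Slider(256, 2048, step=8, value=1024, visible=False)
    height = gr.Slider(256, 2048, step=8, value=1024, visible=False)
    image.upload(fn=update_dimensions_on_upload, inputs=image,
                 outputs=[width, height])
```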
@@ -196,9 +195,9 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
     ]
     outputs = [result, seed]

-    #
-
-    fn=
+    # Upscale button click
+    upscale_btn.click(
+        fn=upscale_image,
         inputs=inputs,
         outputs=outputs
     )
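One detail worth calling out in this wiring: Gradio maps the `inputs` list to the handler's parameters positionally, so the order of the list must match the signature of `upscale_image`. A tiny self-contained illustration of that contract:

```python
import gradio as gr

def add(a, b):
    # Values arrive positionally: a <- x, b <- y.
    return a + b

with gr.Blocks() as demo:
    x = gr.Number(value=2)
    y = gr.Number(value=3)
    out = gr.Number(label="Sum")
    btn = gr.Button("Add")
    btn.click(fn=add, inputs=[x, y], outputs=out)
```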
requirements.txt
CHANGED

@@ -7,4 +7,4 @@ dashscope
 kernels
 torchvision
 peft
-torchao==0.11.0
+torchao==0.11.0

(The removed and re-added lines are textually identical; the +1/−1 change is most likely whitespace or a trailing newline, which the rendered diff does not show.)