Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -218,8 +218,36 @@ def encode_image(pil_image):
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"

+# Scheduler configuration for Lightning
+scheduler_config = {
+    "base_image_seq_len": 256,
+    "base_shift": math.log(3),
+    "invert_sigmas": False,
+    "max_image_seq_len": 8192,
+    "max_shift": math.log(3),
+    "num_train_timesteps": 1000,
+    "shift": 1.0,
+    "shift_terminal": None,
+    "stochastic_sampling": False,
+    "time_shift_type": "exponential",
+    "use_beta_sigmas": False,
+    "use_dynamic_shifting": True,
+    "use_exponential_sigmas": False,
+    "use_karras_sigmas": False,
+}
+
+# Initialize scheduler with Lightning config
+scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)
+
 # Load the model pipeline
-pipe = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2509",
+pipe = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2509",
+                                                 scheduler=scheduler,
+                                                 torch_dtype=dtype).to(device)
+pipe.load_lora_weights(
+    "lightx2v/Qwen-Image-Lightning",
+    weight_name="Qwen-Image-Lightning-8steps-V2.0-bf16.safetensors"
+)
+pipe.fuse_lora()

 # --- UI Constants and Helpers ---
 MAX_SEED = np.iinfo(np.int32).max
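For orientation (not part of the commit): with the Lightning LoRA fused and the FlowMatchEulerDiscreteScheduler configured as above, the pipeline is intended to run with few denoising steps and guidance effectively disabled, which is also what the new slider defaults further down suggest. The sketch below shows roughly how such a call could look; the image path, prompt, and keyword arguments follow the usual diffusers QwenImageEdit call signature and are assumptions, not code from this diff.

```python
import torch
from PIL import Image

# Illustrative input; any RGB image works here (placeholder path).
source = Image.open("example.png").convert("RGB")

# With the 8-step Lightning LoRA fused, guidance is typically left at 1.0
# and only a handful of steps are needed.
output = pipe(
    image=[source],                     # Qwen-Image-Edit-2509 takes one or more input images
    prompt="Replace the background with a sunset over the sea",
    true_cfg_scale=1.0,                 # matches the new default in the guidance slider
    num_inference_steps=8,              # matches the 8-step Lightning weights loaded above
    generator=torch.Generator(device=device).manual_seed(0),
).images[0]
output.save("edited.png")
```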
@@ -307,7 +335,10 @@ with gr.Blocks(css=css) as demo:
     gr.Markdown("[Learn more](https://github.com/QwenLM/Qwen-Image) about the Qwen-Image series. Try on [Qwen Chat](https://chat.qwen.ai/), or [download model](https://huggingface.co/Qwen/Qwen-Image-Edit) to run locally with ComfyUI or diffusers.")
     with gr.Row():
         with gr.Column():
-            input_images = gr.Gallery(label="Input Images",
+            input_images = gr.Gallery(label="Input Images",
+                                      show_label=False,
+                                      type="pil",
+                                      interactive=True)

             # result = gr.Image(label="Result", show_label=False, type="pil")
             result = gr.Gallery(label="Result", show_label=False, type="pil")
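A side note on the new multi-image input (not from the diff): depending on the Gradio version, a Gallery used as an input component may hand the handler either bare PIL images or (image, caption) tuples. A small helper like the following can normalize the value before it reaches the pipeline; the name gallery_to_pil and its exact handling are illustrative assumptions.

```python
from PIL import Image

def gallery_to_pil(gallery_value):
    """Collect PIL images from a gr.Gallery value, tolerating (image, caption) tuples."""
    images = []
    for item in gallery_value or []:
        candidate = item[0] if isinstance(item, (tuple, list)) else item
        if isinstance(candidate, Image.Image):
            images.append(candidate.convert("RGB"))
    return images
```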
@@ -340,15 +371,15 @@
                     minimum=1.0,
                     maximum=10.0,
                     step=0.1,
-                    value=
+                    value=1.0
                 )

                 num_inference_steps = gr.Slider(
                     label="Number of inference steps",
                     minimum=1,
-                    maximum=
+                    maximum=40,
                     step=1,
-                    value=
+                    value=8,
                 )

                 height = gr.Slider(