# Lightning-Painter-Multitool / app_inpaint2.py
import spaces
import gradio as gr
import torch
from diffusers import AutoencoderKL, TCDScheduler
from diffusers.models.model_loading_utils import load_state_dict
from gradio_imageslider import ImageSlider
from huggingface_hub import hf_hub_download
from controlnet_union import ControlNetModel_Union
from pipeline_fill_sd_xl import StableDiffusionXLFillPipeline
from PIL import Image, ImageFilter
import numpy as np
# from gradio.sketch.run import create
MODELS = {
"RealVisXL V5.0 Lightning": "SG161222/RealVisXL_V5.0_Lightning",
"Lustify Lightning": "GraydientPlatformAPI/lustify-lightning",
"Juggernaut XL Lightning": "RunDiffusion/Juggernaut-XL-Lightning",
"Juggernaut-XL-V9-GE-RDPhoto2": "AiWise/Juggernaut-XL-V9-GE-RDPhoto2-Lightning_4S",
"SatPony-Lightning": "John6666/satpony-lightning-v2-sdxl"
}
# --- ControlNet and Pipeline Setup ---
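# The union ControlNet is assembled manually (from_config, then loading the
# ProMax safetensors into it), likely because the ProMax variant ships as
# separately named config/weight files inside the base repo rather than as a
# standard from_pretrained layout.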
config_file = hf_hub_download(
"xinsir/controlnet-union-sdxl-1.0",
filename="config_promax.json",
)
config = ControlNetModel_Union.load_config(config_file)
controlnet_model = ControlNetModel_Union.from_config(config)
model_file = hf_hub_download(
"xinsir/controlnet-union-sdxl-1.0",
filename="diffusion_pytorch_model_promax.safetensors",
)
state_dict = load_state_dict(model_file)
model, _, _, _, _ = ControlNetModel_Union._load_pretrained_model(
    controlnet_model, state_dict, model_file, "xinsir/controlnet-union-sdxl-1.0"
)
model.to(device="cuda", dtype=torch.float16)
vae = AutoencoderKL.from_pretrained(
"madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
).to("cuda")
pipe = StableDiffusionXLFillPipeline.from_pretrained(
"SG161222/RealVisXL_V5.0_Lightning",
torch_dtype=torch.float16,
vae=vae,
controlnet=model,
variant="fp16",
)
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
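# TCDScheduler is a few-step sampler, matching the Lightning-distilled
# checkpoints used throughout this app.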
pipe.to("cuda")
print(pipe)
def load_default_pipeline():
"""仅保留,但当前 Inpaint 逻辑未直接使用,可以删除,但保留以防将来扩展。"""
global pipe
pipe = StableDiffusionXLFillPipeline.from_pretrained(
"GraydientPlatformAPI/lustify-lightning",
torch_dtype=torch.float16,
vae=vae,
controlnet=model,
).to("cuda")
print("Default pipeline loaded!")
@spaces.GPU(duration=15)
def fill_image(prompt, image, model_selection, paste_back):
"""
Handles the fill/repair process for inputs from ImageMask (gr. ImageMask). Applies a default 5% expansion to user-drawn masks here.
"""
    global pipe
    print(f"Received image: {image}")
    if image is None:
        yield None, None
        return
    if model_selection in MODELS:
        current_model = pipe.config.get("_name_or_path", "")
        target_model = MODELS[model_selection]
        if current_model != target_model:
            # Free the old pipeline's GPU memory before loading the new one
            del pipe
            torch.cuda.empty_cache()
            pipe = StableDiffusionXLFillPipeline.from_pretrained(
                target_model,
                torch_dtype=torch.float16,
                vae=vae,
                controlnet=model,
            )
            pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
            pipe.to("cuda")
            print(f"Loaded new SDXL model: {target_model}")
    (
        prompt_embeds,
        negative_prompt_embeds,
        pooled_prompt_embeds,
        negative_pooled_prompt_embeds,
    ) = pipe.encode_prompt(prompt, "cuda", True)
source = image["background"]
# 用户绘制的 mask layer(通常是 RGBA)
mask = image["layers"][0]
# 取 alpha 通道并转为二值 mask(255 表示 mask 区域)
alpha_channel = mask.split()[3]
binary_mask = alpha_channel.point(lambda p: 255 if p > 0 else 0).convert("L")
# ==== 扩大 5%(针对 fill_image 的二值 mask) ====
expand_px = max(1, int(min(binary_mask.width, binary_mask.height) * 0.05))
kernel_size = expand_px * 2 + 1
binary_mask = binary_mask.filter(ImageFilter.MaxFilter(kernel_size))
# ==== END 扩大 ====
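    # Illustrative example: on a 1024x1024 mask, expand_px = int(1024 * 0.05) = 51,
    # so MaxFilter runs with a 103-pixel kernel, dilating the mask ~51 px outward.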
    cnet_image = source.copy()
    # Black out the masked region on the ControlNet input image so the
    # ControlNet/pipeline generates content for that region
    cnet_image.paste(0, (0, 0), binary_mask)
    # Call the pipeline; it yields intermediate results, streamed out below
    for image_out in pipe(
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        image=cnet_image,
        # The inpaint flow passes image=cnet_image (the source with the mask
        # region blacked out). The pipeline is assumed to handle the mask
        # internally; if StableDiffusionXLFillPipeline required an explicit
        # mask, this call would need adjusting. Per the original naming and
        # logic, pipe(image=cnet_image) is assumed to fit this fill flow.
    ):
        yield image_out, cnet_image  # streamed intermediate output
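    # Gradio treats generator functions as streaming: each yield above pushes
    # an intermediate (image_out, cnet_image) pair to the ImageSlider.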
print(f"{model_selection=}")
print(f"{paste_back=}")
    # Finally, paste the result back into the original image (if selected)
    if paste_back:
        # image_out is the generated repair; cnet_image was already consumed
        # as the ControlNet input (the blacked-out version). Ideally cnet_image
        # would be refreshed from source.copy() here to avoid confusion with
        # the input, but following the original logic we combine image_out with
        # source/binary_mask: the repair is pasted into a copy of source so
        # that only the masked region is replaced.
        final_output = source.copy()
        image_out_rgba = image_out.convert("RGBA")
        # Paste image_out onto final_output only where binary_mask is 255
        # (i.e. the repaired region)
        final_output.paste(image_out_rgba, (0, 0), binary_mask)
        yield cnet_image, final_output
    else:
        # Without paste-back, return the generated repair image as-is
        yield cnet_image, image_out
def clear_result():
    return gr.update(value=None)
def use_output_as_input(output_image):
"""
Receives the output of ImageSlider (image_out, cnet_image) and returns cnet_image as the new input.
"""
return gr.update(value=output_image[0])
css = """
.nulgradio-container {
    width: 86vw !important;
}
.nulcontain {
    overflow-y: scroll !important;
    padding: 10px 40px !important;
}
div#component-17 {
    height: auto !important;
}
@media screen and (max-width: 600px) {
    .img-row {
        display: block !important;
        margin-bottom: 20px !important;
    }
}
"""
title = """<h1 align="center">Diffusers Image Inpaint</h1>
<div align="center">Upload an image, draw a mask, and enter a prompt to repair/inpaint the masked area.</div>
<div style="display: flex; justify-content: center; align-items: center; text-align: center;">
<p style="display: flex;gap: 6px;">
<a href="https://huggingface.co/spaces/fffiloni/diffusers-image-outpout?duplicate=true">
<img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-md.svg" alt="Duplicate this Space">
</a> to skip the queue and enjoy faster inference on the GPU of your choice
</p>
</div>
"""
with gr.Blocks(css=css, fill_height=True) as demo:
    gr.Markdown(title)
    with gr.Column():
        with gr.Row():
            with gr.Column():
                prompt = gr.Textbox(
                    label="Prompt",
                    info="Describe what to inpaint the mask with",
                    lines=3,
                )
            with gr.Column():
                model_selection = gr.Dropdown(
                    choices=list(MODELS.keys()),
                    value="RealVisXL V5.0 Lightning",
                    label="Model",
                )
                with gr.Row():
                    run_button = gr.Button("Generate")
                    paste_back = gr.Checkbox(True, label="Paste back original")
        with gr.Row(equal_height=False):
            input_image = gr.ImageMask(
                type="pil", label="Input Image", layers=True, elem_classes="img-row"
            )
            result = ImageSlider(
                interactive=False,
                label="Generated Image",
                elem_classes="img-row",
            )
        use_as_input_button = gr.Button("Use as Input Image", visible=False)
    # --- Event Handlers for Inpaint ---
    use_as_input_button.click(
        fn=use_output_as_input,
        inputs=[result],
        outputs=[input_image],
        queue=False,
    )
    # Generates image on button click
    run_button.click(
        fn=clear_result,
        inputs=None,
        outputs=result,
        queue=False,
    ).then(
        fn=lambda: gr.update(visible=False),
        inputs=None,
        outputs=use_as_input_button,
        queue=False,
    ).then(
        fn=fill_image,
        inputs=[prompt, input_image, model_selection, paste_back],
        outputs=[result],
    ).then(
        fn=lambda: gr.update(visible=True),
        inputs=None,
        outputs=use_as_input_button,
        queue=False,
    )
    # Generates image on prompt submit
    prompt.submit(
        fn=clear_result,
        inputs=None,
        outputs=result,
        queue=False,
    ).then(
        fn=lambda: gr.update(visible=False),
        inputs=None,
        outputs=use_as_input_button,
        queue=False,
    ).then(
        fn=fill_image,
        inputs=[prompt, input_image, model_selection, paste_back],
        outputs=[result],
    ).then(
        fn=lambda: gr.update(visible=True),
        inputs=None,
        outputs=use_as_input_button,
        queue=False,
    )
demo.queue(max_size=10).launch(show_error=True)