# iclight-v2-vary / app.py
# FLUX Kontext in-context image editing with optional GFPGAN face enhancement.
import torch
import gradio as gr
import numpy as np
from diffusers import FluxKontextPipeline
from gfpgan import GFPGANer
from PIL import Image
# Device setup
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load FLUX in-context editing pipeline
pipe = FluxKontextPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Kontext-dev",
    torch_dtype=torch.bfloat16 if device.type == "cuda" else torch.float32,
).to(device)
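# Note (assumption): FLUX.1-Kontext-dev is a gated checkpoint on the Hugging Face Hub, so the
# download may require accepting the model license and authenticating (e.g. `huggingface-cli login`).
# If GPU memory is tight, pipe.enable_model_cpu_offload() is an alternative to .to(device).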
# Load face enhancement model
gfpgan = GFPGANer(
    model_path="https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/GFPGANv1.3.pth",
    upscale=1,
    arch="clean",
    channel_multiplier=2,
    bg_upsampler=None,
    device=device,  # pass the torch.device directly
)
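# GFPGANer resolves an https:// model_path by downloading the checkpoint at construction time
# and caching it locally (under the package's gfpgan/weights directory).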

def enhance_face(input_img: Image.Image) -> Image.Image:
    # GFPGAN works on BGR arrays (OpenCV convention), so flip channels on the way in and out.
    img_bgr = np.array(input_img)[:, :, ::-1]
    _, _, output = gfpgan.enhance(img_bgr, has_aligned=False, only_center_face=False, paste_back=True)
    return Image.fromarray(output[:, :, ::-1])

def infer(input_image, prompt, beautify, seed, randomize, steps, guidance_scale):
    if input_image is None:
        raise gr.Error("Please upload an image first.")
    # Pick the seed: random if requested, otherwise the value from the slider.
    if randomize:
        seed = torch.randint(0, 2**32 - 1, ()).item()
    generator = torch.Generator(device=device).manual_seed(int(seed))
    # In-context editing with FLUX Kontext
    out = pipe(
        image=input_image.convert("RGB"),
        prompt=prompt,
        num_inference_steps=int(steps),
        guidance_scale=guidance_scale,
        generator=generator,
    ).images[0]
    # Apply face enhancement if selected
    if beautify:
        out = enhance_face(out)
    return out

# UI setup
with gr.Blocks() as demo:
    gr.Markdown("# FLUX Kontext Editor + Beautify")
    with gr.Row():
        input_image = gr.Image(label="Upload Image", type="pil")
        result = gr.Image(label="Edited Output")
    prompt = gr.Textbox(label="Edit Prompt", placeholder="e.g., 'change background to beach'")
    beautify = gr.Checkbox(label="Beautify Face", value=True)
    seed = gr.Slider(0, 2**32 - 1, value=0, step=1, label="Seed")
    randomize = gr.Checkbox(label="Randomize Seed", value=True)
    steps = gr.Slider(1, 30, value=28, step=1, label="Steps")
    guidance = gr.Slider(1.0, 10.0, value=2.5, step=0.1, label="Guidance Scale")
    run = gr.Button("Run")
    run.click(
        fn=infer,
        inputs=[input_image, prompt, beautify, seed, randomize, steps, guidance],
        outputs=result,
    )
demo.launch()
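# Optional: on shared GPU hosting (e.g. a Hugging Face Space), calling demo.queue() before
# demo.launch() serializes requests so concurrent users do not contend for VRAM.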