import gradio as gr
import numpy as np
import random
import torch
import spaces  # required for the @spaces.GPU decorator on ZeroGPU Spaces
from PIL import Image

from kontext.pipeline_flux_kontext import FluxKontextPipeline
from kontext.scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler
from diffusers import FluxTransformer2DModel
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

# ---------------------------
# utils
# ---------------------------
def resize_by_bucket(images_pil, resolution=512):
    """Resize all images to the aspect-ratio bucket closest to their mean aspect ratio."""
    assert len(images_pil) > 0, "images_pil must not be empty"
    bucket_override = [
        (336, 784), (344, 752), (360, 728), (376, 696),
        (400, 664), (416, 624), (440, 592), (472, 552),
        (512, 512),
        (552, 472), (592, 440), (624, 416), (664, 400),
        (696, 376), (728, 360), (752, 344), (784, 336),
    ]
    # Scale the (height, width) buckets to the target resolution, then snap to multiples of 16
    bucket_override = [(int(h / 512 * resolution), int(w / 512 * resolution)) for h, w in bucket_override]
    bucket_override = [(h // 16 * 16, w // 16 * 16) for h, w in bucket_override]

    aspect_ratios = [img.height / img.width for img in images_pil]
    mean_aspect_ratio = float(np.mean(aspect_ratios))

    new_h, new_w = bucket_override[0]
    min_aspect_diff = abs(new_h / new_w - mean_aspect_ratio)
    for h, w in bucket_override:
        aspect_diff = abs(h / w - mean_aspect_ratio)
        if aspect_diff < min_aspect_diff:
            min_aspect_diff = aspect_diff
            new_h, new_w = h, w

    resized_images = [img.resize((new_w, new_h), resample=Image.BICUBIC) for img in images_pil]
    return resized_images
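
# Worked example (hypothetical sizes): two 768x1024 portrait refs have a mean
# aspect ratio of 1024/768 ≈ 1.33; at resolution=512 the nearest bucket is
# (h=592, w=440), so each ref comes back resized to 440x592:
# >>> resize_by_bucket([Image.new("RGB", (768, 1024)), Image.new("RGB", (768, 1024))])[0].size
# (440, 592)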

# ---------------------------
# pipeline init
# ---------------------------
device = "cuda" if torch.cuda.is_available() else "cpu"

flux_pipeline = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev")
flux_pipeline.scheduler = FlowMatchEulerDiscreteScheduler.from_config(flux_pipeline.scheduler.config)
flux_pipeline.scheduler.config.stochastic_sampling = False
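# (Assumption, based on the flag name and the custom scheduler in
# kontext/scheduling_flow_match_euler_discrete.py: disabling stochastic_sampling
# keeps the Euler updates deterministic for a fixed seed.)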

# precision & device
flux_pipeline.vae.to(device).to(torch.bfloat16)
flux_pipeline.text_encoder.to(device).to(torch.bfloat16)
flux_pipeline.text_encoder_2.to(device).to(torch.bfloat16)

# Swap in the multi-reference transformer weights
ckpt_path = hf_hub_download("NoobDoge/Multi_Ref_Model", "full_model.safetensors")
new_weight = load_file(ckpt_path)
flux_pipeline.transformer.load_state_dict(new_weight)
# flux_pipeline.transformer = FluxTransformer2DModel.from_single_file(ckpt_path, torch_dtype=torch.bfloat16)
flux_pipeline.transformer.to(device).to(torch.bfloat16)
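
# If a checkpoint ever ships with missing or unexpected keys, a non-strict load
# surfaces them instead of raising (sketch; the current checkpoint loads strictly):
# missing, unexpected = flux_pipeline.transformer.load_state_dict(new_weight, strict=False)
# print("missing:", missing, "unexpected:", unexpected)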

# ---------------------------
# constants
# ---------------------------
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1568   # upper bound for the width/height sliders below

# ---------------------------
# inference
# ---------------------------
@spaces.GPU  # requests a GPU for each call on ZeroGPU Spaces
def infer(
    prompt,
    ref1,            # PIL.Image or None
    ref2,            # PIL.Image or None (optional)
    seed,
    randomize_seed,
    width,
    height,
    max_area,
    guidance_scale,      # not passed to the pipeline yet; wire it into the call below if needed
    num_inference_steps,
    progress=gr.Progress(track_tqdm=True),
):
    # Assemble the list of optional reference images
    refs = [x for x in (ref1, ref2) if x is not None]
    if len(refs) == 0:
        raise gr.Error("Please upload at least one reference image (ref1 or ref2).")
    
    # Seed handling
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(int(seed))

    # Bucket-resize the reference images
    raw_images = resize_by_bucket(refs, resolution=max_area)

    # Inference
    with torch.no_grad():
        out = flux_pipeline(
            image=raw_images,
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=int(num_inference_steps),
            max_area=max_area ** 2,
            generator=generator,
            # To use guidance_scale, confirm the pipeline accepts the argument, then enable:
            # guidance_scale=float(guidance_scale),
        )
        output_img = out.images[0]

    return output_img, int(seed)
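
# Standalone usage sketch (assumes the example assets below are present), bypassing the UI:
# img, used_seed = infer(
#     "Place the butterfly from the first image onto the landscape of the second image.",
#     Image.open("assets/BgYeqlzB_src1.png"),
#     Image.open("assets/BgYeqlzB_src2.png"),
#     seed=0, randomize_seed=True, width=512, height=512,
#     max_area=512, guidance_scale=2.5, num_inference_steps=28,
# )
# img.save("result.png")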

# ---------------------------
# UI
# ---------------------------
example_triples = [
    ["assets/-1KG_J6e_src1.png", "assets/-1KG_J6e_src2.png",
     "Transform the first image (pizza) into an oil painting style, using the warm and textured brushstrokes, color gradients, and artistic composition observed in the second image (stone house painting)."],
    ["assets/BgYeqlzB_src1.png", "assets/BgYeqlzB_src2.png",
     "Place the butterfly from the first image onto the landscape of the second image, positioning it either flying above the river near the bridge or perched on one of the trees in the foreground. Adjust the butterfly's size and blending to ensure it fits naturally in the scene."],
    ["assets/H99pnBoC_src1.png", "assets/H99pnBoC_src2.png",
     "Insert the person from the first image into the autumn park setting of the second image. Position them standing next to the person on the bench and have them interact by tipping their hat in greeting."],
    ["assets/pmkexBUx_src1.png", "assets/pmkexBUx_src2.png",
     "Place the person wearing a wide-brimmed hat and beige scarf/shawl from the first image onto the mountain ridge in the second image. Position them as if standing on the ridge, facing the valley view, to create an immersive outdoors scene."],
    ["assets/Uwn0WEbC_src1.png", "assets/Uwn0WEbC_src2.png",
     "Change the clothing of the person in the first image to match the attire shown in the second image, ensuring the details of the dress, including the blue bodice, white blouse, and the overall rustic aesthetic, are faithfully replicated."],
     ["assets/Uwn0WEbC_src2.png", "assets/Uwn0WEbC_src1.png",
     "Change the clothing of the person in the first image to match the attire shown in the second image, ensuring the details of the clothes, and the overall rustic aesthetic, are faithfully replicated."]
]

css = """ #col-container { margin: 0 auto; max-width: 800px; } """

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# Multi Ref Edit Demo")

        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0, variant="primary")

        # Two input images (ref2 may be left empty)
        with gr.Row():
            ref1_comp = gr.Image(label="Input Image 1", type="pil")
            ref2_comp = gr.Image(label="Input Image 2 (optional)", type="pil")

        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            seed_comp = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed_comp = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                width_comp = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )
                height_comp = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )
                max_area_comp = gr.Slider(
                    label="Max Area",
                    minimum=512,
                    maximum=1024,
                    step=512,
                    value=512,
                )

            with gr.Row():
                guidance_scale_comp = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=2.5,
                )
                num_inference_steps_comp = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=28,
                )

        # NOTE: inputs are ordered [ref1, ref2, prompt]; the example entries follow the same order
        gr.Examples(
            examples=example_triples,
            inputs=[ref1_comp, ref2_comp, prompt],
            label="Examples (2 refs + prompt)",
            # run_on_click=True,  # uncomment to run automatically when an example is clicked
        )

    # Note: do not pass [ref1, ref2] as a single nested list to inputs!
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[
            prompt,
            ref1_comp,
            ref2_comp,              # ref2 可为空
            seed_comp,
            randomize_seed_comp,
            width_comp,
            height_comp,
            max_area_comp,
            guidance_scale_comp,
            num_inference_steps_comp,
        ],
        outputs=[result, seed_comp],
    )

if __name__ == "__main__":
    demo.launch()