Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -9,6 +9,10 @@ vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype
 scheduler = DDIMScheduler.from_pretrained(model, subfolder="scheduler")
 pipe = DiffusionPipeline.from_pretrained(model, vae=vae, scheduler=scheduler, torch_dtype=torch.float16, use_safetensors=True, variant="fp16").to("cuda")
 
+model_15 = "runwayml/stable-diffusion-v1-5"
+scheduler_15 = DDIMScheduler.from_pretrained(model_15, subfolder="scheduler")
+pipe_15 = DiffusionPipeline.from_pretrained(model_15, vae=vae, scheduler=scheduler_15, torch_dtype=torch.float16, use_safetensors=True, variant="fp16").to("cuda")
+
 #pipe.enable_model_cpu_offload()
 pipe.enable_vae_tiling()
 
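The first hunk loads a second, SD1.5 pipeline (pipe_15) next to the existing SDXL one, reusing the fp16-fix VAE together with its own DDIM scheduler. The surrounding context only shows apply_hidiffusion(pipe) and pipe.enable_vae_tiling() being applied to the SDXL pipeline; a minimal sketch of the matching setup for pipe_15, which this diff does not show and is only an assumption, would be:

# Assumed follow-up for the new SD1.5 pipeline; the diff only shows the SDXL
# `pipe` being patched and tiled. Runs in the same module context as app.py.
from hidiffusion import apply_hidiffusion  # same helper used on `pipe` in the hunk context

apply_hidiffusion(pipe_15)   # patch the SD1.5 pipeline for above-training-resolution output
pipe_15.enable_vae_tiling()  # decode the VAE in tiles to bound memory use at 1024x1024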
@@ -17,14 +21,28 @@ apply_hidiffusion(pipe)
 
 @spaces.GPU
 def run_hidiffusion(prompt, negative_prompt, progress=gr.Progress(track_tqdm=True)):
-    return pipe(prompt, guidance_scale=7.5, height=
-
+    return pipe(prompt, guidance_scale=7.5, height=2048, width=2048, eta=1.0, negative_prompt=negative_prompt, num_inference_steps=25).images[0]
+
+@spaces.GPU
+def run_hidiffusion_15(prompt, negative_prompt, progress=gr.Progress(track_tqdm=True)):
+    return pipe_15(prompt, guidance_scale=7.5, height=1024, width=1024, eta=1.0, negative_prompt=negative_prompt, num_inference_steps=25).images[0]
+
+
 with gr.Blocks() as demo:
-
-
-
+    gr.Markdown("# HiDiffusion Demo")
+    gr.Markdown("Make Stable Diffusion generate higher-resolution images than it was trained for")
+    with gr.Tab("SDXL in 2048x2048"):
+        with gr.Row():
+            prompt = gr.Textbox(label="Prompt")
+            negative_prompt = gr.Textbox(label="Negative Prompt")
+        btn = gr.Button("Run")
+    with gr.Tab("SD1.5 in 1024x1024"):
+        with gr.Row():
+            prompt_15 = gr.Textbox(label="Prompt")
+            negative_prompt_15 = gr.Textbox(label="Negative Prompt")
+        btn_15 = gr.Button("Run")
     output = gr.Image()
 
     btn.click(fn=run_hidiffusion, inputs=[prompt, negative_prompt], outputs=[output])
-
+    btn_15.click(fn=run_hidiffusion_15, inputs=[prompt_15, negative_prompt_15], outputs=[output])
 demo.launch()
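Both handlers are decorated with @spaces.GPU, which on a ZeroGPU Space allocates a GPU only for the duration of each call. A 25-step SDXL run at 2048x2048 is heavy, so one possible variant, not part of this commit, is to request a longer allocation window through the decorator's duration argument (in seconds):

# Possible variant (assumption, not in this commit): ask ZeroGPU for a longer
# slot for the heavier 2048x2048 SDXL generation. Reuses the module's existing
# `pipe`, `spaces`, and `gr` objects.
@spaces.GPU(duration=120)
def run_hidiffusion(prompt, negative_prompt, progress=gr.Progress(track_tqdm=True)):
    return pipe(prompt, guidance_scale=7.5, height=2048, width=2048, eta=1.0,
                negative_prompt=negative_prompt, num_inference_steps=25).images[0]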
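For reference, here is a sketch of how the complete app.py plausibly reads after this commit. Everything outside the two hunks is an assumption: the imports, the SDXL model id held in `model`, and the exact placement of apply_hidiffusion are reconstructed from the hunk context lines rather than shown in the diff.

import spaces
import torch
import gradio as gr
from diffusers import AutoencoderKL, DDIMScheduler, DiffusionPipeline
from hidiffusion import apply_hidiffusion

# `model` is assumed to be the SDXL base checkpoint; only the name `model` appears in the diff.
model = "stabilityai/stable-diffusion-xl-base-1.0"
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
scheduler = DDIMScheduler.from_pretrained(model, subfolder="scheduler")
pipe = DiffusionPipeline.from_pretrained(model, vae=vae, scheduler=scheduler, torch_dtype=torch.float16,
                                         use_safetensors=True, variant="fp16").to("cuda")

# New in this commit: an SD1.5 pipeline that, as in the diff, reuses the SDXL fp16-fix VAE.
model_15 = "runwayml/stable-diffusion-v1-5"
scheduler_15 = DDIMScheduler.from_pretrained(model_15, subfolder="scheduler")
pipe_15 = DiffusionPipeline.from_pretrained(model_15, vae=vae, scheduler=scheduler_15, torch_dtype=torch.float16,
                                            use_safetensors=True, variant="fp16").to("cuda")

#pipe.enable_model_cpu_offload()
pipe.enable_vae_tiling()

apply_hidiffusion(pipe)  # visible only as hunk context; whether pipe_15 receives the same call is not shown

@spaces.GPU
def run_hidiffusion(prompt, negative_prompt, progress=gr.Progress(track_tqdm=True)):
    return pipe(prompt, guidance_scale=7.5, height=2048, width=2048, eta=1.0,
                negative_prompt=negative_prompt, num_inference_steps=25).images[0]

@spaces.GPU
def run_hidiffusion_15(prompt, negative_prompt, progress=gr.Progress(track_tqdm=True)):
    return pipe_15(prompt, guidance_scale=7.5, height=1024, width=1024, eta=1.0,
                   negative_prompt=negative_prompt, num_inference_steps=25).images[0]

with gr.Blocks() as demo:
    gr.Markdown("# HiDiffusion Demo")
    gr.Markdown("Make Stable Diffusion generate higher-resolution images than it was trained for")
    with gr.Tab("SDXL in 2048x2048"):
        with gr.Row():
            prompt = gr.Textbox(label="Prompt")
            negative_prompt = gr.Textbox(label="Negative Prompt")
        btn = gr.Button("Run")
    with gr.Tab("SD1.5 in 1024x1024"):
        with gr.Row():
            prompt_15 = gr.Textbox(label="Prompt")
            negative_prompt_15 = gr.Textbox(label="Negative Prompt")
        btn_15 = gr.Button("Run")
    output = gr.Image()

    btn.click(fn=run_hidiffusion, inputs=[prompt, negative_prompt], outputs=[output])
    btn_15.click(fn=run_hidiffusion_15, inputs=[prompt_15, negative_prompt_15], outputs=[output])

demo.launch()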