import os

import torch
from controlnet_aux import OpenposeDetector
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline, UniPCMultistepScheduler
from diffusers.utils import load_image, make_image_grid
from diffusers.utils.torch_utils import randn_tensor
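
# Load the OpenPose ControlNet checkpoint for Stable Diffusion v1.5.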
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-openpose",
    torch_dtype=torch.float16
)
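
# Load SD v1.5 with the community perturbed-attention guidance (PAG) + ControlNet pipeline.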
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="hyoungwoncho/sd_perturbed_attention_guidance_controlnet",
    controlnet=controlnet,
    torch_dtype=torch.float16
)
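
# Move the pipeline to the GPU and switch to the UniPC scheduler.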
device = "cuda"
pipe = pipe.to(device)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
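
# Extract an OpenPose skeleton from a reference photo to use as the control image.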
openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
original_image = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/person.png"
)
openpose_image = openpose(original_image)
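
# Unconditional generation: empty prompt, with classifier-free guidance disabled
# below via guidance_scale=0.0.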
prompts = ""
base_dir = "./results/openpose/"
os.makedirs(base_dir, exist_ok=True)
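
# Draw a fixed initial latent (64x64 latents -> 512x512 output) so both samples
# start from the same noise. Note: generator=None leaves this unseeded, so
# results will vary across runs.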
latent_input = randn_tensor(shape=(1, 4, 64, 64), generator=None, device=device, dtype=torch.float16)
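
# Baseline sample: PAG disabled (pag_scale=0.0).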
output_baseline = pipe(
    prompts,
    image=openpose_image,
    num_inference_steps=50,
    guidance_scale=0.0,
    pag_scale=0.0,
    pag_applied_layers_index=["m0"],
    latents=latent_input
).images[0]
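
# PAG sample: identical settings and starting latent, but with perturbed-attention
# guidance applied to the mid-block self-attention layer ("m0").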
output_pag = pipe(
    prompts,
    image=openpose_image,
    num_inference_steps=50,
    guidance_scale=0.0,
    pag_scale=4.0,
    pag_applied_layers_index=["m0"],
    latents=latent_input
).images[0]
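
# Save a side-by-side comparison: input photo, pose map, baseline, and PAG result.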
grid_image = make_image_grid([original_image, openpose_image, output_baseline, output_pag], rows=1, cols=4)
grid_image.save(os.path.join(base_dir, "sample.png"))