# sample_gradio_vanilla.py (forked from facebookresearch/DiT)
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Sample new images from a pre-trained DiT.
"""
import torch
import argparse
import gradio as gr
import torchvision
from torchvision.utils import make_grid
from diffusers.models import AutoencoderKL
from modules.diffusion import create_diffusion
from download import find_model
from modules.dit_builder import DiT_models
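# Allow TF32 on matmul and cuDNN: slightly lower precision in exchange for
# faster sampling on Ampere-or-newer NVIDIA GPUs.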
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True


def sample(class_idx, cfg_scale, num_sampling_steps):
    # Setup PyTorch:
    torch.manual_seed(args.seed)
    torch.set_grad_enabled(False)
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # if args.ckpt is None:
    #     assert args.model == "DiT-XL/2", "Only DiT-XL/2 models are available for auto-download."
    #     assert args.image_size in [256, 512]
    #     assert args.num_classes == 1000

    # Load model:
    latent_size = args.image_size // 8  # the SD VAE downsamples images by a factor of 8
    model = DiT_models[args.model](
        input_size=latent_size,
        num_classes=args.num_classes
    ).to(device)

    # Auto-download a pre-trained model or load a custom DiT checkpoint from train.py:
    ckpt_path = args.ckpt or f"DiT-XL-2-{args.image_size}x{args.image_size}.pt"
    state_dict = find_model(ckpt_path)
    model.load_state_dict(state_dict)
    model.eval()  # important: disables dropout and other train-time behavior

    diffusion = create_diffusion(str(num_sampling_steps))
    vae = AutoencoderKL.from_pretrained(f"stabilityai/sd-vae-ft-{args.vae}").to(device)
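    # Note: --vae picks between Stability AI's two fine-tuned SD VAE decoders,
    # sd-vae-ft-ema and sd-vae-ft-mse.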

    # Labels to condition the model with: four copies of the selected class index.
    class_labels = [class_idx] * 4

    # Create sampling noise:
    n = len(class_labels)
    z = torch.randn(n, 4, latent_size, latent_size, device=device)
    y = torch.tensor(class_labels, device=device)

    # Setup classifier-free guidance:
    z = torch.cat([z, z], 0)
    y_null = torch.tensor([1000] * n, device=device)  # 1000 is the null (unconditional) class
    y = torch.cat([y, y_null], 0)
    model_kwargs = dict(y=y, cfg_scale=cfg_scale)
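    # In the reference DiT code, forward_with_cfg runs the duplicated batch through
    # the model once and forms guided noise predictions as
    # eps_uncond + cfg_scale * (eps_cond - eps_uncond), which is why z and y above
    # each carry a conditional half and an unconditional (null-class) half.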

    # Sample images:
    samples = diffusion.p_sample_loop(
        model.forward_with_cfg, z.shape, z, clip_denoised=False,
        model_kwargs=model_kwargs, progress=True, device=device
    )
    samples, _ = samples.chunk(2, dim=0)  # drop the unconditional (null-class) half
    samples = vae.decode(samples / 0.18215).sample  # 0.18215 is the SD VAE latent scale factor

    # Arrange the four samples in a grid and convert to a PIL image for Gradio:
    samples = make_grid(samples, nrow=4, normalize=True, value_range=(-1, 1))
    samples = torchvision.transforms.ToPILImage()(samples)
    return samples
    # To save to disk instead, use torchvision.utils.save_image:
    # save_image(samples, "sample.png", nrow=4, normalize=True, value_range=(-1, 1))
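
# Programmatic use (a sketch; assumes this module's CLI args have been parsed
# so the global `args` exists, e.g. by running the script directly):
#
#     image = sample(class_idx=417, cfg_scale=4.0, num_sampling_steps=50)
#     image.save("sample.png")  # sample() returns a PIL.Image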


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, choices=list(DiT_models.keys()), default="DiT-XL/2")
    parser.add_argument("--vae", type=str, choices=["ema", "mse"], default="mse")
    parser.add_argument("--image-size", type=int, choices=[256, 512], default=256)
    parser.add_argument("--num-classes", type=int, default=1000)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--ckpt", type=str, default=None,
                        help="Optional path to a DiT checkpoint (default: auto-download a pre-trained DiT-XL/2 model).")
    args = parser.parse_args()

    demo = gr.Interface(
        fn=sample,
        inputs=[
            # Valid ImageNet labels are 0-999; 1000 is reserved for the null class.
            gr.Slider(minimum=0, maximum=999, value=417, step=1, label="ImageNet class index"),
            gr.Slider(minimum=1, maximum=20, value=4, step=0.1, label="CFG scale"),
            gr.Slider(minimum=5, maximum=500, value=250, step=1, label="Sampling steps"),
        ],
        outputs=[
            gr.Image()
        ],
    )
    demo.launch()
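
# Example launch (a sketch; auto-downloading the checkpoint requires network access):
#
#     python sample_gradio_vanilla.py --image-size 256 --seed 0
#
# then open the local URL Gradio prints (http://127.0.0.1:7860 by default).
# Pass --ckpt /path/to/checkpoint.pt to sample from a model trained with train.py.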