```python
# extract_code.py (excerpt): load the trained VQ-VAE and open the LMDB database
# that will hold the extracted codes
...(args.path, transform=transform)
loader = DataLoader(dataset, batch_size=128, shuffle=False, num_workers=4)

model = VQVAE()
model.load_state_dict(torch.load(args.ckpt))
model = model.to(device)
model.eval()

map_size = 100 * 1024 * 1024 * 1024

env = lmdb.open(args.name, map_...
```
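The excerpt stops just before the extraction loop itself. A rough sketch of what the remainder could look like, assuming `VQVAE.encode` returns the quantized top/bottom index maps and that each image's codes are pickled into the LMDB database under an integer key; the helper name `extract`, the dict layout, and the `'length'` key are illustrative assumptions, not the repository's exact code:

```python
import pickle

import torch


def extract(lmdb_env, loader, model, device):
    # Illustrative sketch: encode every image with the frozen VQ-VAE and store
    # the top/bottom code indices in LMDB, keyed by a running integer index.
    index = 0

    with lmdb_env.begin(write=True) as txn:
        for img, _ in loader:
            img = img.to(device)

            with torch.no_grad():
                # Assumption: VQVAE.encode returns (quant_t, quant_b, diff, id_t, id_b).
                _, _, _, id_t, id_b = model.encode(img)

            for top, bottom in zip(id_t.cpu().numpy(), id_b.cpu().numpy()):
                row = {'top': top, 'bottom': bottom}
                txn.put(str(index).encode('utf-8'), pickle.dumps(row))
                index += 1

        txn.put('length'.encode('utf-8'), str(index).encode('utf-8'))
```

Keeping a running count under a separate key makes it easy for a dataset wrapper to report its length later without scanning the whole database.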
Repository files (master branch): checkpoint, sample, .gitignore, LICENSE, README.md, dataset.py, extract_code.py, pixelsnail.py, pixelsnail_mnist.py, sample.py, scheduler.py, stage1_sample.png, train_pixelsnail.py, train_vqvae.py, vqvae.py

Recent changes:
- Added distributed support for VQ-VAE (Jun 1, 2020)
- vqvae.py: Fixed inplace ReLU (Jan 23, 2021)
- vqvae_560.pt: Added VQ-VAE checkpoint (Feb 8, 2020)

vq-vae-2-pytorch

Implementation of Generating Diverse High-Fidelity Images with VQ-VAE-2 in PyTorch ...
Feature request for AMP support in VQ-VAE training. So far, I tried naively modifying the train function in train_vqvae.py like so:

```python
# ...
for i, (img, label) in enumerate(loader):
    model.zero_grad()

    img = img.to(device)

    with torch.cuda.amp.autocast():
        out, latent_loss = ...
```
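A full AMP step would also need a GradScaler around the backward pass and optimizer step. A minimal sketch continuing the loop above; `criterion`, `latent_loss_weight`, `optimizer`, `model`, `loader`, and `device` are assumed to come from the surrounding training script, and the loss form (reconstruction MSE plus a weighted latent loss) is an assumption about train_vqvae.py rather than its exact code:

```python
import torch
from torch import nn

criterion = nn.MSELoss()
scaler = torch.cuda.amp.GradScaler()
latent_loss_weight = 0.25  # assumed commitment-loss weight, not the repo's verified value

for i, (img, label) in enumerate(loader):
    model.zero_grad()
    img = img.to(device)

    # Run the forward pass and loss computation in mixed precision.
    with torch.cuda.amp.autocast():
        out, latent_loss = model(img)
        recon_loss = criterion(out, img)
        loss = recon_loss + latent_loss_weight * latent_loss.mean()

    # Scale the loss to avoid fp16 gradient underflow, then step through the
    # scaler so steps with inf/NaN gradients are skipped.
    scaler.scale(loss).backward()
    scaler.step(optimizer)
    scaler.update()
```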
```python
# sample.py (excerpt)
...vqvae, device)
model_top = load_model(model_top, args.top, device)
model_bottom = load_model(model_bottom, args.bottom, device)

model_vqvae = load_model('vqvae', args.vqvae, device)
model_top = load_model('pixelsnail_top', args.top, device)
model_bottom = load_model('pixelsnail...
```
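The string arguments in the second group of calls suggest a load_model helper that builds the network from a name and then loads the checkpoint, instead of receiving an already constructed module. A minimal sketch of that pattern, assuming the repository's VQVAE and PixelSNAIL classes; the PixelSNAIL constructor arguments and the checkpoint layout (a raw state_dict or one wrapped under 'model') are placeholders, not the project's actual values:

```python
import torch

from vqvae import VQVAE
from pixelsnail import PixelSNAIL


def load_model(name, checkpoint, device):
    # Build the architecture that matches the requested name.
    if name == 'vqvae':
        model = VQVAE()
    elif name == 'pixelsnail_top':
        # Placeholder hyperparameters; the real ones would come from training args.
        model = PixelSNAIL([32, 32], 512, 256, 5, 4, 4, 256)
    elif name == 'pixelsnail_bottom':
        model = PixelSNAIL([64, 64], 512, 256, 5, 4, 4, 256)
    else:
        raise ValueError(f'unknown model: {name}')

    ckpt = torch.load(checkpoint, map_location=device)

    # Accept either a raw state_dict or a dict that wraps it under 'model'.
    state_dict = ckpt['model'] if 'model' in ckpt else ckpt

    model.load_state_dict(state_dict)
    model = model.to(device)
    model.eval()

    return model
```

With a helper like this, the three calls in the excerpt resolve their checkpoint paths from args and return evaluation-ready modules on the target device.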