import pickle
import torch

with open('ffhq.pkl', 'rb') as f:
    G = pickle.load(f)['G_ema'].cuda()  # torch.nn.Module
z = torch.randn([1, G.z_dim]).cuda()    # latent codes
c = None                                # class labels (not used in this example)
img = G(z, c)                           # NCHW, float32, dynamic range [-1, +1], no truncation
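The generator returns a float32 tensor in [-1, +1]; a minimal follow-up sketch (the output filename 'sample.png' is only an example, not part of the original snippet) converts it to uint8 and writes it out with PIL:

import PIL.Image

# Map the [-1, +1] NCHW output to [0, 255] uint8 NHWC and save the first sample.
img_u8 = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
PIL.Image.fromarray(img_u8[0].cpu().numpy(), 'RGB').save('sample.png')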
import os

# This is required for the speech-to-text module. Get your USERNAME from
# https://console.bluemix.net/docs/services/speech-to-text/getting-started.html
IBM_WATSON_CRED_USERNAME = os.environ.get("IBM_WATSON_CRED_USERNAME", None)
IBM_WATSON_CRED_PASSWORD = os.environ.get("IBM_WATSON_CRED_PASSWORD", None)
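Because os.environ.get falls back to None, a small guard (a sketch added here, not part of the original module) can fail fast when the credentials are missing:

# Abort early if the Watson credentials were not exported in the environment.
if IBM_WATSON_CRED_USERNAME is None or IBM_WATSON_CRED_PASSWORD is None:
    raise RuntimeError(
        "Set IBM_WATSON_CRED_USERNAME and IBM_WATSON_CRED_PASSWORD before "
        "using the speech-to-text module."
    )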
# Print a summary of the generator before evaluation.
G = copy.deepcopy(args.G).eval().requires_grad_(False).to(device)
if rank == 0 and args.verbose:
    z = torch.empty([1, G.z_dim], device=device)
    c = torch.empty([1, G.c_dim], device=device)
    misc.print_module_summary(G, [z, c])

# Calculate each metric.
for metric in args.metrics:
    ...
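For reference, the loop body in the upstream StyleGAN calc_metrics.py evaluates each metric through metric_main and reports the result on rank 0; the sketch below is paraphrased from memory, so the exact arguments (args.dataset_kwargs, args.num_gpus, args.run_dir, args.network_pkl) should be treated as assumptions:

# Evaluate one metric and let rank 0 write the result to the run directory.
result_dict = metric_main.calc_metric(metric=metric, G=G, dataset_kwargs=args.dataset_kwargs,
    num_gpus=args.num_gpus, rank=rank, device=device)
if rank == 0:
    metric_main.report_metric(result_dict, run_dir=args.run_dir, snapshot_pkl=args.network_pkl)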