render_multiview_images.py
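The script below loads a trained generator checkpoint along with its EMA weights, samples one latent code per seed, renders that code from five yaw angles centered on the curriculum's mean horizontal camera angle, and saves each seed's views as a single image grid in the output directory.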

import argparse
import os

import matplotlib.pyplot as plt  # used by show()
import torch
from torchvision.utils import save_image
from tqdm import tqdm

import curriculums

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def show(tensor_img):
    """Display a (possibly batched) CHW image tensor with matplotlib."""
    if len(tensor_img.shape) > 3:
        tensor_img = tensor_img.squeeze(0)
    tensor_img = tensor_img.permute(1, 2, 0).squeeze().cpu().numpy()
    plt.imshow(tensor_img)
    plt.show()


def generate_img(gen, z, **kwargs):
    """Render one image from latent z; returns a min-max-normalized numpy
    copy, the raw image tensor, and the depth map."""
    with torch.no_grad():
        img, depth_map = gen.staged_forward(z, **kwargs)
        tensor_img = img.detach()

        # Min-max normalize to [0, 1] for display.
        img_min = img.min()
        img_max = img.max()
        img = (img - img_min) / (img_max - img_min)
        img = img.permute(0, 2, 3, 1).squeeze().cpu().numpy()
    return img, tensor_img, depth_map


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('path', type=str)
    parser.add_argument('--seeds', type=int, nargs='+', default=[0, 1, 2])  # parsed as ints: torch.manual_seed requires an int
    parser.add_argument('--output_dir', type=str, default='imgs')
    parser.add_argument('--max_batch_size', type=int, default=2400000)
    parser.add_argument('--lock_view_dependence', action='store_true')
    parser.add_argument('--image_size', type=int, default=256)
    parser.add_argument('--ray_step_multiplier', type=int, default=2)
    parser.add_argument('--curriculum', type=str, default='CelebA')
    opt = parser.parse_args()

    # Look up the named curriculum and override it with evaluation settings:
    # more ray steps, a fixed camera (zero pose stddev), and no NeRF noise.
    curriculum = getattr(curriculums, opt.curriculum)
    curriculum['num_steps'] = curriculum[0]['num_steps'] * opt.ray_step_multiplier
    curriculum['img_size'] = opt.image_size
    curriculum['psi'] = 0.7
    curriculum['v_stddev'] = 0
    curriculum['h_stddev'] = 0
    curriculum['lock_view_dependence'] = opt.lock_view_dependence
    curriculum['last_back'] = curriculum.get('eval_last_back', False)
    curriculum['nerf_noise'] = 0
    # Keep only the string-keyed (global) entries; integer keys index
    # per-stage training settings that are not needed for rendering.
    curriculum = {key: value for key, value in curriculum.items() if type(key) is str}

    os.makedirs(opt.output_dir, exist_ok=True)

    # Load the generator checkpoint and overwrite its weights with the EMA
    # copy stored alongside it (the EMA path is derived from the checkpoint
    # path by splitting on the substring 'generator').
    generator = torch.load(opt.path, map_location=torch.device(device))
    ema_file = opt.path.split('generator')[0] + 'ema.pth'
    ema = torch.load(ema_file, map_location=torch.device(device))
    ema.copy_to(generator.parameters())
    generator.set_device(device)
    generator.eval()

    # Five yaw angles centered on the curriculum's mean horizontal angle.
    face_angles = [-0.5, -0.25, 0., 0.25, 0.5]
    face_angles = [a + curriculum['h_mean'] for a in face_angles]

    for seed in tqdm(opt.seeds):
        images = []
        for i, yaw in enumerate(face_angles):
            curriculum['h_mean'] = yaw
            # Re-seed before sampling so every view of a given seed
            # uses the same latent code.
            torch.manual_seed(seed)
            z = torch.randn((1, 256), device=device)
            img, tensor_img, depth_map = generate_img(generator, z, **curriculum)
            images.append(tensor_img)
        save_image(torch.cat(images), os.path.join(opt.output_dir, f'grid_{seed}.png'), normalize=True)
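The script expects the curriculums module to expose one dict per dataset (e.g. CelebA) that mixes integer keys, indexing per-stage training settings, with string keys holding global rendering parameters; the access curriculum[0]['num_steps'] and the string-key filter above only make sense for that shape. A minimal sketch of that structure follows; apart from the keys the script actually reads ('num_steps', 'h_mean', 'eval_last_back'), the key names and all concrete values here are illustrative assumptions, not the real curriculum:

    import math

    # Hypothetical curriculum illustrating the structure the script relies on:
    # integer keys mark training stages, string keys hold global settings.
    CelebA = {
        0: {'num_steps': 12, 'img_size': 64},  # stage starting at step 0; 'num_steps' is read by the script
        'h_mean': math.pi / 2,    # mean horizontal (yaw) camera angle, read by the script
        'v_mean': math.pi / 2,    # mean vertical (pitch) camera angle (assumed)
        'fov': 12,                # camera field of view in degrees (assumed)
        'eval_last_back': True,   # picked up via curriculum.get('eval_last_back', False)
    }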
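Because the EMA path is built by splitting the checkpoint path on the substring 'generator', the checkpoint filename must contain that word, and the matching ema.pth must sit in the same directory. Assuming a hypothetical layout of checkpoints/CelebA/generator.pth next to checkpoints/CelebA/ema.pth, the script could be invoked as:

    python render_multiview_images.py checkpoints/CelebA/generator.pth --seeds 0 1 2 --image_size 256 --output_dir imgs

Each seed then produces imgs/grid_<seed>.png containing the five views.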
