webcam_inference.py

import torch
import cv2

from loss.loss_discriminator import *
from loss.loss_generator import *
from network.blocks import *
from network.model import *
from webcam_demo.webcam_extraction_conversion import *
from params.params import path_to_chkpt

"""Init"""
# Paths
path_to_model_weights = path_to_chkpt
path_to_embedding = 'e_hat_video.tar'

device = torch.device("cuda:0")
cpu = torch.device("cpu")

# Load the trained generator checkpoint and the precomputed identity embedding
checkpoint = torch.load(path_to_model_weights, map_location=cpu)
e_hat = torch.load(path_to_embedding, map_location=cpu)
e_hat = e_hat['e_hat'].to(device)

# Build the generator in finetuning mode around the loaded embedding
G = Generator(256, finetuning=True, e_finetuning=e_hat)
G.eval()

"""Training Init"""
G.load_state_dict(checkpoint['G_state_dict'])
G.to(device)
G.finetuning_init()

"""Main"""
print('PRESS Q TO EXIT')
cap = cv2.VideoCapture(0)

with torch.no_grad():
    while True:
        # Grab a webcam frame and its rasterized landmark image, both CxHxW
        x, g_y, _ = generate_landmarks(cap=cap, device=device, pad=50)

        # Add a batch dimension and scale pixel values to [0, 1]
        g_y = g_y.unsqueeze(0) / 255
        x = x.unsqueeze(0) / 255

        # Forward pass: synthesize a face from the landmark image and the embedding
        x_hat = G(g_y, e_hat)

        # Move each image to HxWxC on the CPU for display
        out1 = x_hat.transpose(1, 3)[0].to(cpu).numpy()
        out2 = x.transpose(1, 3)[0].to(cpu).numpy()
        out3 = g_y.transpose(1, 3)[0].to(cpu).numpy()

        cv2.imshow('fake', cv2.cvtColor(out1, cv2.COLOR_BGR2RGB))
        cv2.imshow('me', cv2.cvtColor(out2, cv2.COLOR_BGR2RGB))
        cv2.imshow('landmark', cv2.cvtColor(out3, cv2.COLOR_BGR2RGB))

        if cv2.waitKey(1) == ord('q'):
            break

cap.release()
cv2.destroyAllWindows()
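
The script depends on generate_landmarks from webcam_demo/webcam_extraction_conversion.py, which is not shown on this page. As a rough sketch of what that helper is expected to return (a webcam frame and a rasterized landmark image, both as CxHxW float tensors in the 0-255 range, matching the unpacking and the /255 scaling above), here is a hypothetical stand-in built on the face_alignment library. The function name sketch_generate_landmarks, the circle-based landmark drawing, and the omission of the face crop/pad/resize step are assumptions for illustration, not the repo's actual implementation.

    # Minimal sketch only; assumes the face_alignment library is installed.
    import cv2
    import numpy as np
    import torch
    import face_alignment

    # Older face_alignment releases name this LandmarksType._2D instead of TWO_D
    fa = face_alignment.FaceAlignment(face_alignment.LandmarksType.TWO_D, device='cpu')

    def sketch_generate_landmarks(cap, device, pad=50):
        """Hypothetical stand-in for generate_landmarks: frame + drawn landmarks."""
        ok, frame = cap.read()
        if not ok:
            raise RuntimeError('webcam read failed')
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        preds = fa.get_landmarks(frame)      # list of (68, 2) arrays, or None
        canvas = np.zeros_like(frame)
        if preds:
            # Crude landmark rendering; the real helper draws colored contours
            # and also crops/pads around the face and resizes to the model input
            for px, py in preds[0]:
                cv2.circle(canvas, (int(px), int(py)), 2, (0, 255, 0), -1)

        x = torch.from_numpy(frame).permute(2, 0, 1).float().to(device)
        g_y = torch.from_numpy(canvas).permute(2, 0, 1).float().to(device)
        return x, g_y, preds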