colab_util.py (forked from shunsukesaito/PIFu)

import numpy as np
import cv2
import torch
from base64 import b64encode
from IPython.display import HTML
from tqdm.auto import tqdm  # tqdm_notebook is deprecated; tqdm.auto picks the right frontend
# Util function for loading meshes
from pytorch3d.io import load_objs_as_meshes
# Data structures and functions for rendering
from pytorch3d.structures import Meshes
from pytorch3d.renderer import (
    look_at_view_transform,
    OpenGLOrthographicCameras,  # renamed FoVOrthographicCameras in newer PyTorch3D releases
    PointLights,
    RasterizationSettings,
    MeshRenderer,
    MeshRasterizer,
    HardPhongShader,
    TexturesVertex,
)


def set_renderer():
    # Setup
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)

    # Initialize an orthographic camera 2 units from the object, rotated
    # 180 degrees in azimuth so the mesh faces the viewer.
    R, T = look_at_view_transform(2.0, 0, 180)
    cameras = OpenGLOrthographicCameras(device=device, R=R, T=T)

    # Rasterize at 512x512 with no blur; leaving bin_size/max_faces_per_bin
    # as None lets PyTorch3D choose the coarse-to-fine binning heuristically.
    raster_settings = RasterizationSettings(
        image_size=512,
        blur_radius=0.0,
        faces_per_pixel=1,
        bin_size=None,
        max_faces_per_bin=None,
    )
    lights = PointLights(device=device, location=((2.0, 2.0, 2.0),))
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras,
            raster_settings=raster_settings,
        ),
        shader=HardPhongShader(
            device=device,
            cameras=cameras,
            lights=lights,
        ),
    )
    return renderer
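
# Usage sketch for set_renderer (assumes a CUDA device and a Meshes object
# `mesh` already on it; R/T are optional per-call camera pose overrides):
#   renderer = set_renderer()
#   images = renderer(mesh)            # (N, 512, 512, 4) RGBA tensor in [0, 1]
#   images = renderer(mesh, R=R, T=T)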


def get_verts_rgb_colors(obj_path):
    # Collect per-vertex RGB values from an OBJ whose vertex lines carry
    # colors ("v x y z r g b" -> 7 whitespace-separated tokens).
    rgb_colors = []
    with open(obj_path) as f:
        for line in f:
            ls = line.split(' ')
            if len(ls) == 7:
                rgb_colors.append(ls[-3:])
    return np.array(rgb_colors, dtype='float32')[None, :, :]
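
# Example of the vertex line this parser expects (a sketch; exporters that
# bake per-vertex colors write this extended OBJ form):
#   "v 0.1 -0.5 0.3 0.8 0.2 0.1"  -> 7 tokens; the last three are r g b in [0, 1]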


def generate_video_from_obj(obj_path, video_path, renderer):
    # Setup
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)

    # Build two vertex textures: the original per-vertex colors, and a flat
    # 75% gray used to show the untextured geometry.
    verts_rgb_colors = get_verts_rgb_colors(obj_path)
    verts_rgb_colors = torch.from_numpy(verts_rgb_colors).to(device)
    textures = TexturesVertex(verts_features=verts_rgb_colors)
    wo_textures = TexturesVertex(verts_features=torch.ones_like(verts_rgb_colors) * 0.75)

    # Load the obj and rebuild it as two meshes, one per texture.
    mesh = load_objs_as_meshes([obj_path], device=device)
    verts = mesh.verts_list()
    faces = mesh.faces_list()
    mesh_w_tex = Meshes(verts, faces, textures)
    mesh_wo_tex = Meshes(verts, faces, wo_textures)

    # Create a VideoWriter for the side-by-side 1024x512 turntable video.
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(video_path, fourcc, 20.0, (1024, 512))

    # 90 frames x 4 degrees of azimuth = one full turn around the mesh.
    for i in tqdm(range(90)):
        R, T = look_at_view_transform(1.8, 0, i * 4, device=device)
        # Drop the alpha channel, flip RGB -> BGR for OpenCV, scale to [0, 255].
        images_w_tex = renderer(mesh_w_tex, R=R, T=T)
        images_w_tex = np.clip(images_w_tex[0, ..., :3].cpu().numpy(), 0.0, 1.0)[:, :, ::-1] * 255
        images_wo_tex = renderer(mesh_wo_tex, R=R, T=T)
        images_wo_tex = np.clip(images_wo_tex[0, ..., :3].cpu().numpy(), 0.0, 1.0)[:, :, ::-1] * 255
        image = np.concatenate([images_w_tex, images_wo_tex], axis=1)
        out.write(image.astype('uint8'))
    out.release()
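
# Usage sketch ("result.obj" / "result.mp4" are placeholder paths, not part
# of the original script):
#   renderer = set_renderer()
#   generate_video_from_obj("result.obj", "result.mp4", renderer)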


def video(path):
    # Read the mp4 and embed it in the notebook as a base64 data URL.
    with open(path, 'rb') as f:
        mp4 = f.read()
    data_url = "data:video/mp4;base64," + b64encode(mp4).decode()
    return HTML('<video width=500 controls loop> <source src="%s" type="video/mp4"></video>' % data_url)
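
# In a Colab/Jupyter cell the returned HTML object renders inline when it is
# the cell's last expression ("result.mp4" is a placeholder path):
#   video("result.mp4")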