Commit: modify comments
yfeng95 committed Jul 19, 2018
1 parent 4be7c33 commit 94e93a1
Showing 6 changed files with 35 additions and 22 deletions.
17 changes: 14 additions & 3 deletions examples/1_pipeline.py
@@ -14,11 +14,14 @@
from face3d import mesh_cython

# ------------------------------ 1. load mesh data
# -- mesh data consists of: vertices, triangles, color (optional), texture (optional)
# -- here we use colors to represent the texture of the face surface
C = sio.loadmat('Data/example1.mat')
vertices = C['vertices']; colors = C['colors']; triangles = C['triangles']
colors = colors/np.max(colors)

# ------------------------------ 2. modify vertices(transformation. change position of obj)
# -- change the position of the mesh object in world space
# scale. target size=180 for example
s = 180/(np.max(vertices[:,1]) - np.min(vertices[:,1]))
# rotate 30 degrees for example
@@ -28,22 +31,30 @@
transformed_vertices = mesh.transform.similarity_transform(vertices, s, R, t)

# ------------------------------ 3. modify colors/texture(add light)
# -- add point lights. light positions are defined in world space
# set lights
light_positions = np.array([[-128, -128, 300]])
light_intensities = np.array([[1, 1, 1]])
lit_colors = mesh.light.add_light(transformed_vertices, triangles, colors, light_positions, light_intensities)

# ------------------------------ 4. modify vertices(projection. change position of camera)
projected_vertices = mesh.transform.lookat_camera(transformed_vertices, eye = [0, 0, 200], at = np.array([0, 0, 0]), up = None)
# -- transform object from world space to camera space (i.e. the world as seen by the observer).
# -- omit if using standard camera
camera_vertices = mesh.transform.lookat_camera(transformed_vertices, eye = [0, 0, 200], at = np.array([0, 0, 0]), up = None)
# -- project object from 3d world space into 2d image plane. orthographic or perspective projection
projected_vertices = mesh.transform.orthographic_project(camera_vertices)

# ------------------------------ 5. render(to 2d image)
# set h, w of rendering
h = w = 256
# change to image coords for rendering
image_vertices = mesh.transform.to_image(projected_vertices, h, w)
# render
rendering = face3d.mesh_cython.render.render_colors(image_vertices, triangles, lit_colors, h, w)

# ---- show
# ---- show rendering
plt.imshow(rendering)
plt.show()

# ---- show mesh
mesh.vis.plot_mesh(camera_vertices, triangles)
plt.show()
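
The geometry half of this pipeline is compact enough to sketch in plain numpy. Below is a minimal sketch of what steps 2, 4 and 5 compute, assuming similarity_transform applies a scaled rotation plus a translation and to_image recenters the origin and flips y; the mesh.transform calls above are the authoritative implementations, and their conventions may differ in detail.

import numpy as np

def similarity_transform(vertices, s, R, t):
    # vertices: (N, 3); s: scalar scale; R: (3, 3) rotation; t: (3,) translation
    return s * vertices.dot(R.T) + np.asarray(t)

def orthographic_project(vertices):
    # orthographic projection: x and y pass through unchanged; z is kept for depth
    return vertices.copy()

def to_image(vertices, h, w):
    # move the origin from the image center to the top-left corner and flip y,
    # since +y points up in world space but row indices grow downward
    image_vertices = vertices.copy()
    image_vertices[:, 0] = vertices[:, 0] + w / 2
    image_vertices[:, 1] = h / 2 - vertices[:, 1] - 1
    return image_vertices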
9 changes: 4 additions & 5 deletions examples/3_transform.py
@@ -29,9 +29,9 @@ def transform_test(vertices, obj, camera, h = 256, w = 256):
else:

## world space to camera space. (Look at camera.)
projected_vertices = mesh.transform.lookat_camera(transformed_vertices, camera['eye'], camera['at'], camera['up'])
## camera space to image space. (Projection) if orth project, ignore
projected_vertices = mesh.transform.perspective_project(projected_vertices, camera['fovy'], near = camera['near'], far = camera['far'])
camera_vertices = mesh.transform.lookat_camera(transformed_vertices, camera['eye'], camera['at'], camera['up'])
## camera space to image space. (Projection) if orth project, omit
projected_vertices = mesh.transform.perspective_project(camera_vertices, camera['fovy'], near = camera['near'], far = camera['far'])
## to image coords(position in image)
image_vertices = mesh.transform.to_image(projected_vertices, h, w, True)

@@ -82,7 +82,7 @@ def transform_test(vertices, obj, camera, h = 256, w = 256):
io.imsave('{}/1_2_{}_{}.jpg'.format(save_folder, i, angle), image)
subprocess.call('convert {} {}/1_*.jpg {}'.format(options, save_folder, save_folder + '/obj.gif'), shell=True)

## 2. fix obj position(center=[0,0,0], front pose). change camera position&direction, using perspective proj(fovy fixed)
## 2. fix obj position(center=[0,0,0], front pose). change camera position&direction, using perspective projection(fovy fixed)
obj['s'] = scale_init
obj['angles'] = [0, 0, 0]
obj['t'] = [0, 0, 0]
@@ -92,7 +92,6 @@ def transform_test(vertices, obj, camera, h = 256, w = 256):
camera['at'] = [0, 0, 0]
camera['near'] = 1000
camera['far'] = -100

# eye position
camera['fovy'] = 30
camera['up'] = [0, 1, 0] #
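
For reference, the look-at and perspective steps exercised by transform_test amount to the following. This is a minimal sketch under common graphics conventions (camera looking down -z); the real mesh.transform.perspective_project also takes near/far parameters, which this sketch omits.

import numpy as np

def lookat_camera(vertices, eye, at=None, up=None):
    # build an orthonormal camera frame (x right, y up, z backward) and
    # express the vertices in that frame: world space -> camera space
    eye = np.asarray(eye, dtype=np.float64)
    at = np.zeros(3) if at is None else np.asarray(at, dtype=np.float64)
    up = np.array([0., 1., 0.]) if up is None else np.asarray(up, dtype=np.float64)
    z = eye - at; z = z / np.linalg.norm(z)        # camera looks down -z
    x = np.cross(up, z); x = x / np.linalg.norm(x)
    y = np.cross(z, x)
    R = np.stack([x, y, z])                        # rows: camera axes in world coords
    return (vertices - eye).dot(R.T)

def perspective_project(vertices, fovy):
    # divide x and y by depth, scaled so the vertical field of view is fovy degrees
    focal = 1.0 / np.tan(np.deg2rad(fovy) / 2.0)
    projected = vertices.copy()
    projected[:, 0] = focal * vertices[:, 0] / -vertices[:, 2]
    projected[:, 1] = focal * vertices[:, 1] / -vertices[:, 2]
    return projected                               # z kept unchanged for depth ordering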
2 changes: 1 addition & 1 deletion examples/4_light.py
@@ -33,7 +33,7 @@ def light_test(vertices, light_positions, light_intensities, h = 256, w = 256):
s = 180/(np.max(vertices[:,1]) - np.min(vertices[:,1]))
R = mesh.transform.angle2matrix([0, 0, 0])
t = [0, 0, 0]
vertices = mesh.transform.similarity_transform(vertices, s, R, t)
vertices = mesh.transform.similarity_transform(vertices, s, R, t) # transformed vertices

# save settings
save_folder = 'results/light'
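
light_test sweeps light_positions and light_intensities through mesh.light.add_light. Below is a minimal sketch of per-vertex diffuse (Lambertian) point lighting, the effect this example demonstrates; it assumes per-vertex normals are already computed from the triangle list, and the real add_light may differ (e.g. include an ambient term).

import numpy as np

def diffuse_point_light(vertices, normals, colors, light_positions, light_intensities):
    # per-vertex Lambertian shading: each light contributes
    # max(0, cos(angle between normal and light direction)) * intensity * albedo
    lit_colors = np.zeros_like(colors)
    for pos, intensity in zip(light_positions, light_intensities):
        to_light = pos - vertices                                # (N, 3) vertex-to-light vectors
        to_light = to_light / np.linalg.norm(to_light, axis=1, keepdims=True)
        lambert = np.clip(np.sum(normals * to_light, axis=1, keepdims=True), 0, 1)
        lit_colors += lambert * intensity * colors
    return np.clip(lit_colors, 0, 1)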
2 changes: 1 addition & 1 deletion examples/6_generate_image_map.py
@@ -28,7 +28,7 @@
t = [0, 0, 0]
transformed_vertices = mesh.transform.similarity_transform(vertices, s, R, t)

# ------------------------------ render(to 2d image)
# ------------------------------ render settings(to 2d image)
# set h, w of rendering
h = w = 256
# change to image coords for rendering
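
The render_colors call used throughout these examples rasterizes per-vertex attributes into an image. Below is a slow pure-Python sketch of that idea, assuming larger z means closer to the viewer; the mesh_cython version is the fast, authoritative implementation.

import numpy as np

def barycentric(x, y, pts):
    # solve [p1-p0, p2-p0] @ [w1, w2] = p - p0 for the last two barycentric weights
    m = np.column_stack((pts[1] - pts[0], pts[2] - pts[0]))
    if abs(np.linalg.det(m)) < 1e-8:
        return None                                # degenerate triangle
    w1, w2 = np.linalg.solve(m, np.array([x, y], dtype=np.float64) - pts[0])
    return np.array([1.0 - w1 - w2, w1, w2])

def render_colors(image_vertices, triangles, colors, h, w):
    # naive rasterizer: for each triangle, test the pixels in its bounding box
    # with barycentric weights and keep the nearest surface via a z-buffer
    image = np.zeros((h, w, 3), dtype=np.float32)
    depth = np.full((h, w), -1e8, dtype=np.float32)
    for tri in triangles:
        v = image_vertices[tri]                    # (3, 3): x, y, z of the corners
        u0 = max(int(np.floor(v[:, 0].min())), 0)
        u1 = min(int(np.ceil(v[:, 0].max())), w - 1)
        r0 = max(int(np.floor(v[:, 1].min())), 0)
        r1 = min(int(np.ceil(v[:, 1].max())), h - 1)
        for y in range(r0, r1 + 1):
            for x in range(u0, u1 + 1):
                wts = barycentric(x, y, v[:, :2])
                if wts is None or (wts < 0).any():
                    continue                       # pixel is outside the triangle
                z = wts.dot(v[:, 2])
                if z > depth[y, x]:                # larger z assumed closer here
                    depth[y, x] = z
                    image[y, x] = wts.dot(colors[tri])
    return image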
9 changes: 5 additions & 4 deletions examples/7_generate_uv_map.py
@@ -34,7 +34,7 @@ def process_uv(uv_coords, uv_h = 256, uv_w = 256):
t = [0, 0, 0]
transformed_vertices = mesh.transform.similarity_transform(vertices, s, R, t)
# --load uv coords
uv_coords = face3d.morphable_model.load.load_uv_coords('Data/BFM/Out/BFM_UV.mat') #
uv_coords = face3d.morphable_model.load.load_uv_coords('Data/BFM/Out/BFM_UV.mat')

# -- start
save_folder = 'results/uv_map'
@@ -54,8 +54,9 @@ def process_uv(uv_coords, uv_h = 256, uv_w = 256):
#-- for face reconstruction & alignment (dense correspondences)
# To some extent, when the uv space is regular, a position map is a special case of a geometry image (geometry information recorded in a regular 2D image)
# Notice: a position map doesn't exist alone; it depends on the corresponding rendering (the 2D facial image).
# Attribute is position (with respect to image space; be careful when using perspective projection)
image_vertices = mesh.transform.to_image(transformed_vertices, image_h, image_w) # use orth projection here
# Attribute is the position with respect to the image coordinate system.
projected_vertices = transformed_vertices.copy() # use standard camera & orth projection here
image_vertices = mesh.transform.to_image(projected_vertices, image_h, image_w)
position = image_vertices.copy()
position[:,2] = position[:,2] - np.min(position[:,2]) # translate z
attribute = position
@@ -71,7 +72,7 @@ def process_uv(uv_coords, uv_h = 256, uv_w = 256):
# uv_texture_map_rec = cv2.remap(image, uv_position_map[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))
# io.imsave('{}/uv_texture_map_rec.jpg'.format(save_folder), np.squeeze(uv_texture_map_rec))

#-- 3. general geometry image. attribute = vertices/transformed_vertices
#-- 3. general geometry image. attribute = vertices or transformed_vertices
# TODO
#-- 4. attribute = normals
# TODO
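
The position map built in this file treats each vertex's image-space position as a 3-channel color and rasterizes it over the UV layout instead of over the image, so each UV texel records where its surface point lands in the rendered face. A minimal sketch reusing the render_colors sketch above; the names (image_vertices, uv_coords, triangles, uv_h, uv_w) follow the script, and uv_coords are assumed already scaled to texel coordinates as process_uv does.

# per-vertex attribute: image-space position, with z shifted to start at 0
position = image_vertices.copy()
position[:, 2] = position[:, 2] - np.min(position[:, 2])

# rasterize the attribute over the UV layout: a (uv_h, uv_w, 3) position map
uv_position_map = render_colors(uv_coords, triangles, position, uv_h, uv_w)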
18 changes: 10 additions & 8 deletions examples/8_generate_posmap_300WLP.py
@@ -22,21 +22,18 @@ def process_uv(uv_coords, uv_h = 256, uv_w = 256):
uv_coords = np.hstack((uv_coords, np.zeros((uv_coords.shape[0], 1)))) # add z
return uv_coords

def run_posmap_300W_LP(image_path, save_folder, uv_h = 256, uv_w = 256, image_h = 256, image_w = 256):
def run_posmap_300W_LP(bfm, image_path, mat_path, save_folder, uv_h = 256, uv_w = 256, image_h = 256, image_w = 256):
# 1. load image and fitted parameters
image_name = image_path.strip().split('/')[-1]
image = io.imread(image_path)/255.
[h, w, c] = image.shape

mat_path = image_path.replace('jpg', 'mat')
info = sio.loadmat(mat_path)
pose_para = info['Pose_Para'].T.astype(np.float32)
shape_para = info['Shape_Para'].astype(np.float32)
exp_para = info['Exp_Para'].astype(np.float32)

# 2. generate mesh
# load bfm
bfm = MorphabelModel('Data/BFM/Out/BFM.mat')
# generate shape
vertices = bfm.generate_vertices(shape_para, exp_para)
# transform mesh
@@ -58,13 +55,14 @@ def run_posmap_300W_LP(image_path, save_folder, uv_h = 256, uv_w = 256, image_h
bottom - (bottom - top) / 2.0])
old_size = (right - left + bottom - top)/2
size = int(old_size*1.5)
# random perturbation
# random perturbation. you can change the numbers
marg = old_size*0.1
t_x = np.random.rand()*marg*2 - marg
t_y = np.random.rand()*marg*2 - marg
center[0] = center[0]+t_x; center[1] = center[1]+t_y
size = size*(np.random.rand()*0.2 + 0.9)

# crop and record the transform parameters
src_pts = np.array([[center[0]-size/2, center[1]-size/2], [center[0] - size/2, center[1]+size/2], [center[0]+size/2, center[1]-size/2]])
DST_PTS = np.array([[0, 0], [0, image_h - 1], [image_w - 1, 0]])
tform = skimage.transform.estimate_transform('similarity', src_pts, DST_PTS)
@@ -74,10 +72,10 @@ def run_posmap_300W_LP(image_path, save_folder, uv_h = 256, uv_w = 256, image_h
position = image_vertices.copy()
position[:, 2] = 1
position = np.dot(position, tform.params.T)
position[:, 2] = projected_vertices[:, 2]*tform.params[0, 0] # scale z
position[:, 2] = image_vertices[:, 2]*tform.params[0, 0] # scale z
position[:, 2] = position[:, 2] - np.min(position[:, 2]) # translate z

# 4. uv position map: render position to uv space
# 4. uv position map: render position in uv space
uv_position_map = mesh_cython.render.render_colors(uv_coords, bfm.full_triangles, position, uv_h, uv_w, c = 3)

# 5. save files
@@ -105,7 +103,11 @@ def run_posmap_300W_LP(image_path, save_folder, uv_h = 256, uv_w = 256, image_h
uv_coords = face3d.morphable_model.load.load_uv_coords('Data/BFM/Out/BFM_UV.mat') #
uv_coords = process_uv(uv_coords, uv_h, uv_w)

# load bfm
bfm = MorphabelModel('Data/BFM/Out/BFM.mat')

# run
image_path = 'Data/IBUG_image_008_1_0.jpg'
run_posmap_300W_LP(image_path, save_folder)
mat_path = 'Data/IBUG_image_008_1_0.mat'
run_posmap_300W_LP(bfm, image_path, mat_path, save_folder)
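
The crop in step 3 of this script works because a 2D similarity transform is determined by three point correspondences: the same 3x3 matrix that warps the image is applied to the mesh's image-space vertices, keeping mesh and crop aligned. A standalone sketch with hypothetical stand-in values for src_pts and image_vertices:

import numpy as np
import skimage.transform

image_vertices = np.random.rand(100, 3) * 100   # hypothetical (N, 3) image-space vertices

# three corresponding points determine a 2D similarity (scale + rotation + translation)
src_pts = np.array([[10., 20.], [10., 120.], [110., 20.]])   # crop-box corners in the source image
DST_PTS = np.array([[0., 0.], [0., 255.], [255., 0.]])       # corners of the 256x256 output
tform = skimage.transform.estimate_transform('similarity', src_pts, DST_PTS)

# map x, y through the homogeneous 3x3 matrix, then rescale z by the transform's
# scale factor (tform.params[0, 0] = s*cos(theta), which equals s when rotation is 0)
position = np.column_stack((image_vertices[:, :2], np.ones(len(image_vertices))))
position = position.dot(tform.params.T)
position[:, 2] = image_vertices[:, 2] * tform.params[0, 0]
position[:, 2] = position[:, 2] - np.min(position[:, 2])     # shift z so depth starts at 0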
