From 94e93a19b2a815fb750b4921fc277a48f8769a56 Mon Sep 17 00:00:00 2001
From: YadiraF
Date: Thu, 19 Jul 2018 12:27:11 +0800
Subject: [PATCH] modify comments

---
 examples/1_pipeline.py               | 17 ++++++++++++++---
 examples/3_transform.py              |  9 ++++-----
 examples/4_light.py                  |  2 +-
 examples/6_generate_image_map.py     |  2 +-
 examples/7_generate_uv_map.py        |  9 +++++----
 examples/8_generate_posmap_300WLP.py | 18 ++++++++++--------
 6 files changed, 35 insertions(+), 22 deletions(-)

diff --git a/examples/1_pipeline.py b/examples/1_pipeline.py
index 03f71fc..a0bc6e7 100644
--- a/examples/1_pipeline.py
+++ b/examples/1_pipeline.py
@@ -14,11 +14,14 @@ from face3d import mesh_cython
 
 # ------------------------------ 1. load mesh data
+# -- mesh data consists of: vertices, triangles, color(optional), texture(optional)
+# -- here use colors to represent the texture of face surface
 C = sio.loadmat('Data/example1.mat')
 vertices = C['vertices']; colors = C['colors']; triangles = C['triangles']
 colors = colors/np.max(colors)
 
 # ------------------------------ 2. modify vertices(transformation. change position of obj)
+# -- change the position of mesh object in world space
 # scale. target size=180 for example
 s = 180/(np.max(vertices[:,1]) - np.min(vertices[:,1]))
 # rotate 30 degree for example
@@ -28,22 +31,30 @@ transformed_vertices = mesh.transform.similarity_transform(vertices, s, R, t)
 
 # ------------------------------ 3. modify colors/texture(add light)
+# -- add point lights. light positions are defined in world space
 # set lights
 light_positions = np.array([[-128, -128, 300]])
 light_intensities = np.array([[1, 1, 1]])
 lit_colors = mesh.light.add_light(transformed_vertices, triangles, colors, light_positions, light_intensities)
 
 # ------------------------------ 4. modify vertices(projection. change position of camera)
-projected_vertices = mesh.transform.lookat_camera(transformed_vertices, eye = [0, 0, 200], at = np.array([0, 0, 0]), up = None)
+# -- transform object from world space to camera space(what the world is in the eye of observer).
+# -- omit if using standard camera
+camera_vertices = mesh.transform.lookat_camera(transformed_vertices, eye = [0, 0, 200], at = np.array([0, 0, 0]), up = None)
+# -- project object from 3d world space into 2d image plane. orthographic or perspective projection
+projected_vertices = mesh.transform.orthographic_project(camera_vertices)
 
 # ------------------------------ 5. render(to 2d image)
 # set h, w of rendering
 h = w = 256
 # change to image coords for rendering
 image_vertices = mesh.transform.to_image(projected_vertices, h, w)
+# render
 rendering = face3d.mesh_cython.render.render_colors(image_vertices, triangles, lit_colors, h, w)
 
-# ---- show
+# ---- show rendering
 plt.imshow(rendering)
 plt.show()
-
+# ---- show mesh
+mesh.vis.plot_mesh(camera_vertices, triangles)
+plt.show()
diff --git a/examples/3_transform.py b/examples/3_transform.py
index 926e34e..fdb8f38 100644
--- a/examples/3_transform.py
+++ b/examples/3_transform.py
@@ -29,9 +29,9 @@ def transform_test(vertices, obj, camera, h = 256, w = 256):
     else:
         ## world space to camera space. (Look at camera.)
-        projected_vertices = mesh.transform.lookat_camera(transformed_vertices, camera['eye'], camera['at'], camera['up'])
-        ## camera space to image space. (Projection) if orth project, ignore
-        projected_vertices = mesh.transform.perspective_project(projected_vertices, camera['fovy'], near = camera['near'], far = camera['far'])
+        camera_vertices = mesh.transform.lookat_camera(transformed_vertices, camera['eye'], camera['at'], camera['up'])
+        ## camera space to image space. (Projection) if orth project, omit
+        projected_vertices = mesh.transform.perspective_project(camera_vertices, camera['fovy'], near = camera['near'], far = camera['far'])
     ## to image coords(position in image)
     image_vertices = mesh.transform.to_image(projected_vertices, h, w, True)
@@ -82,7 +82,7 @@ def transform_test(vertices, obj, camera, h = 256, w = 256):
     io.imsave('{}/1_2_{}_{}.jpg'.format(save_folder, i, angle), image)
 subprocess.call('convert {} {}/1_*.jpg {}'.format(options, save_folder, save_folder + '/obj.gif'), shell=True)
 
-## 2. fix obj position(center=[0,0,0], front pose). change camera position&direction, using perspective proj(fovy fixed)
+## 2. fix obj position(center=[0,0,0], front pose). change camera position&direction, using perspective projection(fovy fixed)
 obj['s'] = scale_init
 obj['angles'] = [0, 0, 0]
 obj['t'] = [0, 0, 0]
@@ -92,7 +92,6 @@ def transform_test(vertices, obj, camera, h = 256, w = 256):
 camera['at'] = [0, 0, 0]
 camera['near'] = 1000
 camera['far'] = -100
-# eye position
 camera['fovy'] = 30
 camera['up'] = [0, 1, 0] #
diff --git a/examples/4_light.py b/examples/4_light.py
index e05d1c9..297a5b4 100644
--- a/examples/4_light.py
+++ b/examples/4_light.py
@@ -33,7 +33,7 @@ def light_test(vertices, light_positions, light_intensities, h = 256, w = 256):
 s = 180/(np.max(vertices[:,1]) - np.min(vertices[:,1]))
 R = mesh.transform.angle2matrix([0, 0, 0])
 t = [0, 0, 0]
-vertices = mesh.transform.similarity_transform(vertices, s, R, t)
+vertices = mesh.transform.similarity_transform(vertices, s, R, t) # transformed vertices
 
 # save settings
 save_folder = 'results/light'
diff --git a/examples/6_generate_image_map.py b/examples/6_generate_image_map.py
index d076635..0ef2bcc 100644
--- a/examples/6_generate_image_map.py
+++ b/examples/6_generate_image_map.py
@@ -28,7 +28,7 @@ t = [0, 0, 0]
 transformed_vertices = mesh.transform.similarity_transform(vertices, s, R, t)
 
-# ------------------------------ render(to 2d image)
+# ------------------------------ render settings(to 2d image)
 # set h, w of rendering
 h = w = 256
 # change to image coords for rendering
diff --git a/examples/7_generate_uv_map.py b/examples/7_generate_uv_map.py
index 528d428..930cff8 100644
--- a/examples/7_generate_uv_map.py
+++ b/examples/7_generate_uv_map.py
@@ -34,7 +34,7 @@ def process_uv(uv_coords, uv_h = 256, uv_w = 256):
 t = [0, 0, 0]
 transformed_vertices = mesh.transform.similarity_transform(vertices, s, R, t)
 # --load uv coords
-uv_coords = face3d.morphable_model.load.load_uv_coords('Data/BFM/Out/BFM_UV.mat') #
+uv_coords = face3d.morphable_model.load.load_uv_coords('Data/BFM/Out/BFM_UV.mat')
 
 # -- start
 save_folder = 'results/uv_map'
@@ -54,8 +54,9 @@ def process_uv(uv_coords, uv_h = 256, uv_w = 256):
 #-- for face reconstruction & alginment(dense correspondences)
 # To some extent, when uv space is regular, position map is a subclass of geometry image(recording geometry information in regular image)
 # Notice: position map doesn't exit alone, it depends on the corresponding rendering(2d facical image).
-# Attribute is position(with respect to image space, be careful when using perpestive projection)
-image_vertices = mesh.transform.to_image(transformed_vertices, image_h, image_w) # use orth projection here
+# Attribute is the position with respect to image coords system.
+projected_vertices = transformed_vertices.copy() # use standard camera & orth projection here
+image_vertices = mesh.transform.to_image(projected_vertices, image_h, image_w)
 position = image_vertices.copy()
 position[:,2] = position[:,2] - np.min(position[:,2]) # translate z
 attribute = position
@@ -71,7 +72,7 @@ def process_uv(uv_coords, uv_h = 256, uv_w = 256):
 # uv_texture_map_rec = cv2.remap(image, uv_position_map[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))
 # io.imsave('{}/uv_texture_map_rec.jpg'.format(save_folder), np.squeeze(uv_texture_map_rec))
 
-#-- 3. general geometry image. attribute = vertices/transformed_vertices
+#-- 3. general geometry image. attribute = vertices or transformed_vertices
 # TODO
 #-- 4. attribute = normals
 # TODO
diff --git a/examples/8_generate_posmap_300WLP.py b/examples/8_generate_posmap_300WLP.py
index f60109a..3c1025f 100644
--- a/examples/8_generate_posmap_300WLP.py
+++ b/examples/8_generate_posmap_300WLP.py
@@ -22,21 +22,18 @@ def process_uv(uv_coords, uv_h = 256, uv_w = 256):
     uv_coords = np.hstack((uv_coords, np.zeros((uv_coords.shape[0], 1)))) # add z
     return uv_coords
 
-def run_posmap_300W_LP(image_path, save_folder, uv_h = 256, uv_w = 256, image_h = 256, image_w = 256):
+def run_posmap_300W_LP(bfm, image_path, mat_path, save_folder, uv_h = 256, uv_w = 256, image_h = 256, image_w = 256):
     # 1. load image and fitted parameters
     image_name = image_path.strip().split('/')[-1]
     image = io.imread(image_path)/255.
     [h, w, c] = image.shape
 
-    mat_path = image_path.replace('jpg', 'mat')
     info = sio.loadmat(mat_path)
     pose_para = info['Pose_Para'].T.astype(np.float32)
     shape_para = info['Shape_Para'].astype(np.float32)
     exp_para = info['Exp_Para'].astype(np.float32)
 
     # 2. generate mesh
-    # load bfm
-    bfm = MorphabelModel('Data/BFM/Out/BFM.mat')
     # generate shape
     vertices = bfm.generate_vertices(shape_para, exp_para)
     # transform mesh
@@ -58,13 +55,14 @@ def run_posmap_300W_LP(image_path, save_folder, uv_h = 256, uv_w = 256, image_h
                       bottom - (bottom - top) / 2.0])
     old_size = (right - left + bottom - top)/2
     size = int(old_size*1.5)
-    # random pertube
+    # random perturb. you can change the numbers
     marg = old_size*0.1
     t_x = np.random.rand()*marg*2 - marg
     t_y = np.random.rand()*marg*2 - marg
     center[0] = center[0]+t_x; center[1] = center[1]+t_y
     size = size*(np.random.rand()*0.2 + 0.9)
+    # crop and record the transform parameters
     src_pts = np.array([[center[0]-size/2, center[1]-size/2], [center[0] - size/2, center[1]+size/2], [center[0]+size/2, center[1]-size/2]])
     DST_PTS = np.array([[0, 0], [0, image_h - 1], [image_w - 1, 0]])
     tform = skimage.transform.estimate_transform('similarity', src_pts, DST_PTS)
@@ -74,10 +72,10 @@ def run_posmap_300W_LP(image_path, save_folder, uv_h = 256, uv_w = 256, image_h
     position = image_vertices.copy()
     position[:, 2] = 1
     position = np.dot(position, tform.params.T)
-    position[:, 2] = projected_vertices[:, 2]*tform.params[0, 0] # scale z
+    position[:, 2] = image_vertices[:, 2]*tform.params[0, 0] # scale z
     position[:, 2] = position[:, 2] - np.min(position[:, 2]) # translate z
 
-    # 4. uv position map: render position to uv space
+    # 4. uv position map: render position in uv space
     uv_position_map = mesh_cython.render.render_colors(uv_coords, bfm.full_triangles, position, uv_h, uv_w, c = 3)
 
     # 5. save files
@@ -105,7 +103,11 @@ def run_posmap_300W_LP(image_path, save_folder, uv_h = 256, uv_w = 256, image_h
     uv_coords = face3d.morphable_model.load.load_uv_coords('Data/BFM/Out/BFM_UV.mat') #
     uv_coords = process_uv(uv_coords, uv_h, uv_w)
 
+    # load bfm
+    bfm = MorphabelModel('Data/BFM/Out/BFM.mat')
+    # run
     image_path = 'Data/IBUG_image_008_1_0.jpg'
-    run_posmap_300W_LP(image_path, save_folder)
+    mat_path = 'Data/IBUG_image_008_1_0.mat'
+    run_posmap_300W_LP(bfm, image_path, mat_path, save_folder)
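
The main behavioural change above is in examples/8_generate_posmap_300WLP.py: the BFM model is now loaded once by the caller and passed into run_posmap_300W_LP together with an explicit mat_path, so the same model can be reused across many 300W-LP samples. A minimal batch-driver sketch along those lines is shown below; it assumes it replaces the single-image call at the bottom of the patched script (so run_posmap_300W_LP, uv_coords, bfm and save_folder are already in scope), and the Data/300W_LP folder plus the jpg/mat pairing convention are illustrative assumptions, not paths defined by this patch.

    # hypothetical batch loop, replacing the single-image call at the bottom of
    # the patched examples/8_generate_posmap_300WLP.py; uv_coords, bfm and
    # save_folder are assumed to be defined above it, as in the patched __main__ block
    from glob import glob

    for image_path in glob('Data/300W_LP/*.jpg'):      # assumed folder of 300W-LP jpgs
        mat_path = image_path.replace('.jpg', '.mat')  # assumed jpg/mat naming convention
        run_posmap_300W_LP(bfm, image_path, mat_path, save_folder)

Because bfm (and uv_coords) are created once outside the loop, only the per-image fitted parameters are re-read inside run_posmap_300W_LP on each iteration.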