Commit

add examples
yfeng95 committed Jul 17, 2018
1 parent 2d3d7d0 commit e78b85a
Showing 8 changed files with 644 additions and 0 deletions.
49 changes: 49 additions & 0 deletions examples/1_pipeline.py
@@ -0,0 +1,49 @@
''' Simple example of the pipeline:
 3D obj (processing) --> 2D image
'''
import os, sys
import numpy as np
import scipy.io as sio
from skimage import io
from time import time
import matplotlib.pyplot as plt

sys.path.append('..')
import face3d
from face3d import mesh
from face3d import mesh_cython

# ------------------------------ 1. load mesh data
C = sio.loadmat('Data/example1.mat')
vertices = C['vertices']; colors = C['colors']; triangles = C['triangles']
colors = colors/np.max(colors)

# ------------------------------ 2. modify vertices (transformation: change the position of the object)
# scale: target height = 200, for example
s = 200/(np.max(vertices[:,1]) - np.min(vertices[:,1]))
# rotate 30 degrees about the y-axis, for example
R = mesh.transform.angle2matrix([0, 30, 0])
# no translation (object center unchanged)
t = [0, 0, 0]
transformed_vertices = mesh.transform.similarity_transform(vertices, s, R, t)
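# For reference, a similarity transform of this kind is conventionally the per-vertex
# mapping X' = s * R * X + t, i.e. roughly s * vertices.dot(R.T) + np.array(t)
# (illustrative only; see face3d.mesh.transform.similarity_transform for the exact convention).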

# ------------------------------ 3. modify colors/texture (add light)
# set lights
light_positions = np.array([[-128, -128, 300]])
light_intensities = np.array([[1, 1, 1]])
lit_colors = mesh.light.add_light(transformed_vertices, triangles, colors, light_positions, light_intensities)

# ------------------------------ 4. modify vertices (projection: change the position of the camera)
projected_vertices = mesh.transform.lookat_camera(transformed_vertices, eye = [0, 0, 200], at = np.array([0, 0, 0]), up = None)

# ------------------------------ 5. render (to 2D image)
# set h, w of rendering
h = w = 256
# change to image coords for rendering
image_vertices = mesh.transform.to_image(projected_vertices, h, w)
rendering = face3d.mesh_cython.render.render_colors(image_vertices, triangles, lit_colors, h, w)

# ---- show
plt.imshow(rendering)
plt.show()
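# A minimal sketch of the image-coordinate conversion used in step 5 (the helper name
# to_image_coords_sketch is illustrative, not part of face3d; face3d's own
# mesh.transform.to_image may differ in details): shift the object to the image center
# and flip y so that +y points down, as in pixel coordinates.
import numpy as np

def to_image_coords_sketch(vertices, h, w):
    image_vertices = vertices.copy()
    image_vertices[:, 0] = image_vertices[:, 0] + w/2.0   # move x to the image center
    image_vertices[:, 1] = image_vertices[:, 1] + h/2.0   # move y to the image center
    image_vertices[:, 1] = h - image_vertices[:, 1] - 1   # flip y: image origin is top-left
    return image_vertices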

69 changes: 69 additions & 0 deletions examples/2_3dmm.py
@@ -0,0 +1,69 @@
''' 3D morphable model example:
 3DMM parameters --> mesh (and the inverse: fitting)
'''
import os, sys
import numpy as np
import scipy.io as sio
from skimage import io
from time import time
import matplotlib.pyplot as plt

sys.path.append('..')
import face3d
from face3d import mesh
from face3d import mesh_cython
from face3d.morphable_model import MorphabelModel

# --------------------- Forward: parameters(shape, expression, pose) --> 3D obj --> 2D image ---------------
# --- 1. load model
bfm = MorphabelModel('Data/BFM/Out/BFM.mat')
print('init bfm model success')

# --- 2. generate face mesh: vertices(represent shape) & colors(represent texture)
sp = bfm.get_shape_para('random')
ep = bfm.get_exp_para('random')
vertices = bfm.generate_vertices(sp, ep)

tp = bfm.get_tex_para('random')
colors = bfm.generate_colors(tp)
colors = np.minimum(np.maximum(colors, 0), 1)

# --- 3. transform vertices to proper position
s = 1e-03
angles = [10, 30, 20]
t = [0, 0, 0]
transformed_vertices = bfm.transform(vertices, s, angles, t)
projected_vertices = transformed_vertices.copy() # using standard camera & orthographic projection

# --- 4. render(3d obj --> 2d image)
# set prop of rendering
h = w = 256; c = 3
image_vertices = mesh.transform.to_image(projected_vertices, h, w)
image = mesh_cython.render.render_colors(image_vertices, bfm.triangles, colors, h, w)

# -------------------- Back: 2D image points and corresponding 3D vertex indices--> parameters(pose, shape, expression) ------
## only use 68 key points to fit
x = projected_vertices[bfm.kpt_ind, :2] # 2D keypoints, which could be detected from an image
X_ind = bfm.kpt_ind # index of keypoints in 3DMM. fixed.

# fit
fitted_sp, fitted_ep, fitted_s, fitted_angles, fitted_t = bfm.fit(x, X_ind, max_iter = 4)

# verify fitted parameters
fitted_vertices = bfm.generate_vertices(fitted_sp, fitted_ep)
transformed_vertices = bfm.transform(fitted_vertices, fitted_s, fitted_angles, fitted_t)

image_vertices = mesh.transform.to_image(transformed_vertices, h, w)
fitted_image = mesh_cython.render.render_colors(image_vertices, bfm.triangles, colors, h, w)


# ------------- print & show
print('pose, ground truth: \n', s, angles[0], angles[1], angles[2], t[0], t[1])
print('pose, fitted: \n', fitted_s, fitted_angles[0], fitted_angles[1], fitted_angles[2], fitted_t[0], fitted_t[1])

save_folder = 'results/3dmm'
if not os.path.exists(save_folder):
    os.makedirs(save_folder)

io.imsave('{}/generated.jpg'.format(save_folder), image)
io.imsave('{}/fitted.jpg'.format(save_folder), fitted_image)
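# The heart of generate_vertices above is a linear model: mean shape plus weighted shape
# and expression bases. A minimal sketch of that idea (array names mu, shape_basis and
# exp_basis are illustrative, not the exact BFM field names; the real model may also
# scale parameters by per-component standard deviations):
import numpy as np

def generate_vertices_sketch(mu, shape_basis, exp_basis, sp, ep):
    # mu: (3n, 1) mean shape; shape_basis: (3n, n_sp); exp_basis: (3n, n_ep)
    # sp: (n_sp, 1) shape parameters; ep: (n_ep, 1) expression parameters
    vertices = mu + shape_basis.dot(sp) + exp_basis.dot(ep)   # (3n, 1)
    # (n, 3): x, y, z per vertex; the reshape convention depends on how the model stores coordinates
    return vertices.reshape(-1, 3)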
136 changes: 136 additions & 0 deletions examples/3_transform.py
@@ -0,0 +1,136 @@
''' Examples of transformation & camera model.
'''
import os, sys
import numpy as np
import math
import scipy.io as sio
from skimage import io
from time import time
import subprocess

sys.path.append('..')
import face3d
from face3d import mesh
from face3d import mesh_cython


def transform_test(vertices, obj, camera, h = 256, w = 256):
    '''
    Args:
        obj: dict of object transform parameters
        camera: dict of camera parameters
    '''
    R = mesh.transform.angle2matrix(obj['angles'])
    transformed_vertices = mesh.transform.similarity_transform(vertices, obj['s'], R, obj['t'])

    if camera['proj_type'] == 'orthographic':
        projected_vertices = transformed_vertices
        image_vertices = mesh.transform.to_image(projected_vertices, h, w)
    else:
        ## world space to camera space (look-at camera)
        projected_vertices = mesh.transform.lookat_camera(transformed_vertices, camera['eye'], camera['at'], camera['up'])
        ## camera space to image space (perspective projection); skipped for orthographic projection
        projected_vertices = mesh.transform.perspective_project(projected_vertices, camera['fovy'], near = camera['near'], far = camera['far'])
        ## to image coordinates (pixel positions in the image)
        image_vertices = mesh.transform.to_image(projected_vertices, h, w, True)

    rendering = mesh_cython.render.render_colors(image_vertices, triangles, colors, h, w)
    rendering = np.minimum((np.maximum(rendering, 0)), 1)
    return rendering
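# Rough note on the perspective branch above (an illustration, not face3d's exact math):
# a pinhole projection with vertical field of view fovy maps a camera-space point
# (x, y, z) to approximately (f*x/(-z), f*y/(-z)) with f = 1/tan(fovy/2), and
# to_image(..., True) presumably rescales those normalized coordinates to pixel positions.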

# --------- load mesh data
C = sio.loadmat('Data/example1.mat')
vertices = C['vertices']
colors = C['colors']; triangles = C['triangles']
colors = colors/np.max(colors)
# move center to [0,0,0]
vertices = vertices - np.mean(vertices, 0)[np.newaxis, :]

# save folder
save_folder = 'results/transform'
if not os.path.exists(save_folder):
    os.makedirs(save_folder)
options = '-delay 10 -loop 0 -layers optimize' # GIF options; requires ImageMagick (convert)

# ---- start
obj = {}
camera = {}
### a real face is roughly 18cm tall/wide; set 180 units = 18cm. image size: 256 x 256
scale_init = 180/(np.max(vertices[:,1]) - np.min(vertices[:,1])) # scale face model to real size

## 1. fix the camera model (standard camera & orthographic projection); change the object position.
camera['proj_type'] = 'orthographic'
# scale
for factor in np.arange(0.5, 1.2, 0.1):
    obj['s'] = scale_init*factor
    obj['angles'] = [0, 0, 0]
    obj['t'] = [0, 0, 0]
    image = transform_test(vertices, obj, camera)
    io.imsave('{}/1_1_{:.2f}.jpg'.format(save_folder, factor), image)

# angles
for i in range(3):
    for angle in np.arange(-50, 51, 10):
        obj['s'] = scale_init
        obj['angles'] = [0, 0, 0]
        obj['angles'][i] = angle
        obj['t'] = [0, 0, 0]
        image = transform_test(vertices, obj, camera)
        io.imsave('{}/1_2_{}_{}.jpg'.format(save_folder, i, angle), image)
subprocess.call('convert {} {}/1_*.jpg {}'.format(options, save_folder, save_folder + '/obj.gif'), shell=True)

## 2. fix the object position (center = [0,0,0], frontal pose); change camera position & direction, using perspective projection (fovy fixed)
obj['s'] = scale_init
obj['angles'] = [0, 0, 0]
obj['t'] = [0, 0, 0]
# obj: centered at [0,0,0]; height set to ~180 above

camera['proj_type'] = 'perspective'
camera['at'] = [0, 0, 0]
camera['near'] = 1000
camera['far'] = -100

# eye position
camera['fovy'] = 30
camera['up'] = [0, 1, 0]
# z-axis: eye from far to near, looking at the center of face
for p in np.arange(500, 250-1, -40): # 0.5m -> 0.25m
    camera['eye'] = [0, 0, p] # stay in front of face
    image = transform_test(vertices, obj, camera)
    io.imsave('{}/2_eye_1_{}.jpg'.format(save_folder, 1000-p), image)

# y-axis: eye from down to up, looking at the center of face
for p in np.arange(-300, 301, 60): # down 0.3m -> up 0.3m
    camera['eye'] = [0, p, 250] # stay 0.25m away
    image = transform_test(vertices, obj, camera)
    io.imsave('{}/2_eye_2_{}.jpg'.format(save_folder, p/6), image)

# x-axis: eye from left to right, looking at the center of face
for p in np.arange(-300, 301, 60): # left 0.3m -> right 0.3m
    camera['eye'] = [p, 0, 250] # stay 0.25m away
    image = transform_test(vertices, obj, camera)
    io.imsave('{}/2_eye_3_{}.jpg'.format(save_folder, -p/6), image)

# up direction
camera['eye'] = [0, 0, 250] # stay in front
for p in np.arange(-50, 51, 10):
    world_up = np.array([0, 1, 0]) # default up direction
    z = np.deg2rad(p)
    Rz = np.array([[math.cos(z), -math.sin(z), 0],
                   [math.sin(z),  math.cos(z), 0],
                   [          0,            0, 1]])
    up = Rz.dot(world_up[:, np.newaxis]) # rotate the up direction
    # note: rotating the up direction is the opposite of rotating the object;
    # rotating the camera 20 degrees clockwise is equivalent to keeping the camera
    # fixed and rotating the object 20 degrees anticlockwise.
    camera['up'] = np.squeeze(up)
    image = transform_test(vertices, obj, camera)
    io.imsave('{}/2_eye_4_{}.jpg'.format(save_folder, -p), image)

subprocess.call('convert {} {}/2_*.jpg {}'.format(options, save_folder, save_folder + '/camera.gif'), shell=True)

# -- delete jpg files
print('gifs have been generated, now delete jpgs')
subprocess.call('rm {}/*.jpg'.format(save_folder), shell=True)
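# A minimal sketch of the look-at step used in transform_test (the function name
# look_at_sketch is illustrative, not face3d's implementation): build an orthonormal
# camera basis from eye/at/up and express the vertices in that basis.
import numpy as np

def look_at_sketch(vertices, eye, at, up):
    eye, at, up = np.asarray(eye, float), np.asarray(at, float), np.asarray(up, float)
    z_axis = eye - at                                  # camera looks along -z
    z_axis = z_axis / np.linalg.norm(z_axis)
    x_axis = np.cross(up, z_axis)                      # camera right
    x_axis = x_axis / np.linalg.norm(x_axis)
    y_axis = np.cross(z_axis, x_axis)                  # camera up (already unit length)
    R = np.stack([x_axis, y_axis, z_axis])             # rows are the camera axes
    return (vertices - eye).dot(R.T)                   # world coords -> camera coords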
77 changes: 77 additions & 0 deletions examples/4_light.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
'''
test light
'''
import os, sys
import numpy as np
import scipy.io as sio
from skimage import io
from time import time
import subprocess

sys.path.append('..')
import face3d
from face3d import mesh
from face3d import mesh_cython


def light_test(vertices, light_positions, light_intensities, h = 256, w = 256):
    lit_colors = mesh_cython.light.add_light(vertices, triangles, colors, light_positions, light_intensities)
    image_vertices = mesh.transform.to_image(vertices, h, w)
    rendering = mesh_cython.render.render_colors(image_vertices, triangles, lit_colors, h, w)
    rendering = np.minimum((np.maximum(rendering, 0)), 1)
    return rendering

# --------- load mesh data
C = sio.loadmat('Data/example1.mat')
vertices = C['vertices']
colors = C['colors']; triangles = C['triangles']
colors = colors/np.max(colors)
# move center to [0,0,0]
vertices = vertices - np.mean(vertices, 0)[np.newaxis, :]
s = 200/(np.max(vertices[:,1]) - np.min(vertices[:,1]))
R = mesh.transform.angle2matrix([0, 0, 0])
t = [0, 0, 0]
vertices = mesh.transform.similarity_transform(vertices, s, R, t)


save_folder = 'results/light'
if not os.path.exists(save_folder):
    os.makedirs(save_folder)
options = '-delay 12 -loop 0 -layers optimize' # GIF options; requires ImageMagick (convert)

# ---- start
# 1. fix light intensities. change light positions.
# x axis: light from left to right
light_intensities = np.array([[1, 1, 1]])
for i,p in enumerate(range(-200, 201, 40)):
    light_positions = np.array([[p, 0, 300]])
    image = light_test(vertices, light_positions, light_intensities)
    io.imsave('{}/1_1_{:0>2d}.jpg'.format(save_folder, i), image)
# y axis: light from up to down
for i,p in enumerate(range(200, -201, -40)):
    light_positions = np.array([[0, p, 300]])
    image = light_test(vertices, light_positions, light_intensities)
    io.imsave('{}/1_2_{:0>2d}.jpg'.format(save_folder, i), image)
# z axis: light from near to far
for i,p in enumerate(range(100, 461, 40)):
    light_positions = np.array([[0, 0, p]])
    image = light_test(vertices, light_positions, light_intensities)
    io.imsave('{}/1_3_{:0>2d}.jpg'.format(save_folder, i), image)
subprocess.call('convert {} {}/1_*.jpg {}'.format(options, save_folder, save_folder + '/position.gif'), shell=True)


# 2. fix light positions. change light intensities.
light_positions = np.array([[0, 0, 300]])
for k in range(3):
    for i,p in enumerate(np.arange(0.4,1.1,0.2)):
        light_intensities = np.array([[0, 0, 0]], dtype = np.float32)
        light_intensities[0,k] = p
        image = light_test(vertices, light_positions, light_intensities)
        io.imsave('{}/2_{}_{:0>2d}.jpg'.format(save_folder, k, i), image)
subprocess.call('convert {} {}/2_*.jpg {}'.format(options, save_folder, save_folder + '/intensity.gif'), shell=True)

# -- delete jpg files
print('gifs have been generated, now delete jpgs')
subprocess.call('rm {}/*.jpg'.format(save_folder), shell=True)
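# A minimal sketch of the kind of shading add_light performs: per-vertex Lambertian
# (diffuse) lighting from point lights, with vertex normals approximated by summing
# face normals. The function name diffuse_light_sketch is illustrative, not face3d's
# implementation; triangles is assumed to be an (ntri, 3) index array.
import numpy as np

def diffuse_light_sketch(vertices, triangles, colors, light_positions, light_intensities):
    v0, v1, v2 = vertices[triangles[:, 0]], vertices[triangles[:, 1]], vertices[triangles[:, 2]]
    face_normals = np.cross(v1 - v0, v2 - v0)                            # (ntri, 3)
    normals = np.zeros_like(vertices, dtype = np.float64)
    for i in range(3):
        np.add.at(normals, triangles[:, i], face_normals)                # accumulate onto vertices
    normals = normals / (np.linalg.norm(normals, axis = 1, keepdims = True) + 1e-10)

    lit_colors = np.zeros_like(colors, dtype = np.float64)
    for pos, intensity in zip(light_positions, light_intensities):
        direction = pos - vertices                                       # vertex -> light
        direction = direction / (np.linalg.norm(direction, axis = 1, keepdims = True) + 1e-10)
        n_dot_l = np.clip(np.sum(normals * direction, axis = 1, keepdims = True), 0, None)
        lit_colors += intensity * colors * n_dot_l                       # diffuse term only
    return np.clip(lit_colors, 0, 1)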
56 changes: 56 additions & 0 deletions examples/5_render.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
''' Test render speed.
'''
import os, sys
import numpy as np
import scipy.io as sio
from skimage import io
from time import time
import matplotlib.pyplot as plt

np.set_printoptions(suppress=True)

sys.path.append('..')
import face3d
from face3d import mesh
from face3d import mesh_cython
from face3d.morphable_model import MorphabelModel

# load data
C = sio.loadmat('Data/example1.mat')
vertices = C['vertices']; colors = C['colors']; triangles = C['triangles']
colors = colors/np.max(colors)
# move center to [0,0,0]
vertices = vertices - np.mean(vertices, 0)[np.newaxis, :]
s = 200/(np.max(vertices[:,1]) - np.min(vertices[:,1]))
R = mesh.transform.angle2matrix([0, 0, 0])
t = [0, 0, 0]
vertices = mesh.transform.similarity_transform(vertices, s, R, t)
# h, w of rendering
h = w = 256
image_vertices = mesh.transform.to_image(vertices, h, w)

# -----------------------------------------render
# # render texture python
# st = time()
# rendering_tp = face3d.mesh.render.render_texture(vertices, triangles, texture, texcoord, triangles, h, w)
# print('----------texture python: ', time() - st)

# # render texture c++
# st = time()
# rendering_tc = face3d.mesh_cython.render.render_texture(vertices, triangles, texture, texcoord, triangles, h, w)
# print('----------texture c++: ', time() - st)

# render colors python
st = time()
rendering_cp = face3d.mesh.render.render_colors(image_vertices, triangles, colors, h, w)
print('----------colors python: ', time() - st)

# render colors python ras
st = time()
rendering_cpr = face3d.mesh.render.render_colors_ras(image_vertices, triangles, colors, h, w)
print('----------colors python ras: ', time() - st)

# render colors c++ (cython)
st = time()
rendering_cc = face3d.mesh_cython.render.render_colors(image_vertices, triangles, colors, h, w)
print('----------colors c++: ', time() - st)
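# For reference, the color renderers timed above implement variations of the same basic
# idea: rasterize each triangle with a depth buffer and interpolate per-vertex colors
# with barycentric weights. A very small (and slow) pure-Python/NumPy sketch of that
# idea, assuming image_vertices are already in image coordinates with x to the right,
# y down, and larger z meaning closer to the camera (the function name
# render_colors_sketch is illustrative, not face3d's implementation):
import numpy as np

def render_colors_sketch(image_vertices, triangles, colors, h, w):
    image = np.zeros((h, w, 3))
    depth = np.full((h, w), -np.inf)
    for tri in triangles:                                 # tri: three vertex indices
        p = image_vertices[tri, :2]                       # (3, 2) pixel positions
        z = image_vertices[tri, 2]                        # (3,) depths
        c = colors[tri]                                   # (3, 3) per-vertex colors
        umin, vmin = np.floor(p.min(0)).astype(int)
        umax, vmax = np.ceil(p.max(0)).astype(int)
        A = np.array([[p[1, 0] - p[0, 0], p[2, 0] - p[0, 0]],
                      [p[1, 1] - p[0, 1], p[2, 1] - p[0, 1]]])
        if abs(np.linalg.det(A)) < 1e-10:                 # skip degenerate triangles
            continue
        for v in range(max(vmin, 0), min(vmax + 1, h)):
            for u in range(max(umin, 0), min(umax + 1, w)):
                # barycentric weights of pixel (u, v) within the triangle
                w1, w2 = np.linalg.solve(A, np.array([u - p[0, 0], v - p[0, 1]]))
                w0 = 1 - w1 - w2
                if w0 < 0 or w1 < 0 or w2 < 0:
                    continue                              # pixel lies outside the triangle
                pz = w0*z[0] + w1*z[1] + w2*z[2]
                if pz > depth[v, u]:                      # keep the closest surface
                    depth[v, u] = pz
                    image[v, u] = w0*c[0] + w1*c[1] + w2*c[2]
    return image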
