Skip to content

Commit

Permalink
support exporting detailed mesh
Browse files Browse the repository at this point in the history
  • Loading branch information
biwen147 committed Apr 21, 2023
1 parent 1ca7ff5 commit ecec7cb
Show file tree
Hide file tree
Showing 9 changed files with 45 additions and 7 deletions.
14 changes: 8 additions & 6 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ We present a novel hierarchical representation network (HRN) to achieve accurate

## News

* [04/21/2023] Add the code for exporting meshes with high-frequency details.
* [04/19/2023] The source codes are available!
* [03/01/2023] HRN achieved top-1 results on single image face reconstruction benchmark [REALY](https://realy3dface.com/)!
* [02/28/2023] Paper [HRN](https://arxiv.org/abs/2302.14434) released!
Expand All @@ -32,11 +33,11 @@ Clone the repo:
### Requirements
**This implementation is only tested under Ubuntu/CentOS environment with Nvidia GPUs and CUDA installed.**

* Python >= 3.8
* PyTorch >= 1.6
* Basic requirements, you can run
```bash
conda create -n HRN python=3.8
source activate HRN
pip install -r requirements.txt
```
Expand Down Expand Up @@ -84,12 +85,13 @@ Clone the repo:
We haven't released the training code yet.
## Note
1. This implementation has made a few changes on the basis of the original HRN to improve the effect and robustness:
   - Introduce a valid mask to alleviate the interference caused by the occlusion of objects such as hair.
   - Re-implement the texture map generation and re-alignment module, which is faster than the original implementation.
   - Introduce two trainable parameters α and β to improve the training stability at the beginning stage.
2. The displacement map is designed to be applied during the rendering process, so the exported mesh with high-frequency details may not look as ideal as the rendered 2D image.
## Results
Expand Down
Binary file added assets/examples/single_view_image/00016.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added assets/examples/single_view_image/00034.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added assets/examples/single_view_image/00133.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added assets/examples/single_view_image/00437.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added assets/examples/single_view_image/00521.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file removed assets/examples/single_view_image/ffhq_example.jpg
Binary file not shown.
19 changes: 19 additions & 0 deletions models/bfm.py
Original file line number Diff line number Diff line change
Expand Up @@ -557,6 +557,22 @@ def compute_for_render_hierarchical_mid(self, coeffs, deformation_map, UVs, visu

return face_vertex, face_color_map, landmark, face_proj, face_albedo_map, face_shape_transformed, face_norm_roted, extra_results

def get_dense_mesh(self, uv_z, coarse_verts, coarse_normals):
    """Build a dense mesh by displacing the coarse geometry along its normals.

    The coarse vertices and normals are rasterized into UV space, then each
    UV texel is offset by the displacement map ``uv_z`` along the
    corresponding normal; every texel becomes one vertex of the dense mesh.
    (The previous docstring — "Convert displacement map into detail normal
    map" — described a different operation and did not match this code.)

    Args:
        uv_z: displacement map; multiplied elementwise with the UV normal
            map, so presumably shaped (B, 1, H, W) and broadcast over the
            3 coordinate channels — TODO confirm against the caller.
        coarse_verts: coarse mesh vertices in world space.
        coarse_normals: per-vertex normals of the coarse mesh.

    Returns:
        dict with:
            'vertices': (B, H*W, 3) displaced vertex positions.
            'faces': (B, F, 3) fixed dense-face indices, shared across
                the batch (expanded view, no copy).
    """
    batch_size = uv_z.shape[0]
    # Rasterize per-vertex attributes into UV maps of shape (B, 3, H, W).
    uv_coarse_vertices = self.render.world2uv(coarse_verts)
    uv_coarse_normals = self.render.world2uv(coarse_normals)

    # Offset each texel along its normal by the displacement value.
    uv_detail_vertices = uv_coarse_vertices + uv_z * uv_coarse_normals
    # Flatten the UV grid into a per-batch vertex list: (B, H*W, 3).
    dense_vertices = uv_detail_vertices.permute(0, 2, 3, 1).reshape([batch_size, -1, 3])
    # Topology is fixed; expand (non-copying) across the batch dimension.
    dense_faces = self.render.dense_faces.expand(batch_size, -1, -1)
    dense_mesh = {
        'vertices': dense_vertices,
        'faces': dense_faces,
    }
    return dense_mesh

def compute_for_render_hierarchical_high(self, coeffs, displacement_uv, face_albedo_map, face_shape_transformed, face_norm_roted, extra_results=None):

if type(coeffs) == dict:
Expand All @@ -567,6 +583,9 @@ def compute_for_render_hierarchical_high(self, coeffs, displacement_uv, face_alb
face_color_map = self.compute_color_with_displacement(face_albedo_map, face_shape_transformed, face_norm_roted, displacement_uv, coef_dict['gamma'])

if extra_results is not None:
dense_mesh = self.get_dense_mesh(displacement_uv, face_shape_transformed, face_norm_roted)
extra_results['dense_mesh'] = dense_mesh

extra_results['tex_high_color'] = face_color_map

batch_size = face_albedo_map.shape[0]
Expand Down
19 changes: 18 additions & 1 deletion models/facerecon_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
from util import util_
from util.nv_diffrast import MeshRenderer
import os
from util.util_ import read_obj, write_obj2, viz_flow, split_vis, estimate_normals, write_video
from util.util_ import read_obj, write_obj2, viz_flow, split_vis, estimate_normals, write_video, crop_mesh
import time
from models.de_retouching_module import DeRetouchingModule
from pix2pix.pix2pix_model import Pix2PixModel
Expand Down Expand Up @@ -794,12 +794,19 @@ def save_results(self, out_dir, save_name='test'):
vertices_batch[..., -1] = 10 - vertices_batch[..., -1] # from camera space to world space
vertices_batch = vertices_batch.cpu().numpy()

# dense mesh
dense_vertices_batch = self.extra_results['dense_mesh']['vertices']
dense_vertices_batch = dense_vertices_batch.detach().cpu().numpy()
dense_faces_batch = self.extra_results['dense_mesh']['faces'].detach().cpu().numpy()


texture_map_batch = (255.0 * self.pred_color_high).permute(0, 2, 3, 1).detach().cpu().numpy()[..., ::-1]

for i in range(batch_size):
cv2.imwrite(os.path.join(out_dir, save_name + '_{}_hrn_output.jpg'.format(i)), hrn_output_vis_batch[i])
# split_vis(os.path.join(out_dir, save_name + '_{}_hrn_output.jpg'.format(i)))

# export mesh with mid frequency details
texture_map = texture_map_batch[i]
vertices = vertices_batch[i]
normals = estimate_normals(vertices, self.facemodel_front.face_buf.cpu().numpy())
Expand All @@ -814,6 +821,16 @@ def save_results(self, out_dir, save_name='test'):
write_obj2(os.path.join(out_dir, save_name + '_{}_hrn_mid_mesh.obj'.format(i)), face_mesh)
results['face_mesh'] = face_mesh

# export mesh with mid and high frequency details
dense_mesh = {
'vertices': dense_vertices_batch[i],
'faces': dense_faces_batch[i],
}
vertices_zero = dense_mesh['vertices'] == 0.0
keep_inds = np.where((vertices_zero[:, 0] * vertices_zero[:, 1] * vertices_zero[:, 2]) == False)[0]
dense_mesh, _ = crop_mesh(dense_mesh, keep_inds) # remove the redundant vertices and faces
write_obj2(os.path.join(out_dir, save_name + '_{}_hrn_high_mesh.obj'.format(i)), dense_mesh)

pred_face_gray_list = []
if 'pred_face_high_gray_list' in self.extra_results:
for j in range(len(self.extra_results['pred_face_high_gray_list'])):
Expand Down

0 comments on commit ecec7cb

Please sign in to comment.