From 1eb50b3b205b35122682038e5f0e93184a23a781 Mon Sep 17 00:00:00 2001
From: nanopoteto
Date: Sun, 12 Apr 2020 13:31:59 +0900
Subject: [PATCH 1/3] add colab modules

---
 lib/colab_util.py | 113 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 113 insertions(+)
 create mode 100644 lib/colab_util.py

diff --git a/lib/colab_util.py b/lib/colab_util.py
new file mode 100644
index 00000000..b663560c
--- /dev/null
+++ b/lib/colab_util.py
@@ -0,0 +1,113 @@
+import io
+import os
+import torch
+from skimage.io import imread
+import numpy as np
+import cv2
+from tqdm import tqdm_notebook as tqdm
+import base64
+from IPython.display import HTML
+
+# Util function for loading meshes
+from pytorch3d.io import load_objs_as_meshes
+
+from IPython.display import HTML
+from base64 import b64encode
+
+# Data structures and functions for rendering
+from pytorch3d.structures import Meshes, Textures
+from pytorch3d.renderer import (
+    look_at_view_transform,
+    OpenGLOrthographicCameras,
+    PointLights,
+    DirectionalLights,
+    Materials,
+    RasterizationSettings,
+    MeshRenderer,
+    MeshRasterizer,
+    TexturedSoftPhongShader,
+    HardPhongShader
+)
+
+def set_renderer():
+    # Setup
+    device = torch.device("cuda:0")
+    torch.cuda.set_device(device)
+
+    # Initialize an OpenGL orthographic camera.
+    R, T = look_at_view_transform(2.0, 0, 180)
+    cameras = OpenGLOrthographicCameras(device=device, R=R, T=T)
+
+    raster_settings = RasterizationSettings(
+        image_size=512,
+        blur_radius=0.0,
+        faces_per_pixel=1,
+        bin_size=None,
+        max_faces_per_bin=None
+    )
+
+    lights = PointLights(device=device, location=((2.0, 2.0, 2.0),))
+
+    renderer = MeshRenderer(
+        rasterizer=MeshRasterizer(
+            cameras=cameras,
+            raster_settings=raster_settings
+        ),
+        shader=HardPhongShader(
+            device=device,
+            cameras=cameras,
+            lights=lights
+        )
+    )
+    return renderer
+
+def get_verts_rgb_colors(obj_path):
+    rgb_colors = []
+
+    f = open(obj_path)
+    lines = f.readlines()
+    for line in lines:
+        ls = line.split(' ')
+        if len(ls) == 7:
+            rgb_colors.append(ls[-3:])
+
+    return np.array(rgb_colors, dtype='float32')[None, :, :]
+
+def generate_video_from_obj(obj_path, video_path, renderer):
+    # Setup
+    device = torch.device("cuda:0")
+    torch.cuda.set_device(device)
+
+    # Load obj file
+    verts_rgb_colors = get_verts_rgb_colors(obj_path)
+    verts_rgb_colors = torch.from_numpy(verts_rgb_colors).to(device)
+    textures = Textures(verts_rgb=verts_rgb_colors)
+    wo_textures = Textures(verts_rgb=torch.ones_like(verts_rgb_colors)*0.75)
+
+    # Load obj
+    mesh = load_objs_as_meshes([obj_path], device=device)
+
+    # Set mesh
+    vers = mesh._verts_list
+    faces = mesh._faces_list
+    mesh_w_tex = Meshes(vers, faces, textures)
+    mesh_wo_tex = Meshes(vers, faces, wo_textures)
+
+    # create VideoWriter
+    fourcc = cv2.VideoWriter_fourcc(*'MP4V')
+    out = cv2.VideoWriter(video_path, fourcc, 20.0, (1024,512))
+
+    for i in tqdm(range(90)):
+        R, T = look_at_view_transform(1.8, 0, i*4, device=device)
+        images_w_tex = renderer(mesh_w_tex, R=R, T=T)
+        images_w_tex = np.clip(images_w_tex[0, ..., :3].cpu().numpy(), 0.0, 1.0)[:, :, ::-1] * 255
+        images_wo_tex = renderer(mesh_wo_tex, R=R, T=T)
+        images_wo_tex = np.clip(images_wo_tex[0, ..., :3].cpu().numpy(), 0.0, 1.0)[:, :, ::-1] * 255
+        image = np.concatenate([images_w_tex, images_wo_tex], axis=1)
+        out.write(image.astype('uint8'))
+    out.release()
+
+def video(path):
+    mp4 = open(path, 'rb').read()
+    data_url = "data:video/mp4;base64," + b64encode(mp4).decode()
+    return HTML('<video width=500 controls loop><source src="%s" type="video/mp4"></video>' % data_url)

From d2cd39f1cb8bf63c3e649db2c98e35568e26e30b Mon Sep 17 00:00:00 2001
From: ryota natsume
Date: Sun, 12 Apr 2020 13:37:40 +0900
Subject: [PATCH 2/3] Update README.md

---
 README.md | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index fa244318..1a056602 100755
--- a/README.md
+++ b/README.md
@@ -1,5 +1,7 @@
 # PIFu: Pixel-Aligned Implicit Function for High-Resolution Clothed Human Digitization
 
+[![report](https://img.shields.io/badge/arxiv-report-red)](https://arxiv.org/abs/1905.05172) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1GFSsqP2BWz4gtq0e-nki00ZHSirXwFyY)
+
 News:
 * \[2020/02/26\] License is updated to MIT license! Enjoy!
 
@@ -81,6 +83,11 @@ sh ./scripts/download_trained_model.sh
 sh ./scripts/test.sh
 ```
 
+## Demo on Google Colab
+If you do not have a suitable environment to run this projects then you could give Google Colab a try.
+It allows you to run the project in the cloud, free of charge. You may try our Colab demo using the notebook we prepare:
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1GFSsqP2BWz4gtq0e-nki00ZHSirXwFyY)
+
 ## Data Generation (Linux Only)
 While we are unable to release the full training data due to the restriction of commertial scans, we provide rendering code using free models in [RenderPeople](https://renderpeople.com/free-3d-people/).
 This tutorial uses `rp_dennis_posed_004` model. Please download the model from [this link](https://renderpeople.com/sample/free/rp_dennis_posed_004_OBJ.zip) and unzip the content under a folder named `rp_dennis_posed_004_OBJ`. The same process can be applied to other RenderPeople data.
@@ -149,4 +156,4 @@ Implict surface learning for sparse view human performance capture!
 
 For commercial queries, please contact:
 
-Hao Li: hao@hao-li.com ccto: saitos@usc.edu Baker!!
\ No newline at end of file
+Hao Li: hao@hao-li.com ccto: saitos@usc.edu Baker!!
From 71d794d38f2a1899c1b5dc799b25a2a375d6c0c1 Mon Sep 17 00:00:00 2001
From: Shunsuke
Date: Mon, 13 Apr 2020 10:01:47 +0900
Subject: [PATCH 3/3] updated readme

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 1a056602..cd827fde 100755
--- a/README.md
+++ b/README.md
@@ -3,6 +3,7 @@
 [![report](https://img.shields.io/badge/arxiv-report-red)](https://arxiv.org/abs/1905.05172) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1GFSsqP2BWz4gtq0e-nki00ZHSirXwFyY)
 
 News:
+* \[2020/04/13\] Demo with Google Colab (incl. visualization) is available. Special thanks to [@nanopoteto](https://github.com/nanopoteto)!!!
 * \[2020/02/26\] License is updated to MIT license! Enjoy!
 
 This repository contains a pytorch implementation of "[PIFu: Pixel-Aligned Implicit Function for High-Resolution Clothed Human Digitization](https://arxiv.org/abs/1905.05172)".
@@ -84,8 +85,7 @@ sh ./scripts/test.sh
 ```
 
 ## Demo on Google Colab
-If you do not have a suitable environment to run this projects then you could give Google Colab a try.
-It allows you to run the project in the cloud, free of charge. You may try our Colab demo using the notebook we prepare:
+If you do not have a setup to run PIFu, we offer a Google Colab version to give it a try, allowing you to run PIFu in the cloud, free of charge. Try our Colab demo using the following notebook:
 [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1GFSsqP2BWz4gtq0e-nki00ZHSirXwFyY)
 
 ## Data Generation (Linux Only)
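
For reference, the helpers that PATCH 1/3 adds in `lib/colab_util.py` are intended to be called from a Colab or Jupyter session roughly as sketched below. This is a minimal, hypothetical usage sketch rather than the notebook linked in the README: it assumes the repository root is the working directory, and the `obj_path`/`video_path` values are illustrative placeholders for a mesh produced by the PIFu demo.

```
# Hypothetical usage of lib/colab_util.py in a Colab/Jupyter cell.
# Assumes a CUDA GPU plus PyTorch3D and OpenCV installed; paths are placeholders.
from lib.colab_util import set_renderer, generate_video_from_obj, video

obj_path = '/content/pifu_result.obj'    # placeholder: per-vertex-colored mesh from the PIFu demo
video_path = '/content/pifu_result.mp4'  # placeholder: where the turntable video is written

renderer = set_renderer()                                 # PyTorch3D mesh renderer on cuda:0
generate_video_from_obj(obj_path, video_path, renderer)   # renders 90 views and writes the mp4
video(video_path)                                         # embeds the mp4 in the notebook output
```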