
Commit

sxyu committed Nov 10, 2021
1 parent bb49235 commit 2391dd1
Showing 13 changed files with 151 additions and 102 deletions.
16 changes: 5 additions & 11 deletions opt/opt.py
@@ -41,8 +41,6 @@
group.add_argument('--reso',
type=str,
default=
# "[[128, 128, 128], [256, 256, 256], [512, 512, 512], [768, 768, 768]]",
# "[[128, 128, 128], [256, 256, 256], [512, 512, 512]]",
"[[256, 256, 256], [512, 512, 512]]",
help='List of grid resolutions (will be evaled as json);'
'resamples to the next one every upsamp_every iters, then ' +
@@ -161,12 +159,12 @@

group = parser.add_argument_group("losses")
# Foreground TV
group.add_argument('--lambda_tv', type=float, default=1e-5)#1e-5)
group.add_argument('--lambda_tv', type=float, default=1e-3)
group.add_argument('--tv_sparsity', type=float, default=0.01)
group.add_argument('--tv_logalpha', action='store_true', default=False,
help='Use log(1-exp(-delta * sigma)) as in neural volumes')

group.add_argument('--lambda_tv_sh', type=float, default=1e-3)
group.add_argument('--lambda_tv_sh', type=float, default=1e-1)
group.add_argument('--tv_sh_sparsity', type=float, default=0.01)

group.add_argument('--lambda_tv_lumisphere', type=float, default=0.0)#1e-2)#1e-3)
@@ -254,7 +252,6 @@
background_nlayers=args.background_nlayers,
background_reso=args.background_reso)

grid.opt.last_sample_opaque = dset.last_sample_opaque

# DC -> gray; mind the SH scaling!
grid.sh_data.data[:] = 0.0
@@ -289,11 +286,7 @@


grid.requires_grad_(True)
grid.opt.step_size = args.step_size
grid.opt.sigma_thresh = args.sigma_thresh
grid.opt.stop_thresh = args.stop_thresh
grid.opt.background_brightness = args.background_brightness
grid.opt.backend = args.renderer_backend
config_util.setup_render_opts(grid.opt, args)

gstep_id_base = 0

@@ -419,7 +412,6 @@ def eval_step():

def train_step():
print('Train step')
grid.opt.stop_thresh = args.train_stop_thresh
pbar = tqdm(enumerate(range(0, epoch_size, args.batch_size)), total=batches_per_epoch)
stats = {"mse" : 0.0, "psnr" : 0.0, "invsqr_mse" : 0.0}
for iter_id, batch_begin in pbar:
@@ -549,6 +541,8 @@ def train_step():
last_upsamp_step = gstep_id_base
if reso_id < len(reso_list) - 1:
print('* Upsampling from', reso_list[reso_id], 'to', reso_list[reso_id + 1])
args.lambda_tv = 0.0
args.lambda_tv_sh = 0.0
reso_id += 1
use_sparsify = True
z_reso = reso_list[reso_id] if isinstance(reso_list[reso_id], int) else reso_list[reso_id][2]
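For context, --lambda_tv and --lambda_tv_sh weight a total-variation smoothness penalty on the density and SH grids, and per the hunk above they are now zeroed once the grid is upsampled. A minimal dense-grid sketch of such a penalty, for illustration only — the repository applies it in a fused CUDA kernel over the sparse grid links, and --tv_sparsity presumably subsamples which voxels contribute:

import torch

def tv_loss_dense(vol):
    # vol: (X, Y, Z) density grid (a dense stand-in for the sparse grid).
    dx = vol[1:, :, :] - vol[:-1, :, :]
    dy = vol[:, 1:, :] - vol[:, :-1, :]
    dz = vol[:, :, 1:] - vol[:, :, :-1]
    return dx.pow(2).mean() + dy.pow(2).mean() + dz.pow(2).mean()

# e.g. total = mse + args.lambda_tv * tv_loss_dense(sigma_grid) \
#            + args.lambda_tv_sh * tv_loss_dense(sh_grid)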
36 changes: 29 additions & 7 deletions opt/util/config_util.py
@@ -1,7 +1,7 @@
import torch
import argparse
from util.dataset import datasets
import yaml
import json


def define_common_args(parser : argparse.ArgumentParser):
@@ -62,10 +62,6 @@ def define_common_args(parser : argparse.ArgumentParser):
type=float,
default=1e-7,
help="Ray march stopping threshold")
group.add_argument('--train_stop_thresh',
type=float,
default=1e-7,
help="Ray march stopping threshold (for training)")
group.add_argument('--background_brightness',
type=float,
default=1.0,
@@ -74,6 +70,22 @@
choices=['cuvol'],
default='cuvol',
help="Renderer backend")
group.add_argument('--random_sigma_std',
type=float,
default=1.0,
help="Random Gaussian std to add to density values (only if enable_random)")
group.add_argument('--random_sigma_std_background',
type=float,
default=1.0,
help="Random Gaussian std to add to density values for BG (only if enable_random)")
group.add_argument('--enable_random',
action='store_true',
default=False,
help="Random Gaussian std to add to density values")
group.add_argument('--last_sample_opaque',
action='store_true',
default=False,
help="Last sample has +1e9 density (used for LLFF)")


def build_data_options(args):
@@ -91,12 +103,22 @@ def build_data_options(args):

def maybe_merge_config_file(args):
"""
Load yaml config file if specified and merge the arguments
Load json config file if specified and merge the arguments
"""
if args.config is not None:
with open(args.config, "r") as config_file:
configs = yaml.load(config_file, Loader=yaml.FullLoader)
configs = json.load(config_file)
invalid_args = list(set(configs.keys()) - set(dir(args)))
if invalid_args:
raise ValueError(f"Invalid args {invalid_args} in {args.config}.")
args.__dict__.update(configs)

def setup_render_opts(opt, args):
opt.step_size = args.step_size
opt.sigma_thresh = args.sigma_thresh
opt.stop_thresh = args.stop_thresh
opt.background_brightness = args.background_brightness
opt.backend = args.renderer_backend
opt.random_sigma_std = args.random_sigma_std
opt.random_sigma_std_background = args.random_sigma_std_background
opt.last_sample_opaque = args.last_sample_opaque
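As a sanity check of the new JSON merge path, here is a self-contained sketch that mirrors the logic of maybe_merge_config_file above, using a throwaway parser, demo flag values, and a temporary config file (none of these names come from the repo's actual configs):

import argparse, json, os, tempfile

parser = argparse.ArgumentParser()
parser.add_argument('--step_size', type=float, default=0.5)
parser.add_argument('--background_brightness', type=float, default=1.0)
args = parser.parse_args([])

cfg_path = os.path.join(tempfile.mkdtemp(), 'demo.json')
with open(cfg_path, 'w') as f:
    json.dump({'background_brightness': 0.0}, f)

# Same merge semantics as maybe_merge_config_file: unknown keys are rejected.
with open(cfg_path, 'r') as config_file:
    configs = json.load(config_file)
invalid_args = list(set(configs.keys()) - set(dir(args)))
if invalid_args:
    raise ValueError(f"Invalid args {invalid_args} in {cfg_path}.")
args.__dict__.update(configs)
print(args.background_brightness)  # -> 0.0

In opt.py the scattered grid.opt.* assignments are replaced by a single setup_render_opts(grid.opt, args) call, so new render options such as --random_sigma_std only need to be wired up in this one place.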
6 changes: 2 additions & 4 deletions opt/util/ff_dataset.py
@@ -176,15 +176,13 @@ def _load_images(self):

print('z_bounds from LLFF:', self.z_bounds)

radx = 1.35 #1 + 2 * self.sfm.offset / self.gt.size(2)
rady = 1.75 #+ 2 * self.sfm.offset / self.gt.size(1)
radx = 1 + 2 * self.sfm.offset / self.gt.size(2)
rady = 1 + 2 * self.sfm.offset / self.gt.size(1)

self.scene_center = [0.0, 0.0, 0.0]
self.scene_radius = [radx, rady, 1.0]
print('scene_radius', self.scene_radius)
self.use_sphere_bound = False
self.last_sample_opaque = False


def gen_rays(self, factor=1):
print(" Generating rays, scaling factor", factor)
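For intuition about the new radii: with a hypothetical SfM offset of 250 px on 1000 x 750 images, radx = 1 + 2 * 250 / 1000 = 1.5 and rady = 1 + 2 * 250 / 750 ≈ 1.67, so the scene radius now tracks the actual image padding instead of the hard-coded 1.35 / 1.75.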
3 changes: 1 addition & 2 deletions opt/util/nsvf_dataset.py
@@ -194,8 +194,7 @@ def look_for_dir(cands, required=True):
self.scene_radius = 1.0
self.ndc_coeffs = (-1.0, -1.0) # disable
self.use_sphere_bound = True
self.last_sample_opaque = False
self.should_use_background = True
self.should_use_background = True # a hint


def gen_rays(self, factor=1):
1 change: 0 additions & 1 deletion opt/util/obj_dataset.py
@@ -106,7 +106,6 @@ def __init__(
self.scene_radius = 1.0
self.ndc_coeffs = (-1.0, -1.0) # disable
self.use_sphere_bound = True
self.last_sample_opaque = False
self.should_use_background = False # Give warning

def gen_rays(self, factor=1):
7 changes: 5 additions & 2 deletions svox2/csrc/include/data_spec.hpp
@@ -120,6 +120,9 @@ struct RenderOptions {

bool last_sample_opaque;

// bool randomize;
// uint32_t _m1, _m2, _m3;
bool randomize;
float random_sigma_std;
float random_sigma_std_background;
// 32-bit RNG state masks
uint32_t _m1, _m2, _m3;
};
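The three 32-bit masks _m1, _m2, _m3 act as per-launch seeds: as the render_util.cuh hunks below show, each ray XORs them with its ray index to obtain an independent xorshift state for the density noise.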
2 changes: 2 additions & 0 deletions svox2/csrc/include/data_spec_packed.cuh
@@ -3,6 +3,7 @@
#include <torch/extension.h>
#include "data_spec.hpp"
#include "cuda_util.cuh"
#include "random_util.cuh"

namespace {
namespace device {
@@ -116,6 +117,7 @@ struct SingleRaySpec {

float pos[3];
int32_t l[3];
RandomEngine32 rng;
};

} // namespace device
13 changes: 7 additions & 6 deletions svox2/csrc/include/random_util.cuh
@@ -4,6 +4,7 @@
#include <cmath>

// A custom xorshift random generator
// Maybe replace with some CUDA internal stuff?
struct RandomEngine32 {
uint32_t x, y, z;

@@ -28,17 +29,17 @@
}

__host__ __device__
float rand() {
uint32_t z = (*this)();
return float(z) / (1LL << 32);
}
float rand() {
uint32_t z = (*this)();
return float(z) / (1LL << 32);
}


__host__ __device__
void randn2(float* out1, float* out2) {
rand2(out1, out2);
// Box-Muller transform
const float srlog = sqrtf(-2 * logf(*out1));
const float srlog = sqrtf(-2 * logf(*out1 + 1e-32f));
*out2 *= 2 * M_PI;
*out1 = srlog * cosf(*out2);
*out2 = srlog * sinf(*out2);
@@ -49,7 +50,7 @@
float x, y;
rand2(&x, &y);
// Box-Muller transform
return sqrtf(-2 * logf(x))* cosf(2 * M_PI * y);
return sqrtf(-2 * logf(x + 1e-32f))* cosf(2 * M_PI * y);
}

__host__ __device__
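The added 1e-32f guards against logf(0) = -inf when the xorshift output happens to be exactly zero. A quick Python sketch of the same Box-Muller transform with that guard (illustrative only; the kernel above does the equivalent in float32):

import math, random

def box_muller(u1, u2, eps=1e-32):
    # Guard the log so u1 == 0.0 cannot produce -inf / NaN.
    r = math.sqrt(-2.0 * math.log(u1 + eps))
    theta = 2.0 * math.pi * u2
    return r * math.cos(theta), r * math.sin(theta)

# Two independent standard-normal samples from two uniforms in [0, 1):
print(box_muller(random.random(), random.random()))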
64 changes: 41 additions & 23 deletions svox2/csrc/include/render_util.cuh
@@ -3,6 +3,7 @@

#include <cstdint>
#include "data_spec_packed.cuh"
#include "random_util.cuh"

namespace {
namespace device {
@@ -438,28 +439,6 @@ __device__ __inline__ void cam2world_ray(
world2ndc(cam, dir, origin);
}

__device__ __inline__ void ray_find_bounds(
SingleRaySpec& __restrict__ ray,
const PackedSparseGridSpec& __restrict__ grid,
const RenderOptions& __restrict__ opt) {
// Warning: modifies ray.origin
transform_coord(ray.origin, grid._scaling, grid._offset);
// Warning: modifies ray.dir
ray.world_step = _get_delta_scale(grid._scaling, ray.dir) * opt.step_size;

ray.tmin = opt.near_clip;
ray.tmax = 2e3f;
for (int i = 0; i < 3; ++i) {
const float invdir = 1.0 / ray.dir[i];
const float t1 = (-0.5f - ray.origin[i]) * invdir;
const float t2 = (grid.size[i] - 0.5f - ray.origin[i]) * invdir;
if (ray.dir[i] != 0.f) {
ray.tmin = max(ray.tmin, min(t1, t2));
ray.tmax = min(ray.tmax, max(t1, t2));
}
}
}

struct ConcentricSpheresIntersector {
__device__
ConcentricSpheresIntersector(
@@ -496,9 +475,41 @@
float q2a, qb, f;
};

__device__ __inline__ void ray_find_bounds(
SingleRaySpec& __restrict__ ray,
const PackedSparseGridSpec& __restrict__ grid,
const RenderOptions& __restrict__ opt,
uint32_t ray_id) {
// Warning: modifies ray.origin
transform_coord(ray.origin, grid._scaling, grid._offset);
// Warning: modifies ray.dir
ray.world_step = _get_delta_scale(grid._scaling, ray.dir) * opt.step_size;

ray.tmin = opt.near_clip;
ray.tmax = 2e3f;
for (int i = 0; i < 3; ++i) {
const float invdir = 1.0 / ray.dir[i];
const float t1 = (-0.5f - ray.origin[i]) * invdir;
const float t2 = (grid.size[i] - 0.5f - ray.origin[i]) * invdir;
if (ray.dir[i] != 0.f) {
ray.tmin = max(ray.tmin, min(t1, t2));
ray.tmax = min(ray.tmax, max(t1, t2));
}
}

if (opt.randomize && opt.random_sigma_std > 0.0) {
// Seed the RNG
ray.rng.x = opt._m1 ^ ray_id;
ray.rng.y = opt._m2 ^ ray_id;
ray.rng.z = opt._m3 ^ ray_id;
}
}

__device__ __inline__ void ray_find_bounds_bg(
SingleRaySpec& __restrict__ ray,
const PackedSparseGridSpec& __restrict__ grid) {
const PackedSparseGridSpec& __restrict__ grid,
const RenderOptions& __restrict__ opt,
uint32_t ray_id) {
// Warning: modifies ray.origin
transform_coord(ray.origin, grid._scaling, grid._offset);
// Warning: modifies ray.dir
@@ -533,6 +544,13 @@
// } else {
// ray.tmin = (-qb + sqrtf(det)) / q2a;
// }

if (opt.randomize && opt.random_sigma_std_background > 0) {
// Seed the RNG (hacks)
ray.rng.x = opt._m2 ^ (ray_id - 1);
ray.rng.y = opt._m3 ^ (ray_id - 1);
ray.rng.z = opt._m1 ^ (ray_id - 1);
}
}

} // namespace device
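The seeding pattern above (opt._mK ^ ray_id) gives every ray its own deterministic RNG state derived from three per-launch masks. A rough Python sketch of the idea, using Marsaglia's three-word xorshift (not necessarily the exact update used in random_util.cuh; the mask constants in the example are arbitrary):

class Xorshift96:
    """Three-word xorshift PRNG (Marsaglia-style), 32-bit arithmetic."""
    def __init__(self, x, y, z):
        # Avoid the all-zero state, which xorshift can never leave.
        self.x = (x & 0xFFFFFFFF) or 0x9E3779B9
        self.y = (y & 0xFFFFFFFF) or 0x85EBCA6B
        self.z = (z & 0xFFFFFFFF) or 0xC2B2AE35
    def next_u32(self):
        self.x ^= (self.x << 16) & 0xFFFFFFFF
        self.x ^= self.x >> 5
        self.x ^= (self.x << 1) & 0xFFFFFFFF
        t = self.x
        self.x, self.y = self.y, self.z
        self.z = (t ^ self.x ^ self.y) & 0xFFFFFFFF
        return self.z
    def uniform(self):
        return self.next_u32() / 2**32

def rng_for_ray(m1, m2, m3, ray_id):
    # Mirrors the seeding in ray_find_bounds: per-launch masks XOR ray index.
    return Xorshift96(m1 ^ ray_id, m2 ^ ray_id, m3 ^ ray_id)

# Example: two rays get decorrelated noise streams from the same masks.
r0 = rng_for_ray(0x12345678, 0x9ABCDEF0, 0x0F1E2D3C, 0)
r1 = rng_for_ray(0x12345678, 0x9ABCDEF0, 0x0F1E2D3C, 1)
print(r0.uniform(), r1.uniform())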
30 changes: 15 additions & 15 deletions svox2/csrc/loss_kernel.cu
@@ -28,24 +28,24 @@ void calculate_ray_scale(float ndc_coeffx,
// float maxy,
float maxz,
float* __restrict__ scale) {
if (ndc_coeffx > 0.f) {
// FF NDC

// Normalized to [-1, 1] (with 0.5 padding)
// const float x_norm = (x + 0.5) / maxx * 2 - 1;
// const float y_norm = (y + 0.5) / maxy * 2 - 1;
const float z_norm = (z + 0.5) / maxz * 2 - 1;

// NDC distances
const float disparity = (1 - z_norm) / 2.f; // in [0, 1]
scale[0] = (ndc_coeffx * disparity);//maxx * 0.5f;
scale[1] = (ndc_coeffy * disparity);//maxy * 0.5f;
scale[2] = -((z_norm - 1.f + 2.f / maxz) * disparity) / (maxz * 0.5f);
} else {
// if (ndc_coeffx > 0.f) {
// // FF NDC
//
// // Normalized to [-1, 1] (with 0.5 padding)
// // const float x_norm = (x + 0.5) / maxx * 2 - 1;
// // const float y_norm = (y + 0.5) / maxy * 2 - 1;
// const float z_norm = (z + 0.5) / maxz * 2 - 1;
//
// // NDC distances
// const float disparity = (1 - z_norm) / 2.f; // in [0, 1]
// scale[0] = (ndc_coeffx * disparity);//maxx * 0.5f;
// scale[1] = (ndc_coeffy * disparity);//maxy * 0.5f;
// scale[2] = -((z_norm - 1.f + 2.f / maxz) * disparity) / (maxz * 0.5f);
// } else {
scale[0] = 1.f; //maxx * 0.5f;
scale[1] = 1.f; //maxy * 0.5f;
scale[2] = 1.f; //maxz * 0.5f;
}
// }
}

// __device__ __inline__
(Diffs for the remaining 3 changed files were not loaded.)
