from easyvolcap.utils.console_utils import *
from easyvolcap.utils.sh_utils import eval_sh
from easyvolcap.utils.blend_utils import batch_rodrigues
- from easyvolcap.utils.math_utils import torch_inverse_2x2
from easyvolcap.utils.data_utils import to_x, add_batch, load_pts
from easyvolcap.utils.net_utils import make_buffer, make_params, typed
+ from easyvolcap.utils.math_utils import torch_inverse_2x2, point_padding
+
+
+ # def in_frustrum(xyz: torch.Tensor, ixt: torch.Tensor, ext: torch.Tensor):
+ def in_frustrum(xyz: torch.Tensor, full_proj_matrix: torch.Tensor, padding: float = 0.01):
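+     # Python port of the CUDA in_frustum check quoted below: a point passes if its
+     # projected NDC coordinates fall inside the [-1, 1] cube expanded by `padding`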
+     # __forceinline__ __device__ bool in_frustum(int idx,
+     #                                            const float* orig_points,
+     #                                            const float* viewmatrix,
+     #                                            const float* projmatrix,
+     #                                            bool prefiltered,
+     #                                            float3& p_view,
+     #                                            const float padding = 0.01f  // padding in ndc space
+     #                                            )
+     # {
+     #     float3 p_orig = { orig_points[3 * idx], orig_points[3 * idx + 1], orig_points[3 * idx + 2] };
+
+     #     // Bring points to screen space
+     #     float4 p_hom = transformPoint4x4(p_orig, projmatrix);
+     #     float p_w = 1.0f / (p_hom.w + 0.0000001f);
+     #     float3 p_proj = { p_hom.x * p_w, p_hom.y * p_w, p_hom.z * p_w };
+     #     p_view = transformPoint4x3(p_orig, viewmatrix);  // write this outside
+
+     #     // if (idx % 32768 == 0) printf("Viewspace point: %f, %f, %f\n", p_view.x, p_view.y, p_view.z);
+     #     // if (idx % 32768 == 0) printf("Projected point: %f, %f, %f\n", p_proj.x, p_proj.y, p_proj.z);
+     #     return (p_proj.z > -1 - padding) && (p_proj.z < 1 + padding) && (p_proj.x > -1 - padding) && (p_proj.x < 1. + padding) && (p_proj.y > -1 - padding) && (p_proj.y < 1. + padding);
+     # }
+
+     # xyz: N, 3
+     # ndc = (xyz @ R.mT + T)[..., :3] @ K  # N, 3
+     # ndc[..., :2] = ndc[..., :2] / ndc[..., 2:] / torch.as_tensor([W, H], device=ndc.device)  # N, 2, normalized x and y
+     ndc = point_padding(xyz) @ full_proj_matrix
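+     # (assumption: point_padding lifts xyz from N, 3 to homogeneous N, 4 by appending
+     # ones, so the 4x4 full projection applies as a single right-hand matrix multiply)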
+     ndc = ndc[..., :3] / ndc[..., 3:]
+     return (ndc[..., 2] > -1 - padding) & (ndc[..., 2] < 1 + padding) & (ndc[..., 0] > -1 - padding) & (ndc[..., 0] < 1. + padding) & (ndc[..., 1] > -1 - padding) & (ndc[..., 1] < 1. + padding)  # N,
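+
+ # A hedged usage sketch (assumes `xyz` is an N, 3 tensor on the same device as a
+ # camera built by convert_to_gaussian_camera below):
+ #   visible = in_frustrum(xyz, camera.full_proj_transform)  # N, boolean mask
+ #   xyz = xyz[visible]  # keep only points inside the padded NDC cube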


@torch.jit.script
@@ -199,7 +231,8 @@ def prepare_gaussian_camera(batch):
def convert_to_gaussian_camera(K: torch.Tensor,
                               R: torch.Tensor,
                               T: torch.Tensor,
-                              H: int, W: int,
+                              H: int,
+                              W: int,
                               znear: float = 0.01,
                               zfar: float = 100.
                               ):
@@ -220,7 +253,7 @@ def convert_to_gaussian_camera(K: torch.Tensor,

    output.world_view_transform = getWorld2View(output.R, output.T).transpose(0, 1)
    output.projection_matrix = getProjectionMatrix(output.K, output.image_height, output.image_width, znear, zfar).transpose(0, 1)
-     output.full_proj_transform = torch.matmul(output.world_view_transform, output.projection_matrix)
+     output.full_proj_transform = torch.matmul(output.world_view_transform, output.projection_matrix)  # 4, 4
    output.camera_center = output.world_view_transform.inverse()[3:, :3]

    # Set up rasterization configuration
@@ -686,6 +719,9 @@ def render(self, batch: dotdict):
        # Prepare the camera transformation for Gaussian
        gaussian_camera = to_x(prepare_gaussian_camera(batch), torch.float)

+         # is_in_frustrum = in_frustrum(xyz, gaussian_camera.full_proj_transform)
+         # print('Number of points to render:', is_in_frustrum.sum().item())
+
        # Prepare rasterization settings for gaussian
        raster_settings = GaussianRasterizationSettings(
            image_height=gaussian_camera.image_height,