Skip to content

Commit

Permalink
evc: wip enerf demo
Browse files Browse the repository at this point in the history
  • Loading branch information
dendenxu committed Apr 7, 2024
1 parent bd5f335 commit 844b8d9
Show file tree
Hide file tree
Showing 4 changed files with 9 additions and 6 deletions.
2 changes: 0 additions & 2 deletions configs/datasets/enerf_outdoor/enerf_outdoor.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,6 @@ viewer_cfg:
T: [[-0.14367961883544922],[0.0021661361679434776],[5.292649269104004]]
n: 4.0
f: 9.0
H: 768
W: 1366
bounds: [[-4.0,-4.0,-2.0],[4.0,4.0,2.0]]
# {"H":768,"W":1366,"K":[[1639.199951171875,0.0,683.0],[0.0,1639.199951171875,384.0],[0.0,0.0,1.0]],"R":[[0.8829049468040466,-0.46955186128616333,0.0],[-0.005646302364766598,-0.010616821236908436,-0.9999276995658875],[0.46951788663864136,0.8828410506248474,-0.012024874798953533]],"T":[[-0.14367961883544922],[0.0021661361679434776],[5.292649269104004]],"n":4.0,"f":9.0,"t":0.0,"v":0.0,"bounds":[[-4.0,-4.0,-2.0],[4.0,4.0,2.0]],"mass":0.10000000149011612,"moment_of_inertia":0.10000000149011612,"movement_force":1.0,"movement_torque":1.0,"movement_speed":1.0,"origin":[0.0,0.0,0.0],"world_up":[0.0,0.0,1.0]}
# {"H":768,"W":1366,"K":[[4373.33447265625,0.0,1822.22265625],[0.0,4373.33447265625,1024.5],[0.0,0.0,1.0]],"R":[[0.8829049468040466,-0.46955186128616333,0.0],[-0.005646302364766598,-0.010616821236908436,-0.9999276995658875],[0.46951788663864136,0.8828410506248474,-0.012024874798953533]],"T":[[-0.14367961883544922],[0.0021661361679434776],[5.292649269104004]],"n":4.0,"f":9.0,"t":0.0,"v":0.0,"bounds":[[-1.0,-1.0,-1.0],[1.0,1.0,1.0]],"mass":0.10000000149011612,"moment_of_inertia":0.10000000149011612,"movement_force":1.0,"movement_torque":1.0,"movement_speed":1.0,"origin":[0.0,0.0,0.0],"world_up":[0.0,0.0,1.0]}
2 changes: 2 additions & 0 deletions easyvolcap/runners/volumetric_video_viewer.py
Original file line number Diff line number Diff line change
Expand Up @@ -1397,6 +1397,8 @@ def init_camera(self, camera_cfg: dotdict = dotdict(), view_index: int = None):
f = camera_cfg.pop('f', dataset.far)
t = camera_cfg.pop('t', 0)
v = camera_cfg.pop('v', 0)
camera_cfg.pop('H', 0)
camera_cfg.pop('W', 0)
bounds = camera_cfg.pop('bounds', dataset.bounds.clone()) # avoids modification
self.camera = Camera(H, W, K, R, T, n, f, t, v, bounds, **camera_cfg)
self.camera.front = self.camera.front # perform alignment correction
Expand Down
9 changes: 5 additions & 4 deletions easyvolcap/runners/websocket_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,7 @@ def __init__(self,
self.runner.visualizer.uncrop_output_images = False # manual uncropping
self.visualization_type = Visualization.RENDER
self.epoch = self.runner.load_network() # load weights only (without optimizer states)
self.iter = self.epoch * self.runner.ep_iter # loaded iter
self.dataset = self.runner.val_dataloader.dataset
self.model = self.runner.model
self.model.eval()
Expand Down Expand Up @@ -130,7 +131,7 @@ async def server_loop(self, websocket: websockets.WebSocket, path: str):
self.stream.synchronize() # waiting for the copy event to complete
with self.lock:
image = self.image.numpy() # copy to new memory space
image = encode_jpeg(torch.from_numpy(image)[..., :3].permute(2, 0, 1), quality=self.jpeg_quality).numpy().tobytes()
image = encode_jpeg(torch.from_numpy(image).permute(2, 0, 1), quality=self.jpeg_quality).numpy().tobytes()
await websocket.send(image)

response = await websocket.recv()
Expand All @@ -152,14 +153,14 @@ async def server_loop(self, websocket: websockets.WebSocket, path: str):

def render(self, batch: dotdict):
batch = self.dataset.get_viewer_batch(batch)
batch = to_cuda(add_batch(add_iter(batch, 0, 1)))
batch = to_cuda(add_batch(add_iter(batch,self.iter, self.runner.total_iter)))

# Forward pass
self.runner.maybe_jit_model(batch)
with torch.inference_mode(self.runner.test_using_inference_mode), torch.no_grad(), torch.cuda.amp.autocast(enabled=self.runner.test_use_amp, cache_enabled=self.runner.test_amp_cached):
output = self.model(batch)

image = self.runner.visualizer.generate_type(output, batch, self.visualization_type)[0][0] # RGBA (should we use alpha?)
image = image[..., :3] * image[..., 3:]
image = (image.clip(0, 1) * 255).type(torch.uint8).flip(0) # transform

return image # H, W, 4
return image # H, W, 3
2 changes: 2 additions & 0 deletions easyvolcap/scripts/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,8 @@ def __init__(self,
self.dynamic = dotdict()

def init_camera(self, camera_cfg: dotdict):
camera_cfg.H = camera_cfg.pop('H', self.H)
camera_cfg.W = camera_cfg.pop('W', self.W)
self.camera = Camera(**camera_cfg)
self.camera.front = self.camera.front

Expand Down

0 comments on commit 844b8d9

Please sign in to comment.