Implement SE3.normalize() #506

Merged
merged 12 commits into from
May 2, 2023
SO3.normalize() works
fantaosha committed May 1, 2023
commit 1a3300bea1b3a6739b809ca45232af526efc6de7
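The forward pass below projects each 3x3 input matrix onto SO(3), i.e. onto the nearest rotation in the Frobenius norm (the special orthogonal Procrustes solution). As a short sketch of the formula the code computes: writing the SVD of a batch element as M = U S V^T, the projection is

    R = U \,\operatorname{diag}\big(1,\ 1,\ \det(U V^\top)\big)\, V^\top,

which enforces R^T R = I and det(R) = +1; the backward pass differentiates through this SVD-based projection.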
46 changes: 29 additions & 17 deletions theseus/labs/lie/functional/so3_impl.py
@@ -985,40 +985,52 @@ def backward(cls, ctx, grad_output):
# -----------------------------------------------------------------------------
# Normalize
# -----------------------------------------------------------------------------
def _normalize_impl(matrix: torch.Tensor):
    check_matrix_tensor(matrix)
    u, s, v = torch.svd(matrix)
    # det(u @ v) equals det(u @ v.transpose(1, 2)); flipping the sign of the
    # last column of v when this determinant is negative makes u @ vt a
    # proper rotation (determinant +1).
    sign = torch.det(u @ v).view(-1, 1, 1)
    vt = torch.cat(
        (v[:, :, :2], torch.where(sign > 0, v[:, :, 2:], -v[:, :, 2:])), dim=-1
    ).transpose(1, 2)
    # Return the projected rotation and the SVD factors needed by backward().
    return u @ vt, (u, s, v, sign)


class Normalize(lie_group.UnaryOperator):
    @classmethod
    def forward(cls, ctx, matrix):
        matrix: torch.Tensor = cast(torch.Tensor, matrix)
        # _normalize_impl() checks the input and returns both the projected
        # rotation and the SVD factors needed for the backward pass.
        output, (u, s, v, sign) = _normalize_impl(matrix)
        ctx.save_for_backward(u, s, v, sign)
        return output

    @classmethod
    def backward(cls, ctx, grad_output):
        u, s, v, sign = ctx.saved_tensors
        ut = u.transpose(1, 2)
        vt = v.transpose(1, 2)
        # Gradients with respect to u and v, folding in the sign flip that the
        # forward pass applied to the last singular vector.
        grad_u: torch.Tensor = grad_output @ torch.cat(
            (v[:, :, :2], v[:, :, 2:] @ sign), dim=-1
        )
        grad_v: torch.Tensor = grad_output.transpose(1, 2) @ torch.cat(
            (u[:, :, :2], u[:, :, 2:] @ sign), dim=-1
        )
        # F[i, j] = 1 / (s[j]^2 - s[i]^2) off the diagonal; entries where the
        # singular values coincide (including the diagonal) are sent to inf
        # before inversion, so they become 0.
        s_squared: torch.Tensor = s.pow(2)
        F = s_squared.view(-1, 1, 3).expand(-1, 3, 3) - s_squared.view(-1, 3, 1).expand(
            -1, 3, 3
        )
        F = torch.where(F == 0, (torch.ones(1) * math.inf).expand(F.shape), F)
        F = F.pow(-1)

        u_term: torch.Tensor = u @ (F * (ut @ grad_u - grad_u.transpose(1, 2) @ u))
        u_term = torch.einsum("n...ij, nj->n...ij", u_term, s)
        u_term = u_term @ vt

        v_term: torch.Tensor = (
            F * (vt @ grad_v - grad_v.transpose(1, 2) @ v)
        ) @ v.transpose(1, 2)
        v_term = torch.einsum("ni, n...ij->n...ij", s, v_term)
        v_term = u @ v_term

        return u_term + v_term


_normalize_autograd_fn = Normalize.apply
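
A minimal usage sketch, assuming the module path shown in the diff header and calling the module-level _normalize_autograd_fn defined above; the batch size and tolerances are illustrative only:

import torch

from theseus.labs.lie.functional.so3_impl import _normalize_autograd_fn

# A batch of 3x3 matrices that are close to, but not exactly, rotations.
noisy = torch.eye(3, dtype=torch.float64).repeat(4, 1, 1)
noisy = noisy + 0.1 * torch.randn(4, 3, 3, dtype=torch.float64)

rot = _normalize_autograd_fn(noisy)

# Each projected matrix should be a proper rotation: R^T R = I and det(R) = +1.
eye = torch.eye(3, dtype=torch.float64).expand(4, 3, 3)
assert torch.allclose(rot.transpose(1, 2) @ rot, eye, atol=1e-6)
assert torch.allclose(torch.det(rot), torch.ones(4, dtype=torch.float64), atol=1e-6)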