Skip to content

Add a differentiable sparse matrix vector product on top of our ops #392

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 5 commits into from
Dec 8, 2022
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Add wrapper for sparse_mtv in SparseLinearization to make differentiable Atb.
  • Loading branch information
luisenp committed Dec 8, 2022
commit cccce93fe8e569ccd830a9e85fe4faac37c31c3b
6 changes: 6 additions & 0 deletions tests/optimizer/test_sparse_linearization.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,12 @@ def test_sparse_linearization():
for i in range(batch_size):
assert b[i].isclose(linearization.b[i]).all()

# Test Atb result
atb_expected = A.transpose(1, 2).bmm(b.unsqueeze(2))
atb_out = linearization.Atb
torch.testing.assert_close(atb_expected, atb_out)

# Test Av() with a random v
rng = torch.Generator()
rng.manual_seed(1009)
for _ in range(20):
Expand Down
9 changes: 2 additions & 7 deletions theseus/optimizer/sparse_linearization.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
import torch

from theseus.core import Objective
from theseus.utils.sparse_matrix_utils import sparse_mv, tmat_vec
from theseus.utils.sparse_matrix_utils import sparse_mv, sparse_mtv

from .linear_system import SparseStructure
from .linearization import Linearization
Expand Down Expand Up @@ -145,19 +145,14 @@ def _ata_impl(self) -> torch.Tensor:
raise NotImplementedError("AtA is not yet implemented for SparseLinearization.")

def _atb_impl(self) -> torch.Tensor:
if torch.is_grad_enabled():
raise NotImplementedError(
"Atb is not differentiable for SparseLinearization."
)
if self._Atb is None:
A_row_ptr = torch.tensor(self.A_row_ptr, dtype=torch.int32).to(
self.objective.device
)
A_col_ind = A_row_ptr.new_tensor(self.A_col_ind)

# unsqueeze at the end for consistency with DenseLinearization
self._Atb = tmat_vec(
self.objective.batch_size,
self._Atb = sparse_mtv(
self.num_cols,
A_row_ptr,
A_col_ind,
Expand Down