2 changes: 1 addition & 1 deletion aten/src/ATen/core/Tensor.h
@@ -731,7 +731,7 @@ class CAFFE2_API Tensor {
Tensor gather(int64_t dim, const Tensor & index, bool sparse_grad=false) const;
Tensor addcmul(const Tensor & tensor1, const Tensor & tensor2, Scalar value=1) const;
Tensor addcdiv(const Tensor & tensor1, const Tensor & tensor2, Scalar value=1) const;
std::tuple<Tensor,Tensor> gels(const Tensor & A) const;
std::tuple<Tensor,Tensor> lstsq(const Tensor & A) const;
std::tuple<Tensor,Tensor> triangular_solve(const Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false) const;
std::tuple<Tensor,Tensor> symeig(bool eigenvectors=false, bool upper=true) const;
std::tuple<Tensor,Tensor> eig(bool eigenvectors=false) const;
4 changes: 2 additions & 2 deletions aten/src/ATen/core/TensorMethods.h
@@ -1545,8 +1545,8 @@ inline Tensor Tensor::addcdiv(const Tensor & tensor1, const Tensor & tensor2, Sc
static auto table = globalATenDispatch().getOpTable("aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor");
return table->getOp<Tensor (const Tensor &, const Tensor &, const Tensor &, Scalar)>(tensorTypeIdToBackend(type_id()), is_variable())(*this, tensor1, tensor2, value);
}
inline std::tuple<Tensor,Tensor> Tensor::gels(const Tensor & A) const {
static auto table = globalATenDispatch().getOpTable("aten::gels(Tensor self, Tensor A) -> (Tensor solution, Tensor QR)");
inline std::tuple<Tensor,Tensor> Tensor::lstsq(const Tensor & A) const {
static auto table = globalATenDispatch().getOpTable("aten::lstsq(Tensor self, Tensor A) -> (Tensor solution, Tensor QR)");
return table->getOp<std::tuple<Tensor,Tensor> (const Tensor &, const Tensor &)>(tensorTypeIdToBackend(type_id()), is_variable())(*this, A);
}
inline std::tuple<Tensor,Tensor> Tensor::triangular_solve(const Tensor & A, bool upper, bool transpose, bool unitriangular) const {
2 changes: 1 addition & 1 deletion aten/src/ATen/core/aten_interned_strings.h
@@ -334,7 +334,6 @@ _(aten, full) \
_(aten, full_like) \
_(aten, gather) \
_(aten, ge) \
_(aten, gels) \
_(aten, geometric) \
_(aten, geqrf) \
_(aten, ger) \
@@ -415,6 +414,7 @@ _(aten, logspace) \
_(aten, logsumexp) \
_(aten, lstm) \
_(aten, lstm_cell) \
_(aten, lstsq) \
_(aten, lt) \
_(aten, lu_solve) \
_(aten, margin_ranking_loss) \
4 changes: 2 additions & 2 deletions aten/src/ATen/native/native_functions.yaml
@@ -3747,12 +3747,12 @@
CPU: legacy::cpu::_th_addcdiv
CUDA: legacy::cuda::_th_addcdiv

- func: gels(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR)
- func: lstsq(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR)
dispatch:
CPU: legacy::cpu::_th_gels_out
CUDA: legacy::cuda::_th_gels_out

- func: gels(Tensor self, Tensor A) -> (Tensor solution, Tensor QR)
- func: lstsq(Tensor self, Tensor A) -> (Tensor solution, Tensor QR)
variants: method, function
dispatch:
CPU: legacy::cpu::_th_gels
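For orientation, a minimal sketch of calling the renamed operator defined by the schemas above (illustrative only; `self` is the right-hand side `b`, `A` is the coefficient matrix, and the shapes here are made up):

import torch

# Overdetermined system: A is 5x3, b carries two right-hand sides.
A = torch.randn(5, 3)
b = torch.randn(5, 2)

# torch.lstsq(b, A) replaces torch.gels(b, A); it returns (solution, QR).
solution, qr = torch.lstsq(b, A)

# For m >= n, the first n rows of `solution` hold the least-squares X.
x = solution[:A.size(1)]
print((A.mm(x) - b).norm())

# Out-variant matching the first schema: out=(X, qr).
X, QR = torch.Tensor(), torch.Tensor()
torch.lstsq(b, A, out=(X, QR))
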
1 change: 1 addition & 0 deletions docs/source/tensors.rst
@@ -317,6 +317,7 @@ view of a storage and defines numeric operations on it.
.. automethod:: log_normal_
.. automethod:: logsumexp
.. automethod:: long
.. automethod:: lstsq
.. automethod:: lt
.. automethod:: lt_
.. automethod:: lu
1 change: 1 addition & 0 deletions docs/source/torch.rst
@@ -334,6 +334,7 @@ BLAS and LAPACK Operations
.. autofunction:: det
.. autofunction:: logdet
.. autofunction:: slogdet
.. autofunction:: lstsq
.. autofunction:: lu
.. autofunction:: lu_solve
.. autofunction:: lu_unpack
4 changes: 4 additions & 0 deletions test/test_cuda.py
@@ -2757,6 +2757,10 @@ def test_triangular_solve_batched_many_batches(self):
def test_triangular_solve_batched_dims(self):
_TestTorchMixin._test_triangular_solve_batched_dims(self, lambda t: t.cuda())

@unittest.skipIf(not TEST_MAGMA, "no MAGMA library detected")
def test_lstsq(self):
_TestTorchMixin._test_lstsq(self, 'cuda')

@unittest.skipIf(not TEST_MAGMA, "no MAGMA library detected")
def test_qr(self):
_TestTorchMixin._test_qr(self, lambda t: t.cuda())
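A rough sketch of what the new CUDA test covers (illustrative; per the skip above the CUDA kernel needs MAGMA, and per the refactored test further down underdetermined systems are not supported on the GPU, so keep m >= n):

import torch

if torch.cuda.is_available():
    A = torch.randn(6, 4, device='cuda')
    b = torch.randn(6, 2, device='cuda')
    # Same call as on CPU; only m >= n systems are expected to work here.
    solution, qr = torch.lstsq(b, A)
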
4 changes: 2 additions & 2 deletions test/test_namedtuple_return_api.py
@@ -11,7 +11,7 @@
aten_native_yaml = os.path.join(path, '../aten/src/ATen/native/native_functions.yaml')
all_operators_with_namedtuple_return = {
'max', 'min', 'median', 'mode', 'kthvalue', 'svd', 'symeig', 'eig',
'qr', 'geqrf', 'solve', 'slogdet', 'sort', 'topk', 'gels',
'qr', 'geqrf', 'solve', 'slogdet', 'sort', 'topk', 'lstsq',
'triangular_solve'
}

@@ -61,7 +61,7 @@ def test_namedtuple_return(self):
op(operators=['geqrf'], input=(), names=('a', 'tau'), hasout=True),
op(operators=['symeig', 'eig'], input=(True,), names=('eigenvalues', 'eigenvectors'), hasout=True),
op(operators=['triangular_solve'], input=(a,), names=('solution', 'cloned_coefficient'), hasout=True),
op(operators=['gels'], input=(a,), names=('solution', 'QR'), hasout=True),
op(operators=['lstsq'], input=(a,), names=('solution', 'QR'), hasout=True),
]

for op in operators:
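A short sketch of the namedtuple access this test exercises (illustrative; field names as asserted above):

import torch

a = torch.randn(4, 4)
b = torch.randn(4, 2)

# lstsq returns a namedtuple whose fields match names=('solution', 'QR').
result = torch.lstsq(b, a)
print(result.solution.shape, result.QR.shape)

# Positional unpacking keeps working as before.
solution, qr = result
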
102 changes: 56 additions & 46 deletions test/test_torch.py
@@ -5771,28 +5771,35 @@ def run_test(A_dims, b_dims, cast, upper, transpose, unitriangular):
def test_triangular_solve_batched_dims(self):
self._test_triangular_solve_batched_dims(self, lambda t: t)

@skipIfNoLapack
def test_gels(self):
@staticmethod
def _test_lstsq(self, device):
def cast_fn(tensor):
return tensor.to(device=device)

def _test_underdetermined(a, b, expectedNorm):
# underdetermined systems are not supported on the GPU
if 'cuda' in device:
return

m = a.size()[0]
n = a.size()[1]
assert(m <= n)

a_copy = a.clone()
b_copy = b.clone()
res1 = torch.gels(b, a)[0]
res1 = torch.lstsq(b, a)[0]
self.assertEqual(a, a_copy, 0)
self.assertEqual(b, b_copy, 0)
self.assertEqual((torch.mm(a, res1) - b).norm(), expectedNorm, 1e-8)

ta = torch.Tensor()
tb = torch.Tensor()
res2 = torch.gels(b, a, out=(tb, ta))[0]
ta = cast_fn(torch.Tensor())
tb = cast_fn(torch.Tensor())
res2 = torch.lstsq(b, a, out=(tb, ta))[0]
self.assertEqual(a, a_copy, 0)
self.assertEqual(b, b_copy, 0)
self.assertEqual((torch.mm(a, res1) - b).norm(), expectedNorm, 1e-8)

res3 = torch.gels(b, a, out=(b, a))[0]
res3 = torch.lstsq(b, a, out=(b, a))[0]
self.assertEqual((torch.mm(a_copy, b) - b_copy).norm(), expectedNorm, 1e-8)
self.assertEqual(res1, tb, 0)
self.assertEqual(res1, b, 0)
@@ -5806,7 +5813,6 @@ def _test_overdetermined(a, b, expectedNorm):

def check_norm(a, b, expected_norm, gels_result):
# Checks |ax - b| and the residual info from the result
n = a.size()[1]

# The first n rows is the least square solution.
# Rows n to m-1 contain residual information.
@@ -5819,19 +5825,19 @@ def check_norm(a, b, expected_norm, gels_result):

a_copy = a.clone()
b_copy = b.clone()
res1 = torch.gels(b, a)[0]
res1 = torch.lstsq(b, a)[0]
self.assertEqual(a, a_copy, 0)
self.assertEqual(b, b_copy, 0)
check_norm(a, b, expectedNorm, res1)

ta = torch.Tensor()
tb = torch.Tensor()
res2 = torch.gels(b, a, out=(tb, ta))[0]
ta = cast_fn(torch.Tensor())
tb = cast_fn(torch.Tensor())
res2 = torch.lstsq(b, a, out=(tb, ta))[0]
self.assertEqual(a, a_copy, 0)
self.assertEqual(b, b_copy, 0)
check_norm(a, b, expectedNorm, res2)

res3 = torch.gels(b, a, out=(b, a))[0]
res3 = torch.lstsq(b, a, out=(b, a))[0]
check_norm(a_copy, b_copy, expectedNorm, res3)

self.assertEqual(res1, tb, 0)
@@ -5841,51 +5847,55 @@ def check_norm(a, b, expected_norm, gels_result):

# basic test
expectedNorm = 0
a = torch.Tensor(((1.44, -9.96, -7.55, 8.34),
(-7.84, -0.28, 3.24, 8.09),
(-4.39, -3.24, 6.27, 5.28),
(4.53, 3.83, -6.64, 2.06))).t()
b = torch.Tensor(((8.58, 8.26, 8.48, -5.28),
(9.35, -4.43, -0.70, -0.26))).t()
a = cast_fn(torch.Tensor(((1.44, -9.96, -7.55, 8.34),
(-7.84, -0.28, 3.24, 8.09),
(-4.39, -3.24, 6.27, 5.28),
(4.53, 3.83, -6.64, 2.06)))).t()
b = cast_fn(torch.Tensor(((8.58, 8.26, 8.48, -5.28),
(9.35, -4.43, -0.70, -0.26)))).t()
_test_underdetermined(a, b, expectedNorm)

# test overderemined
# test overdetermined
expectedNorm = 17.390200628863
a = torch.Tensor(((1.44, -9.96, -7.55, 8.34, 7.08, -5.45),
(-7.84, -0.28, 3.24, 8.09, 2.52, -5.70),
(-4.39, -3.24, 6.27, 5.28, 0.74, -1.19),
(4.53, 3.83, -6.64, 2.06, -2.47, 4.70))).t()
b = torch.Tensor(((8.58, 8.26, 8.48, -5.28, 5.72, 8.93),
(9.35, -4.43, -0.70, -0.26, -7.36, -2.52))).t()
a = cast_fn(torch.Tensor(((1.44, -9.96, -7.55, 8.34, 7.08, -5.45),
(-7.84, -0.28, 3.24, 8.09, 2.52, -5.70),
(-4.39, -3.24, 6.27, 5.28, 0.74, -1.19),
(4.53, 3.83, -6.64, 2.06, -2.47, 4.70)))).t()
b = cast_fn(torch.Tensor(((8.58, 8.26, 8.48, -5.28, 5.72, 8.93),
(9.35, -4.43, -0.70, -0.26, -7.36, -2.52)))).t()
_test_overdetermined(a, b, expectedNorm)

# test underdetermined
expectedNorm = 0
a = torch.Tensor(((1.44, -9.96, -7.55),
(-7.84, -0.28, 3.24),
(-4.39, -3.24, 6.27),
(4.53, 3.83, -6.64))).t()
b = torch.Tensor(((8.58, 8.26, 8.48),
(9.35, -4.43, -0.70))).t()
a = cast_fn(torch.Tensor(((1.44, -9.96, -7.55),
(-7.84, -0.28, 3.24),
(-4.39, -3.24, 6.27),
(4.53, 3.83, -6.64)))).t()
b = cast_fn(torch.Tensor(((8.58, 8.26, 8.48),
(9.35, -4.43, -0.70)))).t()
_test_underdetermined(a, b, expectedNorm)

# test reuse
expectedNorm = 0
a = torch.Tensor(((1.44, -9.96, -7.55, 8.34),
(-7.84, -0.28, 3.24, 8.09),
(-4.39, -3.24, 6.27, 5.28),
(4.53, 3.83, -6.64, 2.06))).t()
b = torch.Tensor(((8.58, 8.26, 8.48, -5.28),
(9.35, -4.43, -0.70, -0.26))).t()
ta = torch.Tensor()
tb = torch.Tensor()
torch.gels(b, a, out=(tb, ta))
a = cast_fn(torch.Tensor(((1.44, -9.96, -7.55, 8.34),
(-7.84, -0.28, 3.24, 8.09),
(-4.39, -3.24, 6.27, 5.28),
(4.53, 3.83, -6.64, 2.06)))).t()
b = cast_fn(torch.Tensor(((8.58, 8.26, 8.48, -5.28),
(9.35, -4.43, -0.70, -0.26)))).t()
ta = cast_fn(torch.Tensor())
tb = cast_fn(torch.Tensor())
torch.lstsq(b, a, out=(tb, ta))
self.assertEqual((torch.mm(a, tb) - b).norm(), expectedNorm, 1e-8)
torch.gels(b, a, out=(tb, ta))
torch.lstsq(b, a, out=(tb, ta))
self.assertEqual((torch.mm(a, tb) - b).norm(), expectedNorm, 1e-8)
torch.gels(b, a, out=(tb, ta))
torch.lstsq(b, a, out=(tb, ta))
self.assertEqual((torch.mm(a, tb) - b).norm(), expectedNorm, 1e-8)

@skipIfNoLapack
def test_lstsq(self):
self._test_lstsq(self, 'cpu')

@skipIfNoLapack
def test_eig(self):
a = torch.Tensor(((1.96, 0.00, 0.00, 0.00, 0.00),
@@ -8906,9 +8916,9 @@ def fn(torchfn, *args):
q, r = fn(torch.qr, (3, 0), False)
self.assertEqual([(3, 3), (3, 0)], [q.shape, r.shape])

# gels
self.assertRaises(RuntimeError, lambda: torch.gels(torch.randn(0, 0), torch.randn(0, 0)))
self.assertRaises(RuntimeError, lambda: torch.gels(torch.randn(0,), torch.randn(0, 0)))
# lstsq
self.assertRaises(RuntimeError, lambda: torch.lstsq(torch.randn(0, 0), torch.randn(0, 0)))
self.assertRaises(RuntimeError, lambda: torch.lstsq(torch.randn(0,), torch.randn(0, 0)))

def test_expand(self):
tensor = torch.rand(1, 8, 1)
8 changes: 4 additions & 4 deletions tools/autograd/derivatives.yaml
@@ -345,10 +345,6 @@
self: zeros_like(self)
other: zeros_like(other)

- name: gels(Tensor self, Tensor A) -> (Tensor solution, Tensor QR)
self: not_implemented("gels")
A: not_implemented("gels")

- name: geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)
self: zeros_like(grad)

@@ -467,6 +463,10 @@
- name: logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
self: logsumexp_backward(grad, self, result, dim, keepdim)

- name: lstsq(Tensor self, Tensor A) -> (Tensor solution, Tensor QR)
self: not_implemented("lstsq")
A: not_implemented("lstsq")

- name: lt_(Tensor(a!) self, Scalar other) -> Tensor(a!)
self: zeros_like(self)

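The not_implemented entries above mean differentiating through lstsq is expected to fail; a small sketch of how that surfaces in user code (illustrative, exact error text not taken from the patch):

import torch

A = torch.randn(4, 3, requires_grad=True)
b = torch.randn(4, 2, requires_grad=True)

solution, qr = torch.lstsq(b, A)

# The derivative is registered as not_implemented("lstsq"), so backprop
# through the result should raise a RuntimeError.
try:
    solution.sum().backward()
except RuntimeError as err:
    print("lstsq backward is not implemented:", err)
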
14 changes: 7 additions & 7 deletions torch/_tensor_docs.py
@@ -1040,13 +1040,6 @@ def add_docstr_all(method, docstr):
In-place version of :meth:`~Tensor.ge`
""")

add_docstr_all('gels',
r"""
gels(A) -> Tensor

See :func:`torch.gels`
""")

add_docstr_all('geometric_',
r"""
geometric_(p, *, generator=None) -> Tensor
@@ -1466,6 +1459,13 @@ def add_docstr_all(method, docstr):
See :func:`torch.logsumexp`
""")

add_docstr_all('lstsq',
r"""
lstsq(A) -> (Tensor, Tensor)

See :func:`torch.lstsq`
""")

add_docstr_all('lt',
r"""
lt(other) -> Tensor
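The docstring registered above covers the Tensor-method spelling; a minimal sketch (illustrative):

import torch

b = torch.randn(5, 2)
A = torch.randn(5, 3)

# Method form mirrors torch.lstsq(b, A): called on the right-hand side.
solution, qr = b.lstsq(A)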