Commit b3a9a7a

vishwakftw authored and facebook-github-bot committed
Rename gels to lstsq (#23460)
Summary:
Changelog:
- Rename `gels` to `lstsq`
- Fix all call sites
- Rename all tests
- Create a tentative alias for `lstsq` under the name `gels`, with a deprecation warning to discourage its use

Pull Request resolved: #23460

Test Plan: All tests should pass, confirming that the patch is correct.

Differential Revision: D16547834

Pulled By: colesbury

fbshipit-source-id: b3bdb8f4c5d14c7716c3d9528e40324cc544e496
1 parent cfe9400 commit b3a9a7a
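
At the call level, the rename looks like the following minimal sketch; shapes and values are illustrative, and per the summary `gels` remains available as a deprecated alias that warns on use:

    import torch

    A = torch.randn(5, 3)   # 5 equations, 3 unknowns
    b = torch.randn(5, 2)   # two right-hand sides

    # New name introduced by this commit; returns (solution, QR)
    solution, QR = torch.lstsq(b, A)

    # Also available as a Tensor method, matching the schema's
    # `variants: method, function`
    solution, QR = b.lstsq(A)

    # Old spelling, kept temporarily; expected to emit a
    # deprecation warning per the summary
    solution_old, _ = torch.gels(b, A)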

File tree

14 files changed: +175 -132 lines changed

aten/src/ATen/core/Tensor.h

Lines changed: 1 addition & 1 deletion
@@ -731,7 +731,7 @@ class CAFFE2_API Tensor {
   Tensor gather(int64_t dim, const Tensor & index, bool sparse_grad=false) const;
   Tensor addcmul(const Tensor & tensor1, const Tensor & tensor2, Scalar value=1) const;
   Tensor addcdiv(const Tensor & tensor1, const Tensor & tensor2, Scalar value=1) const;
-  std::tuple<Tensor,Tensor> gels(const Tensor & A) const;
+  std::tuple<Tensor,Tensor> lstsq(const Tensor & A) const;
   std::tuple<Tensor,Tensor> triangular_solve(const Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false) const;
   std::tuple<Tensor,Tensor> symeig(bool eigenvectors=false, bool upper=true) const;
   std::tuple<Tensor,Tensor> eig(bool eigenvectors=false) const;

aten/src/ATen/core/TensorMethods.h

Lines changed: 2 additions & 2 deletions
@@ -1545,8 +1545,8 @@ inline Tensor Tensor::addcdiv(const Tensor & tensor1, const Tensor & tensor2, Sc
   static auto table = globalATenDispatch().getOpTable("aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor");
   return table->getOp<Tensor (const Tensor &, const Tensor &, const Tensor &, Scalar)>(tensorTypeIdToBackend(type_id()), is_variable())(*this, tensor1, tensor2, value);
 }
-inline std::tuple<Tensor,Tensor> Tensor::gels(const Tensor & A) const {
-  static auto table = globalATenDispatch().getOpTable("aten::gels(Tensor self, Tensor A) -> (Tensor solution, Tensor QR)");
+inline std::tuple<Tensor,Tensor> Tensor::lstsq(const Tensor & A) const {
+  static auto table = globalATenDispatch().getOpTable("aten::lstsq(Tensor self, Tensor A) -> (Tensor solution, Tensor QR)");
   return table->getOp<std::tuple<Tensor,Tensor> (const Tensor &, const Tensor &)>(tensorTypeIdToBackend(type_id()), is_variable())(*this, A);
 }
 inline std::tuple<Tensor,Tensor> Tensor::triangular_solve(const Tensor & A, bool upper, bool transpose, bool unitriangular) const {

aten/src/ATen/core/aten_interned_strings.h

Lines changed: 1 addition & 1 deletion
@@ -335,7 +335,6 @@ _(aten, full) \
 _(aten, full_like) \
 _(aten, gather) \
 _(aten, ge) \
-_(aten, gels) \
 _(aten, geometric) \
 _(aten, geqrf) \
 _(aten, ger) \
@@ -416,6 +415,7 @@ _(aten, logspace) \
 _(aten, logsumexp) \
 _(aten, lstm) \
 _(aten, lstm_cell) \
+_(aten, lstsq) \
 _(aten, lt) \
 _(aten, lu_solve) \
 _(aten, margin_ranking_loss) \

aten/src/ATen/native/native_functions.yaml

Lines changed: 2 additions & 2 deletions
@@ -3751,12 +3751,12 @@
   CPU: legacy::cpu::_th_addcdiv
   CUDA: legacy::cuda::_th_addcdiv
 
-- func: gels(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR)
+- func: lstsq(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR)
   dispatch:
     CPU: legacy::cpu::_th_gels_out
     CUDA: legacy::cuda::_th_gels_out
 
-- func: gels(Tensor self, Tensor A) -> (Tensor solution, Tensor QR)
+- func: lstsq(Tensor self, Tensor A) -> (Tensor solution, Tensor QR)
   variants: method, function
   dispatch:
     CPU: legacy::cpu::_th_gels
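
In Python, the first schema above surfaces as the out= variant. A rough sketch, with illustrative buffers and shapes; the out tensors are resized to fit (solution, QR) as declared above:

    import torch

    A = torch.randn(5, 3)
    b = torch.randn(5, 2)

    # Preallocated output buffers, reusable across calls
    X = torch.Tensor()
    qr = torch.Tensor()
    torch.lstsq(b, A, out=(X, qr))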

docs/source/tensors.rst

Lines changed: 1 addition & 0 deletions
@@ -317,6 +317,7 @@ view of a storage and defines numeric operations on it.
 .. automethod:: log_normal_
 .. automethod:: logsumexp
 .. automethod:: long
+.. automethod:: lstsq
 .. automethod:: lt
 .. automethod:: lt_
 .. automethod:: lu

docs/source/torch.rst

Lines changed: 1 addition & 0 deletions
@@ -334,6 +334,7 @@ BLAS and LAPACK Operations
 .. autofunction:: det
 .. autofunction:: logdet
 .. autofunction:: slogdet
+.. autofunction:: lstsq
 .. autofunction:: lu
 .. autofunction:: lu_solve
 .. autofunction:: lu_unpack

test/test_cuda.py

Lines changed: 4 additions & 0 deletions
@@ -2759,6 +2759,10 @@ def test_triangular_solve_batched_many_batches(self):
     def test_triangular_solve_batched_dims(self):
         _TestTorchMixin._test_triangular_solve_batched_dims(self, lambda t: t.cuda())
 
+    @unittest.skipIf(not TEST_MAGMA, "no MAGMA library detected")
+    def test_lstsq(self):
+        _TestTorchMixin._test_lstsq(self, 'cuda')
+
     @unittest.skipIf(not TEST_MAGMA, "no MAGMA library detected")
     def test_qr(self):
         _TestTorchMixin._test_qr(self, lambda t: t.cuda())

test/test_namedtuple_return_api.py

Lines changed: 2 additions & 2 deletions
@@ -11,7 +11,7 @@
 aten_native_yaml = os.path.join(path, '../aten/src/ATen/native/native_functions.yaml')
 all_operators_with_namedtuple_return = {
     'max', 'min', 'median', 'mode', 'kthvalue', 'svd', 'symeig', 'eig',
-    'qr', 'geqrf', 'solve', 'slogdet', 'sort', 'topk', 'gels',
+    'qr', 'geqrf', 'solve', 'slogdet', 'sort', 'topk', 'lstsq',
     'triangular_solve'
 }
 
@@ -61,7 +61,7 @@ def test_namedtuple_return(self):
     op(operators=['geqrf'], input=(), names=('a', 'tau'), hasout=True),
     op(operators=['symeig', 'eig'], input=(True,), names=('eigenvalues', 'eigenvectors'), hasout=True),
     op(operators=['triangular_solve'], input=(a,), names=('solution', 'cloned_coefficient'), hasout=True),
-    op(operators=['gels'], input=(a,), names=('solution', 'QR'), hasout=True),
+    op(operators=['lstsq'], input=(a,), names=('solution', 'QR'), hasout=True),
 ]
 
 for op in operators:
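
Since `lstsq` is registered in all_operators_with_namedtuple_return, its result fields are addressable by name. A small sketch using the field names declared above, with illustrative shapes:

    import torch

    A = torch.randn(4, 4)
    b = torch.randn(4, 2)

    result = torch.lstsq(b, A)
    # Fields follow the schema: (Tensor solution, Tensor QR)
    print(result.solution.shape, result.QR.shape)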

test/test_torch.py

Lines changed: 56 additions & 46 deletions
@@ -5788,28 +5788,35 @@ def run_test(A_dims, b_dims, cast, upper, transpose, unitriangular):
     def test_triangular_solve_batched_dims(self):
         self._test_triangular_solve_batched_dims(self, lambda t: t)
 
-    @skipIfNoLapack
-    def test_gels(self):
+    @staticmethod
+    def _test_lstsq(self, device):
+        def cast_fn(tensor):
+            return tensor.to(device=device)
+
         def _test_underdetermined(a, b, expectedNorm):
+            # underdetermined systems are not supported on the GPU
+            if 'cuda' in device:
+                return
+
             m = a.size()[0]
             n = a.size()[1]
             assert(m <= n)
 
             a_copy = a.clone()
             b_copy = b.clone()
-            res1 = torch.gels(b, a)[0]
+            res1 = torch.lstsq(b, a)[0]
             self.assertEqual(a, a_copy, 0)
             self.assertEqual(b, b_copy, 0)
             self.assertEqual((torch.mm(a, res1) - b).norm(), expectedNorm, 1e-8)
 
-            ta = torch.Tensor()
-            tb = torch.Tensor()
-            res2 = torch.gels(b, a, out=(tb, ta))[0]
+            ta = cast_fn(torch.Tensor())
+            tb = cast_fn(torch.Tensor())
+            res2 = torch.lstsq(b, a, out=(tb, ta))[0]
             self.assertEqual(a, a_copy, 0)
             self.assertEqual(b, b_copy, 0)
             self.assertEqual((torch.mm(a, res1) - b).norm(), expectedNorm, 1e-8)
 
-            res3 = torch.gels(b, a, out=(b, a))[0]
+            res3 = torch.lstsq(b, a, out=(b, a))[0]
             self.assertEqual((torch.mm(a_copy, b) - b_copy).norm(), expectedNorm, 1e-8)
             self.assertEqual(res1, tb, 0)
             self.assertEqual(res1, b, 0)
@@ -5823,7 +5830,6 @@ def _test_overdetermined(a, b, expectedNorm):
 
             def check_norm(a, b, expected_norm, gels_result):
                 # Checks |ax - b| and the residual info from the result
-                n = a.size()[1]
 
                 # The first n rows is the least square solution.
                 # Rows n to m-1 contain residual information.
@@ -5836,19 +5842,19 @@ def check_norm(a, b, expected_norm, gels_result):
 
             a_copy = a.clone()
             b_copy = b.clone()
-            res1 = torch.gels(b, a)[0]
+            res1 = torch.lstsq(b, a)[0]
             self.assertEqual(a, a_copy, 0)
             self.assertEqual(b, b_copy, 0)
             check_norm(a, b, expectedNorm, res1)
 
-            ta = torch.Tensor()
-            tb = torch.Tensor()
-            res2 = torch.gels(b, a, out=(tb, ta))[0]
+            ta = cast_fn(torch.Tensor())
+            tb = cast_fn(torch.Tensor())
+            res2 = torch.lstsq(b, a, out=(tb, ta))[0]
             self.assertEqual(a, a_copy, 0)
             self.assertEqual(b, b_copy, 0)
             check_norm(a, b, expectedNorm, res2)
 
-            res3 = torch.gels(b, a, out=(b, a))[0]
+            res3 = torch.lstsq(b, a, out=(b, a))[0]
             check_norm(a_copy, b_copy, expectedNorm, res3)
 
             self.assertEqual(res1, tb, 0)
@@ -5858,51 +5864,55 @@ def check_norm(a, b, expected_norm, gels_result):
 
         # basic test
         expectedNorm = 0
-        a = torch.Tensor(((1.44, -9.96, -7.55, 8.34),
-                          (-7.84, -0.28, 3.24, 8.09),
-                          (-4.39, -3.24, 6.27, 5.28),
-                          (4.53, 3.83, -6.64, 2.06))).t()
-        b = torch.Tensor(((8.58, 8.26, 8.48, -5.28),
-                          (9.35, -4.43, -0.70, -0.26))).t()
+        a = cast_fn(torch.Tensor(((1.44, -9.96, -7.55, 8.34),
+                                  (-7.84, -0.28, 3.24, 8.09),
+                                  (-4.39, -3.24, 6.27, 5.28),
+                                  (4.53, 3.83, -6.64, 2.06)))).t()
+        b = cast_fn(torch.Tensor(((8.58, 8.26, 8.48, -5.28),
+                                  (9.35, -4.43, -0.70, -0.26)))).t()
         _test_underdetermined(a, b, expectedNorm)
 
-        # test overderemined
+        # test overdetermined
         expectedNorm = 17.390200628863
-        a = torch.Tensor(((1.44, -9.96, -7.55, 8.34, 7.08, -5.45),
-                          (-7.84, -0.28, 3.24, 8.09, 2.52, -5.70),
-                          (-4.39, -3.24, 6.27, 5.28, 0.74, -1.19),
-                          (4.53, 3.83, -6.64, 2.06, -2.47, 4.70))).t()
-        b = torch.Tensor(((8.58, 8.26, 8.48, -5.28, 5.72, 8.93),
-                          (9.35, -4.43, -0.70, -0.26, -7.36, -2.52))).t()
+        a = cast_fn(torch.Tensor(((1.44, -9.96, -7.55, 8.34, 7.08, -5.45),
+                                  (-7.84, -0.28, 3.24, 8.09, 2.52, -5.70),
+                                  (-4.39, -3.24, 6.27, 5.28, 0.74, -1.19),
+                                  (4.53, 3.83, -6.64, 2.06, -2.47, 4.70)))).t()
+        b = cast_fn(torch.Tensor(((8.58, 8.26, 8.48, -5.28, 5.72, 8.93),
+                                  (9.35, -4.43, -0.70, -0.26, -7.36, -2.52)))).t()
        _test_overdetermined(a, b, expectedNorm)
 
         # test underdetermined
         expectedNorm = 0
-        a = torch.Tensor(((1.44, -9.96, -7.55),
-                          (-7.84, -0.28, 3.24),
-                          (-4.39, -3.24, 6.27),
-                          (4.53, 3.83, -6.64))).t()
-        b = torch.Tensor(((8.58, 8.26, 8.48),
-                          (9.35, -4.43, -0.70))).t()
+        a = cast_fn(torch.Tensor(((1.44, -9.96, -7.55),
+                                  (-7.84, -0.28, 3.24),
+                                  (-4.39, -3.24, 6.27),
+                                  (4.53, 3.83, -6.64)))).t()
+        b = cast_fn(torch.Tensor(((8.58, 8.26, 8.48),
+                                  (9.35, -4.43, -0.70)))).t()
         _test_underdetermined(a, b, expectedNorm)
 
         # test reuse
         expectedNorm = 0
-        a = torch.Tensor(((1.44, -9.96, -7.55, 8.34),
-                          (-7.84, -0.28, 3.24, 8.09),
-                          (-4.39, -3.24, 6.27, 5.28),
-                          (4.53, 3.83, -6.64, 2.06))).t()
-        b = torch.Tensor(((8.58, 8.26, 8.48, -5.28),
-                          (9.35, -4.43, -0.70, -0.26))).t()
-        ta = torch.Tensor()
-        tb = torch.Tensor()
-        torch.gels(b, a, out=(tb, ta))
+        a = cast_fn(torch.Tensor(((1.44, -9.96, -7.55, 8.34),
+                                  (-7.84, -0.28, 3.24, 8.09),
+                                  (-4.39, -3.24, 6.27, 5.28),
+                                  (4.53, 3.83, -6.64, 2.06)))).t()
+        b = cast_fn(torch.Tensor(((8.58, 8.26, 8.48, -5.28),
+                                  (9.35, -4.43, -0.70, -0.26)))).t()
+        ta = cast_fn(torch.Tensor())
+        tb = cast_fn(torch.Tensor())
+        torch.lstsq(b, a, out=(tb, ta))
         self.assertEqual((torch.mm(a, tb) - b).norm(), expectedNorm, 1e-8)
-        torch.gels(b, a, out=(tb, ta))
+        torch.lstsq(b, a, out=(tb, ta))
         self.assertEqual((torch.mm(a, tb) - b).norm(), expectedNorm, 1e-8)
-        torch.gels(b, a, out=(tb, ta))
+        torch.lstsq(b, a, out=(tb, ta))
         self.assertEqual((torch.mm(a, tb) - b).norm(), expectedNorm, 1e-8)
 
+    @skipIfNoLapack
+    def test_lstsq(self):
+        self._test_lstsq(self, 'cpu')
+
     @skipIfNoLapack
     def test_eig(self):
         a = torch.Tensor(((1.96, 0.00, 0.00, 0.00, 0.00),
@@ -8923,9 +8933,9 @@ def fn(torchfn, *args):
         q, r = fn(torch.qr, (3, 0), False)
         self.assertEqual([(3, 3), (3, 0)], [q.shape, r.shape])
 
-        # gels
-        self.assertRaises(RuntimeError, lambda: torch.gels(torch.randn(0, 0), torch.randn(0, 0)))
-        self.assertRaises(RuntimeError, lambda: torch.gels(torch.randn(0,), torch.randn(0, 0)))
+        # lstsq
+        self.assertRaises(RuntimeError, lambda: torch.lstsq(torch.randn(0, 0), torch.randn(0, 0)))
+        self.assertRaises(RuntimeError, lambda: torch.lstsq(torch.randn(0,), torch.randn(0, 0)))
 
     def test_expand(self):
         tensor = torch.rand(1, 8, 1)
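
The tests above lean on the packed return convention that check_norm's comments describe: for an m-by-n system with m > n, the first n rows of `solution` hold the least-squares solution and the remaining rows carry residual information. A minimal sketch with illustrative data:

    import torch

    m, n, k = 6, 4, 2
    A = torch.randn(m, n)
    b = torch.randn(m, k)

    solution, QR = torch.lstsq(b, A)
    x = solution[:n]                 # the least-squares solution proper
    residual = (A.mm(x) - b).norm()  # the quantity the expectedNorm checks compare
    print(residual)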

tools/autograd/derivatives.yaml

Lines changed: 4 additions & 4 deletions
@@ -345,10 +345,6 @@
   self: zeros_like(self)
   other: zeros_like(other)
 
-- name: gels(Tensor self, Tensor A) -> (Tensor solution, Tensor QR)
-  self: not_implemented("gels")
-  A: not_implemented("gels")
-
 - name: geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)
   self: zeros_like(grad)
 
@@ -467,6 +463,10 @@
 - name: logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
   self: logsumexp_backward(grad, self, result, dim, keepdim)
 
+- name: lstsq(Tensor self, Tensor A) -> (Tensor solution, Tensor QR)
+  self: not_implemented("lstsq")
+  A: not_implemented("lstsq")
+
 - name: lt_(Tensor(a!) self, Scalar other) -> Tensor(a!)
   self: zeros_like(self)
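
Because both inputs are registered as not_implemented, differentiating through `lstsq` is expected to fail when backward runs, not at the forward call. A sketch of what a caller would see (the exact error text is illustrative):

    import torch

    A = torch.randn(4, 4, requires_grad=True)
    b = torch.randn(4, 2, requires_grad=True)

    solution, QR = torch.lstsq(b, A)
    try:
        solution.sum().backward()
    except RuntimeError as err:
        # Expected: the derivative for 'lstsq' is not implemented
        print(err)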
