8 changes: 8 additions & 0 deletions aten/doc/Functions.h
@@ -145,6 +145,8 @@ static inline Tensor & tanh_out(Tensor & result, const Tensor & self);
static inline Tensor tanh(const Tensor & self);
static inline Tensor & erf_out(Tensor & result, const Tensor & self);
static inline Tensor erf(const Tensor & self);
static inline Tensor & erfc_out(Tensor & result, const Tensor & self);
static inline Tensor erfc(const Tensor & self);
static inline Tensor & erfinv_out(Tensor & result, const Tensor & self);
static inline Tensor erfinv(const Tensor & self);
static inline Tensor & sqrt_out(Tensor & result, const Tensor & self);
@@ -1204,6 +1206,12 @@ static inline Tensor & erf_out(Tensor & result, const Tensor & self) {
static inline Tensor erf(const Tensor & self) {
return infer_type(self).erf(self);
}
static inline Tensor & erfc_out(Tensor & result, const Tensor & self) {
return infer_type(self).erfc_out(result, self);
}
static inline Tensor erfc(const Tensor & self) {
return infer_type(self).erfc(self);
}
static inline Tensor & erfinv_out(Tensor & result, const Tensor & self) {
return infer_type(self).erfinv_out(result, self);
}
2 changes: 2 additions & 0 deletions aten/doc/Tensor.h
@@ -252,6 +252,8 @@ struct Tensor : public detail::TensorBase {
Tensor tanh() const;
Tensor & erf_();
Tensor erf() const;
Tensor & erfc_();
Tensor erfc() const;
Tensor & erfinv_();
Tensor erfinv() const;
Tensor & sqrt_();
3 changes: 3 additions & 0 deletions aten/doc/Type.h
@@ -366,6 +366,9 @@ struct AT_API Type {
virtual Tensor & erf_(Tensor & self) const;
virtual Tensor & erf_out(Tensor & result, const Tensor & self) const;
virtual Tensor erf(const Tensor & self) const;
virtual Tensor & erfc_(Tensor & self) const;
virtual Tensor & erfc_out(Tensor & result, const Tensor & self) const;
virtual Tensor erfc(const Tensor & self) const;
virtual Tensor & erfinv_(Tensor & self) const;
virtual Tensor & erfinv_out(Tensor & result, const Tensor & self) const;
virtual Tensor erfinv(const Tensor & self) const;
16 changes: 16 additions & 0 deletions aten/src/ATen/Declarations.cwrap
@@ -1478,6 +1478,22 @@
output: True
- THTensor* self
]]
[[
name: _erfc
cname: erfc
types:
- floating_point
backends:
- CUDA
variants:
- method
- function
return: argument 0
arguments:
- arg: THTensor* result
output: True
- THTensor* self
]]
[[
name: erfinv_
types:
3 changes: 3 additions & 0 deletions aten/src/ATen/cpu/vec256/vec256_base.h
@@ -103,6 +103,9 @@ struct Vec256 {
Vec256<T> erf() const {
return map(std::erf);
}
Vec256<T> erfc() const {
return map(std::erfc);
}
Vec256<T> exp() const {
return map(std::exp);
}
3 changes: 3 additions & 0 deletions aten/src/ATen/cpu/vec256/vec256_double.h
@@ -88,6 +88,9 @@ template <> class Vec256<double> {
Vec256<double> erf() const {
return Vec256<double>(Sleef_erfd4_u10(values));
}
Vec256<double> erfc() const {
return Vec256<double>(Sleef_erfcd4_u15(values));
}
Vec256<double> exp() const {
return Vec256<double>(Sleef_expd4_u10(values));
}
3 changes: 3 additions & 0 deletions aten/src/ATen/cpu/vec256/vec256_float.h
@@ -93,6 +93,9 @@ template <> class Vec256<float> {
Vec256<float> erf() const {
return Vec256<float>(Sleef_erff8_u10(values));
}
Vec256<float> erfc() const {
return Vec256<float>(Sleef_erfcf8_u15(values));
}
Vec256<float> exp() const {
return Vec256<float>(Sleef_expf8_u10(values));
}
2 changes: 2 additions & 0 deletions aten/src/ATen/cpu/vml.h
@@ -104,6 +104,7 @@ IMPLEMENT_VML_BUG(ceil)
IMPLEMENT_VML_BUG(cos)
// IMPLEMENT_VML_BUG(cosh)
IMPLEMENT_VML_BUG(erf)
IMPLEMENT_VML_BUG(erfc)
IMPLEMENT_VML_BUG(exp)
IMPLEMENT_VML_BUG(expm1)
IMPLEMENT_VML_BUG(floor)
@@ -143,6 +144,7 @@ IMPLEMENT_VML_MKL(atan, Atan)
IMPLEMENT_VML_MKL(cos, Cos)
// IMPLEMENT_VML_MKL(cosh, Cosh)
IMPLEMENT_VML_MKL(erf, Erf)
IMPLEMENT_VML_MKL(erfc, Erfc)
IMPLEMENT_VML_MKL(exp, Exp)
IMPLEMENT_VML_MKL(expm1, Expm1)
IMPLEMENT_VML_MKL(log, Ln)
1 change: 1 addition & 0 deletions aten/src/ATen/native/UnaryOps.cpp
@@ -78,6 +78,7 @@ IMPLEMENT_UNARY_OP_VEC(ceil)
IMPLEMENT_UNARY_OP_VEC(cos)
IMPLEMENT_UNARY_OP_TH(cosh)
IMPLEMENT_UNARY_OP_VEC(erf)
IMPLEMENT_UNARY_OP_VEC(erfc)
IMPLEMENT_UNARY_OP_VEC(exp)
IMPLEMENT_UNARY_OP_VEC(expm1)
IMPLEMENT_UNARY_OP_VEC(floor)
1 change: 1 addition & 0 deletions aten/src/ATen/native/cpu/UnaryOpsKernel.cpp
@@ -151,6 +151,7 @@ IMPLEMENT_FLOAT_KERNEL(FLOATING, ceil)
IMPLEMENT_FLOAT_KERNEL(FLOATING, cos)
// IMPLEMENT_FLOAT_KERNEL(FLOATING, cosh)
IMPLEMENT_FLOAT_KERNEL(FLOATING, erf)
IMPLEMENT_FLOAT_KERNEL(FLOATING, erfc)
IMPLEMENT_FLOAT_KERNEL(FLOATING, exp)
IMPLEMENT_FLOAT_KERNEL(FLOATING, expm1)
IMPLEMENT_FLOAT_KERNEL(FLOATING, floor)
1 change: 1 addition & 0 deletions aten/src/ATen/native/cpu/UnaryOpsKernel.h
@@ -16,6 +16,7 @@ extern DispatchStub<unary_fn> ceilImpl;
extern DispatchStub<unary_fn> cosImpl;
// extern DispatchStub<unary_fn> coshImpl;
extern DispatchStub<unary_fn> erfImpl;
extern DispatchStub<unary_fn> erfcImpl;
extern DispatchStub<unary_fn> expImpl;
extern DispatchStub<unary_fn> expm1Impl;
extern DispatchStub<unary_fn> floorImpl;
1 change: 1 addition & 0 deletions aten/src/ATen/native/cuda/CUDAUnaryOps.cpp
@@ -21,6 +21,7 @@ IMPLEMENT_UNARY_OP_PREQUEL(ceil)
IMPLEMENT_UNARY_OP_PREQUEL(cos)
IMPLEMENT_UNARY_OP_PREQUEL(cosh)
IMPLEMENT_UNARY_OP_PREQUEL(erf)
IMPLEMENT_UNARY_OP_PREQUEL(erfc)
IMPLEMENT_UNARY_OP_PREQUEL(exp)
IMPLEMENT_UNARY_OP_PREQUEL(expm1)
IMPLEMENT_UNARY_OP_PREQUEL(floor)
13 changes: 13 additions & 0 deletions aten/src/ATen/native/native_functions.yaml
@@ -532,6 +532,19 @@
CPU: _erf_out_cpu
CUDA: _erf_out_cuda

- func: erfc(Tensor self) -> Tensor

- func: erfc_(Tensor self) -> Tensor
dispatch:
CPU: _erfc__cpu
CUDA: _erfc__cuda

- func: erfc_out(Tensor result, Tensor self) -> Tensor
variants: function
dispatch:
CPU: _erfc_out_cpu
CUDA: _erfc_out_cuda

- func: exp(Tensor self) -> Tensor

- func: exp_(Tensor self) -> Tensor
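Note: for context, here is a minimal usage sketch of the three variants the erfc declarations above expose (functional, in-place method, and out=). It assumes the usual generated Python bindings, with the `erfc_out` overload surfaced as the `out=` keyword; the values are illustrative only.

```python
import torch

x = torch.tensor([0.0, -1.0, 10.0])

y = torch.erfc(x)          # functional variant: erfc(Tensor self)
x.clone().erfc_()          # in-place method variant: erfc_(Tensor self)
out = torch.empty_like(x)
torch.erfc(x, out=out)     # out= keyword backed by erfc_out(Tensor result, Tensor self)
```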
1 change: 1 addition & 0 deletions aten/src/TH/generic/THTensorMath.cpp
@@ -4064,6 +4064,7 @@ LAB_IMPLEMENT_BASIC_FUNCTION(log10,TH_MATH_NAME(log10))
LAB_IMPLEMENT_BASIC_FUNCTION(log1p,TH_MATH_NAME(log1p))
LAB_IMPLEMENT_BASIC_FUNCTION(log2,TH_MATH_NAME(log2))
LAB_IMPLEMENT_BASIC_FUNCTION(erf,TH_MATH_NAME(erf))
LAB_IMPLEMENT_BASIC_FUNCTION(erfc,TH_MATH_NAME(erfc))
LAB_IMPLEMENT_BASIC_FUNCTION(erfinv,TH_erfinv)
LAB_IMPLEMENT_BASIC_FUNCTION(ceil,TH_MATH_NAME(ceil))
LAB_IMPLEMENT_BASIC_FUNCTION(floor,TH_MATH_NAME(floor))
1 change: 1 addition & 0 deletions aten/src/TH/generic/THTensorMath.h
@@ -170,6 +170,7 @@ TH_API void THTensor_(atan)(THTensor *r_, THTensor *t);
TH_API void THTensor_(atan2)(THTensor *r_, THTensor *tx, THTensor *ty);
TH_API void THTensor_(tanh)(THTensor *r_, THTensor *t);
TH_API void THTensor_(erf)(THTensor *r_, THTensor *t);
TH_API void THTensor_(erfc)(THTensor *r_, THTensor *t);
TH_API void THTensor_(erfinv)(THTensor *r_, THTensor *t);
TH_API void THTensor_(sqrt)(THTensor *r_, THTensor *t);
TH_API void THTensor_(rsqrt)(THTensor *r_, THTensor *t);
1 change: 1 addition & 0 deletions aten/src/TH/generic/THVector.h
@@ -41,6 +41,7 @@ TH_API void THVector_(sigmoid)(real *y, const real *x, const ptrdiff_t n);
TH_API void THVector_(exp)(real *y, const real *x, const ptrdiff_t n);
TH_API void THVector_(expm1)(real *y, const real *x, const ptrdiff_t n);
TH_API void THVector_(erf)(real *y, const real *x, const ptrdiff_t n);
TH_API void THVector_(erfc)(real *y, const real *x, const ptrdiff_t n);
TH_API void THVector_(erfinv)(real *y, const real *x, const ptrdiff_t n);
TH_API void THVector_(cos)(real *y, const real *x, const ptrdiff_t n);
TH_API void THVector_(acos)(real *y, const real *x, const ptrdiff_t n);
1 change: 1 addition & 0 deletions aten/src/TH/generic/THVectorDefault.cpp
@@ -259,6 +259,7 @@ VECTOR_IMPLEMENT_FUNCTION(sigmoid_DEFAULT,TH_MATH_NAME(TH_sigmoid))
VECTOR_IMPLEMENT_FUNCTION(exp,TH_MATH_NAME(exp))
VECTOR_IMPLEMENT_FUNCTION(expm1,TH_MATH_NAME(expm1))
VECTOR_IMPLEMENT_FUNCTION(erf,TH_MATH_NAME(erf))
VECTOR_IMPLEMENT_FUNCTION(erfc,TH_MATH_NAME(erfc))
VECTOR_IMPLEMENT_FUNCTION(erfinv, TH_erfinv)
VECTOR_IMPLEMENT_FUNCTION(cos,TH_MATH_NAME(cos))
VECTOR_IMPLEMENT_FUNCTION(acos,TH_MATH_NAME(acos))
14 changes: 13 additions & 1 deletion aten/src/THC/THCNumerics.cuh
@@ -506,7 +506,17 @@ static inline __host__ __device__ half lgamma(half a) {
}


static inline __host__ __device__ half erfinv(half a) {
static inline __host__ __device__ half erfc(half a) {
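// compute erfc for half by widening to float, applying erfcf, and narrowing back (same pattern as erfinv below)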
#ifdef __CUDA_ARCH__
float fa = __half2float(a);
return __float2half(erfcf(fa));
#else // __CUDA_ARCH__
return THC_float2half(erfcf(THC_half2float(a)));
#endif
}


static inline __host__ __device__ half erfinv(half a) {
#ifdef __CUDA_ARCH__
float fa = __half2float(a);
return __float2half(erfinvf(fa));
@@ -684,6 +694,7 @@ struct THCNumerics<float> {
static inline __host__ __device__ float atan (float a) { return atanf(a); }
static inline __host__ __device__ float tanh (float a) { return tanhf(a); }
static inline __host__ __device__ float erf (float a) { return erff(a); }
static inline __host__ __device__ float erfc (float a) { return erfcf(a); }
static inline __host__ __device__ float abs (float a) { return fabsf(a); }
static inline __host__ __device__ float round(float a) { return roundf(a); }
static inline __host__ __device__ float frac (float a) { return a - truncf(a); }
@@ -737,6 +748,7 @@ struct THCNumerics<double> {
static inline __host__ __device__ double atan (double a) { return ::atan(a); }
static inline __host__ __device__ double tanh (double a) { return ::tanh(a); }
static inline __host__ __device__ double erf (double a) { return ::erf(a); }
static inline __host__ __device__ double erfc (double a) { return ::erfc(a); }
static inline __host__ __device__ double abs (double a) { return ::abs(a); }
static inline __host__ __device__ double round(double a) { return ::round(a); }
static inline __host__ __device__ double frac (double a) { return a - ::trunc(a); }
1 change: 1 addition & 0 deletions aten/src/THC/generic/THCTensorMathPointwise.cu
@@ -58,6 +58,7 @@ IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<real>::tan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<real>::atan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<real>::tanh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<real>::erf, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erfc, THCNumerics<real>::erfc, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(erfinv, THCNumerics<real>::erfinv,Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( round, THCNumerics<real>::round, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<real>::frac, Real)
1 change: 1 addition & 0 deletions aten/src/THC/generic/THCTensorMathPointwise.h
@@ -29,6 +29,7 @@ THC_API void THCTensor_(atan)(THCState *state, THCTensor *self, THCTensor *src);
THC_API void THCTensor_(atan2)(THCState *state, THCTensor *r_, THCTensor *tx, THCTensor *ty);
THC_API void THCTensor_(tanh)(THCState *state, THCTensor *self, THCTensor *src);
THC_API void THCTensor_(erf)(THCState *state, THCTensor *self, THCTensor *src);
THC_API void THCTensor_(erfc)(THCState *state, THCTensor *self, THCTensor *src);
THC_API void THCTensor_(erfinv)(THCState *state, THCTensor *self, THCTensor *src);
THC_API void THCTensor_(sqrt)(THCState *state, THCTensor *self, THCTensor *src);
THC_API void THCTensor_(rsqrt)(THCState *state, THCTensor *self, THCTensor *src);
2 changes: 2 additions & 0 deletions docs/source/tensors.rst
@@ -212,6 +212,8 @@ view of a storage and defines numeric operations on it.
.. automethod:: equal
.. automethod:: erf
.. automethod:: erf_
.. automethod:: erfc
.. automethod:: erfc_
.. automethod:: erfinv
.. automethod:: erfinv_
.. automethod:: exp
1 change: 1 addition & 0 deletions docs/source/torch.rst
@@ -169,6 +169,7 @@ Pointwise Ops
.. autofunction:: cosh
.. autofunction:: div
.. autofunction:: erf
.. autofunction:: erfc
.. autofunction:: erfinv
.. autofunction:: exp
.. autofunction:: expm1
2 changes: 2 additions & 0 deletions test/test_autograd.py
@@ -2640,6 +2640,8 @@ class dont_convert(tuple):
('expm1', (), NO_ARGS, 'scalar'),
('erf', torch.rand(S, S, S), NO_ARGS),
('erf', uniform_scalar(requires_grad=True), NO_ARGS, 'scalar'),
('erfc', torch.rand(S, S, S), NO_ARGS),
('erfc', uniform_scalar(requires_grad=True), NO_ARGS, 'scalar'),
('erfinv', torch.rand(S, S, S).clamp(-0.9, 0.9), NO_ARGS),
('erfinv', normal_scalar_clamp(-0.9, 0.9, requires_grad=True), NO_ARGS, 'scalar'),
('log', torch.rand(S, S, S) + 1e-2, NO_ARGS),
2 changes: 2 additions & 0 deletions test/test_cuda.py
@@ -497,6 +497,7 @@ def tmp(t):
'div': 1e-3,
'dot': 1e-2,
'erf': 1e-3,
'erfc': 1e-3,
'erfinv': 1e-3,
'exp': 1e-2,
'expm1': 1e-2,
@@ -552,6 +553,7 @@ def tmp(t):
'cos',
'cosh',
'erf',
'erfc',
'erfinv',
'exp',
'expm1',
3 changes: 3 additions & 0 deletions test/test_torch.py
@@ -482,6 +482,9 @@ def test_sqrt(self):
def test_erf(self):
self._test_math_by_name('erf')

def test_erfc(self):
self._test_math_by_name('erfc')

def test_erfinv(self):
def checkType(tensor):
inputValues = torch.randn(4, 4, out=tensor()).clamp(-2., 2.)
3 changes: 3 additions & 0 deletions tools/autograd/derivatives.yaml
@@ -243,6 +243,9 @@
- name: erf(Tensor self)
self: 2.0 / sqrt(M_PI) * exp(-(self.pow(2))) * grad

- name: erfc(Tensor self)
self: -2.0 / sqrt(M_PI) * exp(-(self.pow(2))) * grad

- name: erfinv(Tensor self)
self: 0.5 * sqrt(M_PI) * exp(self.erfinv().pow(2)) * grad

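Note: the erfc backward formula above follows directly from the erf derivative, since erfc(x) = 1 - erf(x):

.. math::
    \frac{d}{dx}\,\mathrm{erfc}(x) = \frac{d}{dx}\bigl(1 - \mathrm{erf}(x)\bigr) = -\frac{2}{\sqrt{\pi}}\, e^{-x^2}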
7 changes: 7 additions & 0 deletions torch/_tensor_docs.py
@@ -717,6 +717,13 @@ def add_docstr_all(method, docstr):
See :func:`torch.erf`
""")

add_docstr_all('erfc',
r"""
erfc() -> Tensor

See :func:`torch.erfc`
""")

add_docstr_all('erfinv',
r"""
erfinv() -> Tensor
19 changes: 19 additions & 0 deletions torch/_torch_docs.py
@@ -1411,6 +1411,25 @@ def parse_kwargs(desc):
tensor([ 0.0000, -0.8427, 1.0000])
""")

add_docstr(torch.erfc,
r"""
erfc(tensor, out=None) -> Tensor

Computes the complementary error function of each element of :attr:`tensor`. The complementary error function is defined as follows:

.. math::
\mathrm{erfc}(x) = 1 - \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt

Args:
tensor (Tensor): the input tensor
out (Tensor, optional): the output tensor

Example::

>>> torch.erfc(torch.tensor([0, -1., 10.]))
tensor([ 1.0000, 1.8427, 0.0000])
""")

add_docstr(torch.erfinv,
r"""
erfinv(tensor, out=None) -> Tensor
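As a quick sanity check of the new op (an illustrative sketch, not part of the PR's test suite), erfc should match 1 - erf and pass gradcheck in double precision:

```python
import torch
from torch.autograd import gradcheck

x = torch.randn(1000, dtype=torch.double)
# identity check: erfc(x) == 1 - erf(x) up to floating-point error
assert torch.allclose(torch.erfc(x), 1 - torch.erf(x), atol=1e-12)

# gradient check against the derivatives.yaml formula
inp = torch.randn(5, dtype=torch.double, requires_grad=True)
assert gradcheck(torch.erfc, (inp,))
```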