Commit 24e147b

Author: davidriazati
Merge branch 'driazati/rec/class' of github.com:pytorch/pytorch into driazati/fixscope
2 parents: 759157b + 671533f

33 files changed: +2074 −495 lines

.jenkins/caffe2/test.sh

Lines changed: 6 additions & 0 deletions
@@ -126,5 +126,11 @@ pip install --user pytest-sugar
 #####################
 if [[ "$BUILD_ENVIRONMENT" == *onnx* ]]; then
   pip install -q --user git+https://github.com/pytorch/vision.git
+  pip install -q --user ninja
+  # JIT C++ extensions require ninja, so put it into PATH.
+  export PATH="/var/lib/jenkins/.local/bin:$PATH"
+  if [[ "$BUILD_ENVIRONMENT" == *py3* ]]; then
+    pip install -q --user onnxruntime
+  fi
   "$ROOT_DIR/scripts/onnx/test.sh"
 fi

aten/src/ATen/native/Pool.h

Lines changed: 17 additions & 5 deletions
@@ -1,6 +1,7 @@
 #include <ATen/ATen.h>
 #include <ATen/Parallel.h>
 #include <ATen/NativeFunctions.h>
+#include <ATen/div_rtn.h>
 #include <tuple>

 #pragma once
@@ -21,18 +22,29 @@ safe_downcast(src_t v)
 }

 template<typename T>
-static inline T pooling_output_shape(
-    T inputSize, T kernelSize, T pad, T stride, T dilation, bool ceil_mode) {
-  T outputSize = ((inputSize + 2 * pad - dilation * (kernelSize - 1) - 1 + (ceil_mode ? stride - 1 : 0)) / stride + 1);
-  if (pad) {
+static inline T pooling_output_shape_pad_lr(
+    T inputSize, T kernelSize, T pad_l, T pad_r, T stride, T dilation,
+    bool ceil_mode) {
+  T outputSize = div_rtn<T>(
+      inputSize + pad_l + pad_r - dilation * (kernelSize - 1) - 1 +
+      (ceil_mode ? stride - 1 : 0), stride) + 1;
+  if (pad_l) {
     // ensure that the last pooling starts inside the image
     // needed to avoid problems in ceil mode
-    if ((outputSize - 1) * stride >= inputSize + pad)
+    if ((outputSize - 1) * stride >= inputSize + pad_l)
       --outputSize;
   }
   return outputSize;
 }

+template<typename T>
+static inline T pooling_output_shape(
+    T inputSize, T kernelSize, T pad, T stride, T dilation, bool ceil_mode) {
+  return pooling_output_shape_pad_lr(
+      inputSize, kernelSize, pad, pad, stride, dilation, ceil_mode);
+}
+
+
 // AveragePool2d/DilatedMaxPool2d (forward)
 static inline void
 pool2d_shape_check(
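The new helper pooling_output_shape_pad_lr computes the pooled output length with separate left and right padding, using div_rtn (integer division rounding toward negative infinity), and the old pooling_output_shape becomes a thin wrapper with pad_l == pad_r == pad. A minimal Python sketch of the same arithmetic (illustration only, not the ATen code; Python's // already rounds toward negative infinity like div_rtn):

import math

def pooling_output_shape_pad_lr(input_size, kernel_size, pad_l, pad_r,
                                stride, dilation, ceil_mode):
    # Mirrors the C++ helper above.
    numerator = (input_size + pad_l + pad_r - dilation * (kernel_size - 1) - 1
                 + (stride - 1 if ceil_mode else 0))
    output_size = numerator // stride + 1
    if pad_l:
        # ensure that the last pooling window starts inside the image;
        # only relevant in ceil mode
        if (output_size - 1) * stride >= input_size + pad_l:
            output_size -= 1
    return output_size

def pooling_output_shape(input_size, kernel_size, pad, stride, dilation, ceil_mode):
    return pooling_output_shape_pad_lr(
        input_size, kernel_size, pad, pad, stride, dilation, ceil_mode)

# Example: kernel 2, stride 2, padding 1 on a length-5 input.
print(pooling_output_shape(5, 2, 1, 2, 1, False))  # 3
print(pooling_output_shape(5, 2, 1, 2, 1, True))   # 3: ceil mode first yields 4,
                                                   # then the last window is clipped back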

aten/src/ATen/native/cuda/TensorFactories.cu

Lines changed: 5 additions & 0 deletions
@@ -88,10 +88,15 @@ Tensor& randperm_out_cuda(Tensor& result, int64_t n, Generator* generator) {
     return result.copy_(result_cpu);
   }

+#if 0
+  // This if condition should never be true because if n >= 30000 and the tensor has a Half type,
+  // check_supported_max_int_with_precision should have reported an error. This snippet is commented out but left here
+  // for the sake of clarity, because Half in thrust is spotty, and we do not want future change unaware of this.
   if (result.scalar_type() == at::ScalarType::Half) {  // Half in thrust is spotty. Avoid.
     auto result_float = at::empty({n}, initialTensorOptions().device(Device(DeviceType::CUDA)));
     return result.copy_(randperm_out_cuda(result_float, n, generator));
   }
+#endif

   // Generate random values for the keys array
   AT_DISPATCH_ALL_TYPES(
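The guarded block is dead code because check_supported_max_int_with_precision rejects large n for Half tensors before this point: half precision simply cannot hold every index of a long permutation. A quick NumPy sketch (illustration only, not part of the commit) of the underlying limit; IEEE float16 has an 11-bit significand, so consecutive integers stop being distinguishable above 2048:

import numpy as np

# float16 represents integers exactly only up to 2048; beyond that,
# adjacent indices of a permutation would collapse onto the same value.
print(np.float16(2048) == np.float16(2049))   # True: 2049 is not representable
print(int(np.float16(2049)))                  # 2048
print(np.float16(2048) + np.float16(1))       # 2048.0, the increment is lost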

aten/src/ATen/native/mkldnn/Utils.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 #include <ATen/native/mkldnn/Utils.h>
-#include <THNN/generic/pooling_shape.h>
+#include <ATen/native/Pool.h>

 namespace at { namespace native {

aten/src/ATen/native/quantized/cpu/qpool.cpp

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@
 #include <ATen/native/TensorIterator.h>
 #include <ATen/native/cpu/Loops.h>
 #include <ATen/quantized/Quantizer.h>
-#include <THNN/generic/pooling_shape.h>
+#include <ATen/native/Pool.h>

 #include <algorithm>
 #include <vector>

aten/src/THCUNN/generic/pooling_shape.h

Lines changed: 0 additions & 18 deletions
This file was deleted.

aten/src/THNN/generic/pooling_shape.h

Lines changed: 0 additions & 27 deletions
This file was deleted.

caffe2/python/extension_loader.py

Lines changed: 9 additions & 3 deletions
@@ -18,6 +18,12 @@ def DlopenGuard(extra_flags=ctypes.RTLD_GLOBAL):
     if _set_global_flags:
         old_flags = sys.getdlopenflags()
         sys.setdlopenflags(old_flags | extra_flags)
-    yield
-    if _set_global_flags:
-        sys.setdlopenflags(old_flags)
+
+    # in case we dlopen something that doesn't exist, yield will fail and throw;
+    # we need to remember reset the old flags to clean up, otherwise RTLD_GLOBAL
+    # flag will stick around and create symbol conflict problems
+    try:
+        yield
+    finally:
+        if _set_global_flags:
+            sys.setdlopenflags(old_flags)
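The fix wraps the yield in try/finally so the dlopen flags are restored even when the guarded import raises. A minimal standalone sketch of the same pattern (hypothetical names, not the actual caffe2 module; Unix-only since it relies on sys.setdlopenflags):

import contextlib
import ctypes
import sys

@contextlib.contextmanager
def dlopen_flags_guard(extra_flags=ctypes.RTLD_GLOBAL):
    old_flags = sys.getdlopenflags()
    sys.setdlopenflags(old_flags | extra_flags)
    try:
        yield
    finally:
        # Runs on both the normal and the exception path, so RTLD_GLOBAL
        # never leaks out of the guarded region.
        sys.setdlopenflags(old_flags)

# Usage: the flags are restored even though the dlopen inside the block fails.
flags_before = sys.getdlopenflags()
try:
    with dlopen_flags_guard():
        ctypes.CDLL("libdoes_not_exist.so")  # raises OSError
except OSError:
    pass
assert sys.getdlopenflags() == flags_before  # restored despite the exception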

caffe2/python/onnx/tests/onnx_backend_test.py

Lines changed: 1 addition & 0 deletions
@@ -71,6 +71,7 @@
                  '|test_roialign.*'  # Needs implementation
                  '|test_bitshift.*'  # Needs implementation
                  '|test_round.*'  # Needs implementation
+                 '|test_cumsum.*'  # Needs implementation
                  ')')

 # Quick patch to unbreak master CI, is working on the debugging.

caffe2/python/regularizer.py

Lines changed: 49 additions & 0 deletions
@@ -85,6 +85,30 @@ def _run_on_loss(self, net, param_init_net, param, grad=None):
         return output_blob


+class L1NormTrimmed(Regularizer):
+    """
+    The Trimmed Lasso: Sparsity and Robustness. https://arxiv.org/abs/1708.04527
+    """
+    def __init__(self, reg_lambda, k):
+        super(L1NormTrimmed, self).__init__()
+        assert reg_lambda >= 0, "factor ahead of regularization should be 0 or positive"
+        assert isinstance(k, int), "k should be an interger as expected #. after selection"
+        assert k >= 1, "k should be larger than 1"
+
+        self.reg_lambda = reg_lambda
+        self.k = k
+
+    def _run_on_loss(self, net, param_init_net, param, grad=None):
+        output_blob = net.NextScopedBlob(param + "_l1_trimmed_regularization")
+        abs = net.Abs(param, [net.NextScopedBlob("abs")])
+        sum_abs = net.SumElements([abs], [net.NextScopedBlob("sum_abs")], average=False)
+        topk, _, _ = net.TopK(abs, [net.NextScopedBlob("topk"), 'id', 'flat_id'], k=self.k)
+        topk_sum = net.SumElements([topk], [net.NextScopedBlob("topk_sum")], average=False)
+        net.Sub([sum_abs, topk_sum], [output_blob])
+        net.Scale([output_blob], [output_blob], scale=self.reg_lambda)
+        return output_blob
+
+
 class L2Norm(Regularizer):
     def __init__(self, reg_lambda):
         super(L2Norm, self).__init__()
@@ -117,6 +141,31 @@ def _run_on_loss(self, net, param_init_net, param, grad=None):
         return output_blob


+class ElasticNetL1NormTrimmed(Regularizer):
+    def __init__(self, l1, l2, k):
+        super(ElasticNetL1NormTrimmed, self).__init__()
+        self.l1 = l1
+        self.l2 = l2
+        self.k = k
+
+    def _run_on_loss(self, net, param_init_net, param, grad=None):
+        output_blob = net.NextScopedBlob(param + "_elastic_net_l1_trimmed_regularization")
+        l2_blob = net.NextScopedBlob(param + "_l2_blob")
+        net.LpNorm([param], [l2_blob], p=2)
+        net.Scale([l2_blob], [l2_blob], scale=self.l2)
+
+        l1_blob = net.NextScopedBlob(param + "_l1_blob")
+        abs = net.Abs(param, [net.NextScopedBlob("abs")])
+        sum_abs = net.SumElements([abs], [net.NextScopedBlob("sum_abs")], average=False)
+        topk, _, _ = net.TopK(abs, [net.NextScopedBlob("topk"), 'id', 'flat_id'], k=self.k)
+        topk_sum = net.SumElements([topk], [net.NextScopedBlob("topk_sum")], average=False)
+        net.Sub([sum_abs, topk_sum], [l1_blob])
+        net.Scale([l1_blob], [l1_blob], scale=self.l1)
+
+        net.Add([l1_blob, l2_blob], [output_blob])
+        return output_blob
+
+
 class MaxNorm(Regularizer):
     def __init__(self, norm=1.0):
         super(MaxNorm, self).__init__()
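The trimmed L1 penalty is reg_lambda * (sum of |w_i| minus the sum of the k largest |w_i|), so the k largest-magnitude weights escape the L1 pressure while the rest are shrunk; ElasticNetL1NormTrimmed adds an L2 term on top. A minimal NumPy sketch of the quantity the L1NormTrimmed graph builds (illustration only, plain NumPy rather than the Caffe2 operators):

import numpy as np

def l1_norm_trimmed(w, reg_lambda, k):
    # Trimmed Lasso penalty: lambda * (sum(|w|) - sum of the k largest |w|),
    # i.e. only the smallest-magnitude entries are penalized.
    a = np.abs(np.asarray(w, dtype=np.float64)).ravel()
    topk_sum = np.sort(a)[-k:].sum()  # sum of the k largest |w_i|
    return reg_lambda * (a.sum() - topk_sum)

w = [3.0, -0.5, 0.2, -2.0]
# With k=2 the two largest magnitudes (3.0 and 2.0) are exempt,
# leaving 0.5 + 0.2 = 0.7, scaled by lambda = 0.1.
print(l1_norm_trimmed(w, reg_lambda=0.1, k=2))  # ~0.07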
