Skip to content

Commit 3796ce9

Browse files
ssnl authored and soumith committed
assert (#4056)
1 parent e0d5d1b commit 3796ce9

File tree

5 files changed

+98
-40
lines changed

5 files changed

+98
-40
lines changed

aten/src/TH/generic/THBlas.c

Lines changed: 13 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -201,11 +201,12 @@ void THBlas_(gemv)(char trans, int64_t m, int64_t n, real alpha, real *a, int64_
201201
lda = m;
202202

203203
#if defined(USE_BLAS) && (defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_FLOAT))
204-
if( (m <= INT_MAX) && (n <= INT_MAX) &&
205-
(lda >= THMax(1, m)) && (lda <= INT_MAX) &&
204+
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) &&
206205
(incx > 0) && (incx <= INT_MAX) &&
207206
(incy > 0) && (incy <= INT_MAX) )
208207
{
208+
THArgCheck(lda >= THMax(1, m), 6,
209+
"lda should be at least max(1, m=%d), but have %d", m, lda);
209210
int i_m = (int)m;
210211
int i_n = (int)n;
211212
int i_lda = (int)lda;
@@ -259,11 +260,12 @@ void THBlas_(ger)(int64_t m, int64_t n, real alpha, real *x, int64_t incx, real
259260
lda = m;
260261

261262
#if defined(USE_BLAS) && (defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_FLOAT))
262-
if( (m <= INT_MAX) && (n <= INT_MAX) &&
263-
(lda >= THMax(1, m)) && (lda <= INT_MAX) &&
263+
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) &&
264264
(incx > 0) && (incx <= INT_MAX) &&
265265
(incy > 0) && (incy <= INT_MAX) )
266266
{
267+
THArgCheck(lda >= THMax(1, m), 9,
268+
"lda should be at least max(1, m=%d), but have %d", m, lda);
267269
int i_m = (int)m;
268270
int i_n = (int)n;
269271
int i_lda = (int)lda;
@@ -322,10 +324,14 @@ void THBlas_(gemm)(char transa, char transb, int64_t m, int64_t n, int64_t k, re
322324

323325
#if defined(USE_BLAS) && (defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_FLOAT))
324326
if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) &&
325-
(lda >= THMax(1, (transa_ ? k : m))) && (lda <= INT_MAX) &&
326-
(ldb >= THMax(1, (transb_ ? n : k))) && (ldb <= INT_MAX) &&
327-
(ldc >= THMax(1, m)) && (ldc <= INT_MAX) )
327+
(lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) )
328328
{
329+
THArgCheck(lda >= THMax(1, (transa_ ? k : m)), 8,
330+
"lda should be at least max(1, %d), but have %d", (transa_ ? k : m), lda);
331+
THArgCheck(ldb >= THMax(1, (transb_ ? n : k)), 10,
332+
"ldb should be at least max(1, %d), but have %d", (transb_ ? n : k), ldb);
333+
THArgCheck(ldc >= THMax(1, m), 13,
334+
"ldc should be at least max(1, m=%d), but have %d", m, ldc);
329335
int i_m = (int)m;
330336
int i_n = (int)n;
331337
int i_k = (int)k;

aten/src/TH/generic/THTensorMath.c

Lines changed: 42 additions & 17 deletions
Original file line number | Diff line number | Diff line change
@@ -1331,14 +1331,17 @@ void THTensor_(addmv)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor
13311331
THTensor_(copy)(r_, t);
13321332
}
13331333

1334-
if(mat->stride[0] == 1)
1334+
// n == 1 || lda >= max(1, m)
1335+
#define LDA_COND(M, N, LDA) ((N) == 1 || (LDA) >= THMax(1, (M)))
1336+
1337+
if(mat->stride[0] == 1 && LDA_COND(mat->size[0], mat->size[1], mat->stride[1]))
13351338
{
13361339
THBlas_(gemv)('n', mat->size[0], mat->size[1],
13371340
alpha, THTensor_(data)(mat), mat->stride[1],
13381341
THTensor_(data)(vec), vec->stride[0],
13391342
beta, THTensor_(data)(r_), r_->stride[0]);
13401343
}
1341-
else if(mat->stride[1] == 1)
1344+
else if(mat->stride[1] == 1 && LDA_COND(mat->size[1], mat->size[0], mat->stride[0]))
13421345
{
13431346
THBlas_(gemv)('t', mat->size[1], mat->size[0],
13441347
alpha, THTensor_(data)(mat), mat->stride[0],
@@ -1356,6 +1359,8 @@ void THTensor_(addmv)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor
13561359

13571360
THTensor_(free)(cmat);
13581361
}
1362+
1363+
#undef LDA_COND
13591364
}
13601365

13611366
void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, real gain)
@@ -1434,15 +1439,18 @@ void THTensor_(addmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor
14341439
}
14351440
}
14361441

1442+
// n == 1 || ldc >= max(1, m)
1443+
#define LDC_COND(M, N, LDC) ((N) == 1 || (LDC) >= THMax(1, M))
1444+
14371445
/* r_ */
14381446
if(r_->stride[0] == 1 &&
1439-
r_->stride[1] != 0)
1447+
LDC_COND(r_->size[0], r_->size[1], r_->stride[1]))
14401448
{
14411449
transpose_r = 'n';
14421450
r__ = r_;
14431451
}
14441452
else if(r_->stride[1] == 1 &&
1445-
r_->stride[0] != 0)
1453+
LDC_COND(r_->size[1], r_->size[0], r_->stride[0]))
14461454
{
14471455
THTensor *swap = m2;
14481456
m2 = m1;
@@ -1453,22 +1461,30 @@ void THTensor_(addmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor
14531461
else
14541462
{
14551463
transpose_r = 'n';
1456-
1464+
// make r__ FORTRAN contiguous
14571465
THTensor *transp_r_ = THTensor_(newTranspose)(r_, 0, 1);
14581466
r__ = THTensor_(newClone)(transp_r_);
14591467
THTensor_(free)(transp_r_);
14601468
THTensor_(transpose)(r__, NULL, 0, 1);
14611469
}
14621470

1471+
#undef LDC_COND
1472+
1473+
int64_t m = r__->size[(transpose_r == 'n' ? 0 : 1)];
1474+
int64_t n = r__->size[(transpose_r == 'n' ? 1 : 0)];
1475+
int64_t k = m1->size[(transpose_r == 'n' ? 1 : 0)];
1476+
int64_t ldr__ = r__->stride[(transpose_r == 'n' ? 1 : 0)];
1477+
14631478
/* m1 */
1479+
/* Need ldm1_ >= max(1, (transpose_m1 == 't' ? m : k)) */
14641480
if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
1465-
m1->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
1481+
m1->stride[(transpose_r == 'n' ? 1 : 0)] >= THMax(1, k))
14661482
{
14671483
transpose_m1 = 'n';
14681484
m1_ = m1;
14691485
}
14701486
else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
1471-
m1->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
1487+
m1->stride[(transpose_r == 'n' ? 0 : 1)] >= THMax(1, m))
14721488
{
14731489
transpose_m1 = 't';
14741490
m1_ = m1;
@@ -1481,14 +1497,15 @@ void THTensor_(addmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor
14811497
}
14821498

14831499
/* m2 */
1500+
/* Need ldm2_ >= max(1, (transpose_m2 == 't' ? n : k)) */
14841501
if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
1485-
m2->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
1502+
m2->stride[(transpose_r == 'n' ? 1 : 0)] >= THMax(1, k))
14861503
{
14871504
transpose_m2 = 'n';
14881505
m2_ = m2;
14891506
}
14901507
else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
1491-
m2->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
1508+
m2->stride[(transpose_r == 'n' ? 0 : 1)] >= THMax(1, n))
14921509
{
14931510
transpose_m2 = 't';
14941511
m2_ = m2;
@@ -1500,21 +1517,24 @@ void THTensor_(addmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor
15001517
free_m2 = 1;
15011518
}
15021519

1520+
int64_t ldm1_ = (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]);
1521+
int64_t ldm2_ = (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]);
1522+
15031523
#pragma omp critical(blasgemm)
15041524
/* do the operation */
15051525
THBlas_(gemm)(transpose_m1,
15061526
transpose_m2,
1507-
r__->size[(transpose_r == 'n' ? 0 : 1)],
1508-
r__->size[(transpose_r == 'n' ? 1 : 0)],
1509-
m1_->size[(transpose_r == 'n' ? 1 : 0)],
1527+
m,
1528+
n,
1529+
k,
15101530
alpha,
15111531
THTensor_(data)(m1_),
1512-
(transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]),
1532+
ldm1_,
15131533
THTensor_(data)(m2_),
1514-
(transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]),
1534+
ldm2_,
15151535
beta,
15161536
THTensor_(data)(r__),
1517-
r__->stride[(transpose_r == 'n' ? 1 : 0)]);
1537+
ldr__);
15181538

15191539
/* free intermediate variables */
15201540
if(free_m1)
@@ -1555,14 +1575,17 @@ void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor
15551575
else if(beta != 1)
15561576
THTensor_(mul)(r_, r_, beta);
15571577

1558-
if(r_->stride[0] == 1)
1578+
// n == 1 || lda >= max(1, m)
1579+
#define LDA_COND(M, N, LDA) ((N) == 1 || (LDA) >= THMax(1, (M)))
1580+
1581+
if(r_->stride[0] == 1 && LDA_COND(vec1->size[0], vec2->size[0], r_->stride[1]))
15591582
{
15601583
THBlas_(ger)(vec1->size[0], vec2->size[0],
15611584
alpha, THTensor_(data)(vec1), vec1->stride[0],
15621585
THTensor_(data)(vec2), vec2->stride[0],
15631586
THTensor_(data)(r_), r_->stride[1]);
15641587
}
1565-
else if(r_->stride[1] == 1)
1588+
else if(r_->stride[1] == 1 && LDA_COND(vec2->size[0], vec1->size[0], r_->stride[0]))
15661589
{
15671590
THBlas_(ger)(vec2->size[0], vec1->size[0],
15681591
alpha, THTensor_(data)(vec2), vec2->stride[0],
@@ -1580,6 +1603,8 @@ void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor
15801603

15811604
THTensor_(freeCopyTo)(cr, r_);
15821605
}
1606+
1607+
#undef LDA_COND
15831608
}
15841609

15851610
void THTensor_(addbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2)

aten/src/THNN/generic/SpatialDilatedConvolution.c

Lines changed: 18 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -82,7 +82,11 @@ void THNN_(SpatialDilatedConvolution_updateOutput)(
8282

8383
input = THTensor_(newContiguous)(input);
8484
weight = THTensor_(newContiguous)(weight);
85-
bias = bias ? THTensor_(newContiguous)(bias) : bias;
85+
THArgCheck(THTensor_(isContiguous)(columns), 5, "columns needs to be contiguous");
86+
if (bias) {
87+
bias = THTensor_(newContiguous)(bias);
88+
THArgCheck(THTensor_(isContiguous)(ones), 6, "ones needs to be contiguous");
89+
}
8690
int batch = 1;
8791
if (input->nDimension == 3) {
8892
// Force batch
@@ -107,7 +111,8 @@ void THNN_(SpatialDilatedConvolution_updateOutput)(
107111
// Define a buffer of ones, for bias accumulation
108112
// Note: this buffer can be shared with other modules, it only ever gets increased,
109113
// and always contains ones.
110-
if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) {
114+
if (!THTensor_(isContiguous)(ones) || ones->nDimension != 2 ||
115+
ones->size[0]*ones->size[1] < outputHeight*outputWidth) {
111116
// Resize plane and fill with ones...
112117
THTensor_(resize2d)(ones, outputHeight, outputWidth);
113118
THTensor_(fill)(ones, 1);
@@ -207,6 +212,7 @@ void THNN_(SpatialDilatedConvolution_updateGradInput)(
207212
input = THTensor_(newContiguous)(input);
208213
weight = THTensor_(newContiguous)(weight);
209214
gradOutput = THTensor_(newContiguous)(gradOutput);
215+
THArgCheck(THTensor_(isContiguous)(gradColumns), 5, "gradColumns needs to be contiguous");
210216
int batch = 1;
211217
if (input->nDimension == 3) {
212218
// Force batch
@@ -310,8 +316,11 @@ void THNN_(SpatialDilatedConvolution_accGradParameters)(
310316
input = THTensor_(newContiguous)(input);
311317
gradOutput = THTensor_(newContiguous)(gradOutput);
312318
THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
313-
if (gradBias)
319+
THArgCheck(THTensor_(isContiguous)(columns), 6, "columns needs to be contiguous");
320+
if (gradBias) {
314321
THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
322+
THArgCheck(THTensor_(isContiguous)(ones), 7, "ones needs to be contiguous");
323+
}
315324
int batch = 1;
316325
if (input->nDimension == 3) {
317326
// Force batch
@@ -329,13 +338,6 @@ void THNN_(SpatialDilatedConvolution_accGradParameters)(
329338
// Batch size + input planes
330339
int64_t batchSize = input->size[0];
331340

332-
// Define a buffer of ones, for bias accumulation
333-
if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) {
334-
// Resize plane and fill with ones...
335-
THTensor_(resize2d)(ones, outputHeight, outputWidth);
336-
THTensor_(fill)(ones, 1);
337-
}
338-
339341
// Resize temporary columns
340342
THTensor_(resize2d)(columns, nInputPlane*kW*kH, outputHeight*outputWidth);
341343

@@ -380,6 +382,12 @@ void THNN_(SpatialDilatedConvolution_accGradParameters)(
380382

381383
// Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices)
382384
if (gradBias) {
385+
// Define a buffer of ones, for bias accumulation
386+
if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) {
387+
// Resize plane and fill with ones...
388+
THTensor_(resize2d)(ones, outputHeight, outputWidth);
389+
THTensor_(fill)(ones, 1);
390+
}
383391
THBlas_(gemv)(
384392
't',
385393
k_, m_,

aten/src/THNN/generic/SpatialFullDilatedConvolution.c

Lines changed: 10 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -138,7 +138,11 @@ void THNN_(SpatialFullDilatedConvolution_updateOutput)(
138138

139139
input = THTensor_(newContiguous)(input);
140140
weight = THTensor_(newContiguous)(weight);
141-
bias = bias ? THTensor_(newContiguous)(bias) : bias;
141+
THArgCheck(THTensor_(isContiguous)(columns), 5, "columns needs to be contiguous");
142+
if (bias) {
143+
bias = THTensor_(newContiguous)(bias);
144+
THArgCheck(THTensor_(isContiguous)(ones), 6, "ones needs to be contiguous");
145+
}
142146
int batch = 1;
143147
if (input->nDimension == 3) {
144148
// Force batch
@@ -265,6 +269,7 @@ void THNN_(SpatialFullDilatedConvolution_updateGradInput)(
265269
input = THTensor_(newContiguous)(input);
266270
gradOutput = THTensor_(newContiguous)(gradOutput);
267271
weight = THTensor_(newContiguous)(weight);
272+
THArgCheck(THTensor_(isContiguous)(gradColumns), 5, "gradColumns needs to be contiguous");
268273
int batch = 1;
269274
if (input->nDimension == 3) {
270275
// Force batch
@@ -370,8 +375,11 @@ void THNN_(SpatialFullDilatedConvolution_accGradParameters)(
370375
input = THTensor_(newContiguous)(input);
371376
gradOutput = THTensor_(newContiguous)(gradOutput);
372377
THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
373-
if (gradBias)
378+
THArgCheck(THTensor_(isContiguous)(columns), 6, "columns needs to be contiguous");
379+
if (gradBias) {
374380
THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
381+
THArgCheck(THTensor_(isContiguous)(ones), 7, "ones needs to be contiguous");
382+
}
375383
int batch = 1;
376384
if (input->nDimension == 3) {
377385
// Force batch

aten/src/THNN/generic/VolumetricDilatedConvolution.c

Lines changed: 15 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -85,7 +85,11 @@ void THNN_(VolumetricDilatedConvolution_updateOutput)(
8585

8686
input = THTensor_(newContiguous)(input);
8787
weight = THTensor_(newContiguous)(weight);
88-
bias = bias ? THTensor_(newContiguous)(bias) : bias;
88+
THArgCheck(THTensor_(isContiguous)(columns), 5, "columns needs to be contiguous");
89+
if (bias) {
90+
bias = THTensor_(newContiguous)(bias);
91+
THArgCheck(THTensor_(isContiguous)(ones), 6, "ones needs to be contiguous");
92+
}
8993
int batch = 1;
9094
if (input->nDimension == 4) {
9195
// Force batch
@@ -189,7 +193,7 @@ void THNN_(VolumetricDilatedConvolution_updateOutput)(
189193

190194
THTensor_(free)(input);
191195
THTensor_(free)(weight);
192-
if (bias) THTensor_(free)(bias);
196+
if (bias) THTensor_(free)(bias);
193197
}
194198

195199
void THNN_(VolumetricDilatedConvolution_updateGradInput)(
@@ -216,7 +220,8 @@ void THNN_(VolumetricDilatedConvolution_updateGradInput)(
216220
input = THTensor_(newContiguous)(input);
217221
gradOutput = THTensor_(newContiguous)(gradOutput);
218222
weight = THTensor_(newContiguous)(weight);
219-
223+
THArgCheck(THTensor_(isContiguous)(gradColumns), 5, "gradColumns needs to be contiguous");
224+
220225
int batch = 1;
221226
if (input->nDimension == 4) {
222227
// Force batch
@@ -321,7 +326,13 @@ void THNN_(VolumetricDilatedConvolution_accGradParameters)(
321326

322327
input = THTensor_(newContiguous)(input);
323328
gradOutput = THTensor_(newContiguous)(gradOutput);
324-
329+
THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
330+
THArgCheck(THTensor_(isContiguous)(columns), 6, "columns needs to be contiguous");
331+
if (gradBias) {
332+
THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
333+
THArgCheck(THTensor_(isContiguous)(ones), 7, "ones needs to be contiguous");
334+
}
335+
325336
int batch = 1;
326337
if (input->nDimension == 4) {
327338
// Force batch

0 commit comments

Comments (0)