@@ -171,24 +171,32 @@ static void recursive_store(char* data, IntList sizes, IntList strides, int64_t
   }
 }
 
-static Tensor internal_new_from_data(const Type& type, int device, PyObject* data,
+static Tensor internal_new_from_data(const Type& type, at::optional<Device> device_opt, PyObject* data,
                                      bool copy_variables, bool copy_numpy,
                                      bool type_inference) {
+  int64_t device = device_opt.has_value() ? device_opt.value().deviceInt64() : -1;
   if (THPUtils_checkString(data)) {
     throw TypeError("new(): invalid data type '%s'", Py_TYPE(data)->tp_name);
   }
 
   if (THPVariable_Check(data)) {
     auto var = reinterpret_cast<THPVariable*>(data)->cdata;
-    const auto& type_to_use = type_inference ? var.type() : type;
+    auto type_inference_device_type = device_opt.has_value() ? device_opt.value().type
+                                                             : torch::getDeviceType(var.type());
+    // infer the scalar type and device type; it's not expected to infer the layout since these constructors
+    // are defined per-layout-type (e.g. tensor vs sparse_coo_tensor).
+    const auto& type_inference_type = torch::getType(var.type().scalarType(),
+                                                     *torch::getLayout(type.backend()),
+                                                     type_inference_device_type);
+    const auto& type_to_use = type_inference ? type_inference_type : type;
     return copy_variables ? new_with_tensor_copy(type_to_use, var, device) :
                             new_with_type_conversion(type_to_use, var, device);
   }
 
 #ifdef WITH_NUMPY
   if (PyArray_Check(data)) {
     auto tensor = autograd::make_variable(tensor_from_numpy(data), /*requires_grad=*/false);
-    const auto& type_to_use = type_inference ? tensor.type() : type;
+    const auto& type_to_use = type_inference ? type.toScalarType(tensor.type().scalarType()) : type;
     return copy_numpy ? new_with_tensor_copy(type_to_use, tensor, device) :
                         new_with_type_conversion(type_to_use, tensor, device);
   }
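
Note (editorial, not part of the patch): the new `device_opt` parameter replaces the old `int device` argument, but the helpers below still consume the legacy integer index, hence the added `device_opt.has_value() ? device_opt.value().deviceInt64() : -1` line. A minimal, self-contained sketch of that mapping; `std::optional` and the `Device` struct here are stand-ins for `at::optional` and `torch::Device`, not the real types:

#include <cassert>
#include <cstdint>
#include <optional>

// Hypothetical stand-in for torch::Device; only the index is modeled here.
struct Device {
  int64_t index;
  int64_t deviceInt64() const { return index; }
};

// Mirrors the line added at the top of internal_new_from_data():
// an absent device collapses to -1, meaning "use the current/default device".
static int64_t to_legacy_device_index(const std::optional<Device>& device_opt) {
  return device_opt.has_value() ? device_opt.value().deviceInt64() : -1;
}

int main() {
  assert(to_legacy_device_index(Device{1}) == 1);      // explicit device keeps its index
  assert(to_legacy_device_index(std::nullopt) == -1);  // no device means current device
  return 0;
}
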
@@ -204,15 +212,15 @@ static Tensor internal_new_from_data(const Type & type, int device, PyObject* da
   return new_with_type_conversion(type_to_use, tensor, device);
 }
 
-Tensor legacy_new_from_data(const Type& type, int device, PyObject* data) {
+Tensor legacy_new_from_data(const Type& type, at::optional<Device> device, PyObject* data) {
   return internal_new_from_data(type, device, data, false, false, false);
 }
 
-static Tensor new_from_data_copy(const Type& type, int device, PyObject* data) {
+static Tensor new_from_data_copy(const Type& type, at::optional<Device> device, PyObject* data) {
   return internal_new_from_data(type, device, data, true, true, false);
 }
 
-static Tensor legacy_new_from_sequence(const Type& type, int device, PyObject* data) {
+static Tensor legacy_new_from_sequence(const Type& type, at::optional<Device> device, PyObject* data) {
   if (!PySequence_Check(data)) {
     throw TypeError("new(): data must be a sequence (got %s)", Py_TYPE(data)->tp_name);
   }
@@ -246,7 +254,7 @@ static Tensor legacy_sparse_tensor_ctor(const Type& type, PyObject* args, PyObje
     if (!THPSize_Check(arg) && PyTuple_GET_SIZE(args) >= 1 && arg == PyTuple_GET_ITEM(args, 0)) {
       // new(sequence) binds to this signature but should be treated differently
       // unless the sequences is a torch.Size
-      return legacy_new_from_sequence(type, r.deviceInt64(1), r.pyobject(0));
+      return legacy_new_from_sequence(type, r.deviceOptional(1), r.pyobject(0));
     }
     return new_with_sizes(type, r.deviceInt64(1), r.intlist(0));
   }
@@ -284,11 +292,11 @@ Tensor legacy_tensor_ctor(const Type& type, PyObject* args, PyObject* kwargs) {
     if (!THPSize_Check(arg) && PyTuple_GET_SIZE(args) >= 1 && arg == PyTuple_GET_ITEM(args, 0)) {
       // new(sequence) binds to this signature but should be treated differently
       // unless the sequences is a torch.Size
-      return legacy_new_from_sequence(type, r.deviceInt64(1), r.pyobject(0));
+      return legacy_new_from_sequence(type, r.deviceOptional(1), r.pyobject(0));
     }
     return new_with_sizes(type, r.deviceInt64(1), r.intlist(0));
   } else if (r.idx == 5) {
-    return legacy_new_from_sequence(type, r.deviceInt64(1), r.pyobject(0));
+    return legacy_new_from_sequence(type, r.deviceOptional(1), r.pyobject(0));
   }
   throw std::runtime_error("new(): invalid arguments");
 }
@@ -324,7 +332,7 @@ static Tensor legacy_sparse_tensor_new(const Type& type, PyObject* args, PyObjec
     if (!THPSize_Check(arg) && PyTuple_GET_SIZE(args) >= 1 && arg == PyTuple_GET_ITEM(args, 0)) {
       // new(sequence) binds to this signature but should be treated differently
       // unless the sequences is a torch.Size
-      return legacy_new_from_sequence(type, r.deviceInt64(1), r.pyobject(0));
+      return legacy_new_from_sequence(type, r.deviceOptional(1), r.pyobject(0));
     }
     return new_with_sizes(type, r.deviceInt64(1), r.intlist(0));
   }
@@ -362,11 +370,11 @@ Tensor legacy_tensor_new(const Type& type, PyObject* args, PyObject* kwargs) {
     if (!THPSize_Check(arg) && PyTuple_GET_SIZE(args) >= 1 && arg == PyTuple_GET_ITEM(args, 0)) {
       // new(sequence) binds to this signature but should be treated differently
       // unless the sequences is a torch.Size
-      return legacy_new_from_sequence(type, r.deviceInt64(1), r.pyobject(0));
+      return legacy_new_from_sequence(type, r.deviceOptional(1), r.pyobject(0));
     }
     return new_with_sizes(type, r.deviceInt64(1), r.intlist(0));
   } else if (r.idx == 5) {
-    return legacy_new_from_sequence(type, r.deviceInt64(1), r.pyobject(0));
+    return legacy_new_from_sequence(type, r.deviceOptional(1), r.pyobject(0));
   }
   throw std::runtime_error("new(): invalid arguments");
 }
@@ -398,22 +406,21 @@ Tensor sparse_coo_tensor_ctor(const Type& type, PyObject* args, PyObject* kwargs
     bool type_inference = r.isNone(2);
     const auto& sparse_type = typeWithDefault(r, 2, 3, default_sparse_type);
     const auto& dense_type = sparse_type.toBackend(sparse_type.is_cuda() ? kCUDA : kCPU);
-    const auto& index_type = dense_type.toScalarType(kLong);
     AutoGPU autogpu(r.deviceInt64(3));
-    // explanation of booleans: allow variables, do type conversion of them, copy numpy data
-    Tensor indices = internal_new_from_data(index_type, -1, r.pyobject(0), false, true, false);
-    Tensor values = internal_new_from_data(dense_type, -1, r.pyobject(1), false, true, type_inference);
+    Tensor values = internal_new_from_data(dense_type, r.deviceOptional(3), r.pyobject(1), false, true, type_inference);
+    // if no dtype provided, infer type based on value type.
+    const auto& index_type = values.type().toScalarType(kLong);
+    Tensor indices = internal_new_from_data(index_type, r.deviceOptional(3), r.pyobject(0), false, true, false);
     const auto& sparse_type_to_use = values.type().toBackend(values.type().is_cuda() ? kSparseCUDA : kSparseCPU);
     return set_requires_grad(sparse_type_to_use.sparse_coo_tensor(indices, values), r.toBool(4));
   } else if (r.idx == 1) {
     bool type_inference = r.isNone(3);
     const auto& sparse_type = typeWithDefault(r, 3, 4, default_sparse_type);
     const auto& dense_type = sparse_type.toBackend(sparse_type.is_cuda() ? kCUDA : kCPU);
-    const auto& index_type = dense_type.toScalarType(kLong);
     AutoGPU autogpu(r.deviceInt64(4));
-    // explanation of booleans: allow variables, do type conversion of them, copy numpy data
-    Tensor indices = internal_new_from_data(index_type, -1, r.pyobject(0), false, true, false);
-    Tensor values = internal_new_from_data(dense_type, -1, r.pyobject(1), false, true, type_inference);
+    Tensor values = internal_new_from_data(dense_type, r.deviceOptional(4), r.pyobject(1), false, true, type_inference);
+    const auto& index_type = values.type().toScalarType(kLong);
+    Tensor indices = internal_new_from_data(index_type, r.deviceOptional(4), r.pyobject(0), false, true, false);
     const auto& sparse_type_to_use = values.type().toBackend(values.type().is_cuda() ? kSparseCUDA : kSparseCPU);
     return set_requires_grad(sparse_type_to_use.sparse_coo_tensor(indices, values, r.intlist(2)), r.toBool(5));
   }
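
Note (editorial, not part of the patch): in the sparse constructor above, `values` is now built before `indices` so that, when no dtype is given, the index type is derived from the inferred value type (`values.type().toScalarType(kLong)`) and the indices land on the same backend/device as the values. A small standalone sketch of that ordering, using a hypothetical `FakeType` stand-in rather than ATen's `Type`:

#include <iostream>
#include <string>

// Hypothetical stand-in for at::Type, only to illustrate the derivation order.
struct FakeType {
  std::string backend;  // e.g. "CUDA" vs "CPU" in the real code
  std::string scalar;   // e.g. "Float"
  FakeType toScalarType(const std::string& s) const { return FakeType{backend, s}; }
};

int main() {
  // The value type may itself be inferred from the Python data (type_inference).
  FakeType values_type{"CUDA", "Float"};
  // The index type is derived from it, so indices follow the values' backend/device.
  FakeType index_type = values_type.toScalarType("Long");
  std::cout << index_type.backend << " " << index_type.scalar << "\n";  // prints: CUDA Long
  return 0;
}
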
@@ -430,7 +437,7 @@ Tensor tensor_ctor(const Type& type, PyObject* args, PyObject* kwargs) {
   if (r.idx == 0) {
     bool type_inference = r.isNone(1);
     return set_requires_grad(internal_new_from_data(
-        typeWithDefault(r, 1, 2, type), r.deviceInt64(2), r.pyobject(0), true, true, type_inference), r.toBool(3));
+        typeWithDefault(r, 1, 2, type), r.deviceOptional(2), r.pyobject(0), true, true, type_inference), r.toBool(3));
   }
   throw std::runtime_error("tensor(): invalid arguments");
 }
@@ -445,7 +452,7 @@ Tensor new_tensor(const Type& type, PyObject* args, PyObject* kwargs) {
   auto r = parser.parse(args, kwargs, parsed_args);
   if (r.idx == 0) {
     return set_requires_grad(new_from_data_copy(
-        typeWithDefault(r, 1, 2, type), r.deviceInt64(2), r.pyobject(0)), r.toBool(3));
+        typeWithDefault(r, 1, 2, type), r.deviceOptional(2), r.pyobject(0)), r.toBool(3));
   }
   throw std::runtime_error("new_tensor(): invalid arguments");
 }