
Commit 5220d0d

cyyever authored and pytorchmergebot committed
Increase header coverage of clang-tidy (#110443)
Pull Request resolved: #110443
Approved by: https://github.com/Skylion007
1 parent 0e55cc4 commit 5220d0d

13 files changed: +68 −48 lines changed


.clang-tidy

Lines changed: 1 addition & 1 deletion

@@ -49,7 +49,7 @@ modernize-*,
 performance-*,
 readability-container-size-empty,
 '
-HeaderFilterRegex: '^(c10/|torch/csrc/).*$'
+HeaderFilterRegex: '^(aten/|c10/|torch/).*$'
 AnalyzeTemporaryDtors: false
 WarningsAsErrors: '*'
 ...
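HeaderFilterRegex controls which header paths clang-tidy emits diagnostics for when those headers are pulled into an analyzed translation unit; widening it from c10/ and torch/csrc/ to aten/, c10/ and the whole torch/ tree is what surfaces the header warnings fixed in the rest of this commit. A minimal sketch of how the two patterns differ, using std::regex purely as an approximation (clang-tidy uses LLVM's own regex engine, and the header paths below are illustrative, not the files touched here):

    // header_filter_demo.cpp -- illustrative only.
    #include <iostream>
    #include <regex>
    #include <string>
    #include <vector>

    int main() {
      const std::regex old_filter("^(c10/|torch/csrc/).*$");
      const std::regex new_filter("^(aten/|c10/|torch/).*$");
      const std::vector<std::string> headers = {
          "c10/util/Example.h",          // matched by both patterns
          "torch/csrc/Example.h",        // matched by both patterns
          "aten/src/ATen/Example.h",     // matched only by the new pattern
      };
      for (const auto& h : headers) {
        std::cout << h << ": old=" << std::regex_match(h, old_filter)
                  << " new=" << std::regex_match(h, new_filter) << '\n';
      }
    }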

aten/src/ATen/ExpandUtils.h

Lines changed: 1 addition & 1 deletion

@@ -63,7 +63,7 @@ inline bool are_expandable(IntArrayRef shape1, IntArrayRef shape2) {
   size_t ndim2 = shape2.size();
   size_t ndim = ndim1 < ndim2 ? ndim1 : ndim2;

-  for (int64_t i = ndim - 1; i >= 0; --i) {
+  for (int64_t i = static_cast<int64_t>(ndim) - 1; i >= 0; --i) {
     if (shape1[--ndim1] == shape2[--ndim2] || shape1[ndim1] == 1 ||
         shape2[ndim2] == 1) {
       continue;
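The cast matters because ndim is a size_t: in the old code the subtraction ndim - 1 happens in unsigned arithmetic, so an empty shape (ndim == 0) wraps around to SIZE_MAX before the implicit, implementation-defined conversion to int64_t, which is the pattern clang-tidy's sign/narrowing-conversion checks flag. Casting first keeps the whole expression signed. A standalone sketch of the hazard (not PyTorch code):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    int main() {
      size_t ndim = 0;
      // Unsigned subtraction wraps: 0 - 1 == SIZE_MAX on this platform.
      std::cout << ndim - 1 << '\n';                        // 18446744073709551615
      // Casting before subtracting keeps the arithmetic signed and well behaved.
      std::cout << static_cast<int64_t>(ndim) - 1 << '\n';  // -1
    }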

aten/src/ATen/LegacyBatchedTensorImpl.h

Lines changed: 3 additions & 3 deletions

@@ -113,15 +113,15 @@ inline bool isBatchedTensor(const Tensor& tensor) {

 // It is unsafe to call this on a Tensor that is not backed by a
 // BatchedTensorImpl. Please use `maybeGetBatchedImpl` whenever possible.
-inline BatchedTensorImpl* unsafeGetBatchedImpl(Tensor tensor) {
+inline BatchedTensorImpl* unsafeGetBatchedImpl(const Tensor& tensor) {
   return static_cast<BatchedTensorImpl*>(tensor.unsafeGetTensorImpl());
 }

-inline BatchedTensorImpl* maybeGetBatchedImpl(Tensor tensor) {
+inline BatchedTensorImpl* maybeGetBatchedImpl(const Tensor& tensor) {
   if (!isBatchedTensor(tensor)) {
     return nullptr;
   }
-  return unsafeGetBatchedImpl(std::move(tensor));
+  return unsafeGetBatchedImpl(tensor);
 }

 // Returns a bitset. If bit i is set, then that means dim i is a batchdim.
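Taking at::Tensor by value copies the handle, which bumps and later releases an atomic reference count on every call; clang-tidy's performance-unnecessary-value-param check prefers const Tensor& when the callee never needs its own copy, and dropping std::move follows naturally once no copy is made. A rough sketch of the difference with a toy shared handle (shared_ptr standing in for Tensor's intrusive refcounted impl):

    #include <iostream>
    #include <memory>

    struct Impl { int data = 0; };
    using Handle = std::shared_ptr<Impl>;  // stand-in for a refcounted Tensor handle

    // By value: the copy constructor performs an atomic increment per call.
    long use_count_by_value(Handle h) { return h.use_count(); }

    // By const reference: no refcount traffic at all.
    long use_count_by_ref(const Handle& h) { return h.use_count(); }

    int main() {
      Handle t = std::make_shared<Impl>();
      std::cout << use_count_by_value(t) << '\n';  // 2: the parameter holds a second reference
      std::cout << use_count_by_ref(t) << '\n';    // 1: only the caller's handle exists
    }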

aten/src/ATen/NestedTensorImpl.h

Lines changed: 5 additions & 2 deletions

@@ -95,11 +95,12 @@ struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
     const auto buffer_size = get_buffer_size();
     auto buffer_tensor_impl = c10::make_intrusive<TensorImpl>(
         c10::TensorImpl::VIEW, Storage(storage_), buffer_key_set_, data_type_);
-    buffer_tensor_impl->set_sizes_contiguous(c10::makeArrayRef(buffer_size));
+    buffer_tensor_impl->set_sizes_contiguous(
+        c10::makeArrayRef(static_cast<int64_t>(buffer_size)));
     return Tensor(buffer_tensor_impl);
   }

-  int64_t get_buffer_size() const {
+  size_t get_buffer_size() const {
     return storage_.nbytes() / data_type_.itemsize();
   }

@@ -146,6 +147,7 @@ struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
   // to TensorImpl.
   void refresh_dim();

+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
   const at::Tensor nested_sizes_, nested_strides_;
   // The starting positions of the underlying tensors in contiguous buffer
   // i.e. the buffer memory offsets to get the underlying tensors

@@ -159,6 +161,7 @@ struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
   // Some strong enough constraints are:
   // 1. every underlying tensor is contiguous in memory
   //    && nesting in ascending order
+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
   const at::Tensor storage_offsets_;
   // NOTE: -1 here means the size is missing
   // Optional to allow it to be computed lazily from nested.
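Two distinct things happen in this file: get_buffer_size now returns the naturally unsigned size_t (nbytes / itemsize), with an explicit cast to int64_t at the single call site that needs one, and the NOLINTNEXTLINE comments suppress one named check for exactly the next line, which is the usual way to keep intentionally-const members without disabling cppcoreguidelines-avoid-const-or-ref-data-members project-wide. A generic illustration of the suppression mechanism (not code from this file):

    #include <string>

    struct Config {
      // Intentionally immutable after construction; silence only this diagnostic,
      // and only for the declaration that follows.
      // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
      const std::string name;

      explicit Config(std::string n) : name(std::move(n)) {}
    };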

aten/src/ATen/ParallelOpenMP.h

Lines changed: 1 addition & 0 deletions

@@ -1,5 +1,6 @@
 #pragma once

+#include <algorithm>
 #include <atomic>
 #include <cstddef>
 #include <exception>

aten/src/ATen/SparseCsrTensorImpl.h

Lines changed: 5 additions & 5 deletions

@@ -115,8 +115,8 @@ struct TORCH_API SparseCsrTensorImpl : public TensorImpl {
     auto impl = c10::make_intrusive<SparseCsrTensorImpl>(
         key_set(), device(), layout_impl(), dtype());
     copy_tensor_metadata(
-        /*src_impl=*/this,
-        /*dest_impl=*/impl.get(),
+        /*src_sparse_impl=*/this,
+        /*dest_sparse_impl=*/impl.get(),
         /*version_counter=*/version_counter,
         /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
     impl->refresh_numel();

@@ -135,9 +135,9 @@ struct TORCH_API SparseCsrTensorImpl : public TensorImpl {
     auto impl = c10::make_intrusive<SparseCsrTensorImpl>(
         key_set(), device(), layout_impl(), dtype());
     copy_tensor_metadata(
-        /*src_impl=*/this,
-        /*dest_impl=*/impl.get(),
-        /*version_counter=*/std::move(version_counter),
+        /*src_sparse_impl=*/this,
+        /*dest_sparse_impl=*/impl.get(),
+        /*version_counter=*/version_counter,
         /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
     impl->refresh_numel();
     return impl;
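Two checks are likely at work here: bugprone-argument-comment requires the /*name=*/ comments at a call site to match the callee's declared parameter names (the sparse overload of copy_tensor_metadata presumably names them src_sparse_impl and dest_sparse_impl), and performance-move-const-arg points out that std::move on a const argument silently degrades to a copy, so the move is dropped. A small sketch of the argument-comment rule with a made-up function:

    #include <vector>

    // Hypothetical callee; clang-tidy compares the /*name=*/ comments at call
    // sites against these declared parameter names.
    void resize_buffer(std::vector<int>& buf, int new_capacity, bool preserve_contents) {
      if (!preserve_contents) {
        buf.clear();
      }
      buf.reserve(new_capacity);
    }

    int main() {
      std::vector<int> buf{1, 2, 3};
      // OK: comments match the parameter names.
      resize_buffer(buf, /*new_capacity=*/128, /*preserve_contents=*/true);
      // Flagged by bugprone-argument-comment: "size" and "keep" do not match.
      resize_buffer(buf, /*size=*/256, /*keep=*/false);
    }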

aten/src/ATen/TensorNames.h

Lines changed: 3 additions & 1 deletion

@@ -28,7 +28,9 @@ namespace at::namedinference {
 struct TORCH_API TensorName {
   explicit TensorName(ArrayRef<Dimname> origin, int origin_idx)
       : origin_(origin),
-        name_(origin[maybe_wrap_dim(origin_idx, origin.size())]),
+        name_(origin[maybe_wrap_dim(
+            origin_idx,
+            static_cast<int64_t>(origin.size()))]),
         origin_idx_(origin_idx) {}

   // op_name is only used for error reporting.

aten/src/ATen/autocast_mode.h

Lines changed: 3 additions & 1 deletion

@@ -45,7 +45,9 @@ TORCH_API bool is_autocast_cache_enabled();
 TORCH_API void set_autocast_cache_enabled(bool enabled);

 namespace {
-bool is_autocast_eligible(const Tensor& tensor, c10::DeviceType device_type) {
+inline bool is_autocast_eligible(
+    const Tensor& tensor,
+    c10::DeviceType device_type) {
   switch (device_type) {
     case c10::DeviceType::CUDA:
       return (tensor.is_cuda() || tensor.is_xla()) &&
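Marking header-defined helpers inline (here and throughout ForeachUtils.h below) is the conventional fix once these headers fall under clang-tidy's coverage: a non-inline function with a body in a header either risks multiple-definition errors when included from several translation units or, inside an anonymous namespace, produces a separate per-TU copy that can trip unused-function and definitions-in-headers style diagnostics. A generic sketch of the pattern, in a hypothetical header unrelated to this commit:

    // my_helpers.h -- hypothetical header illustrating the pattern.
    #pragma once

    #include <string>

    // Without `inline`, a function defined at namespace scope in a header is a
    // separate external definition in every translation unit that includes it,
    // which breaks the one-definition rule at link time.
    inline std::string greeting(const std::string& name) {
      return "hello, " + name;
    }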

aten/src/ATen/core/List_inl.h

Lines changed: 1 addition & 1 deletion

@@ -198,7 +198,7 @@ typename List<T>::internal_const_reference_type List<T>::operator[](size_type pos
 template<class T>
 typename List<T>::internal_reference_type List<T>::operator[](size_type pos) {
   static_cast<void>(impl_->list.at(pos)); // Throw the exception if it is out of range.
-  return {impl_->list.begin() + pos};
+  return {impl_->list.begin() + static_cast<typename decltype(impl_->list)::difference_type>(pos)};
 }

 template<class T>
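Here pos is the list's unsigned size_type, while iterator arithmetic is defined in terms of the container's signed difference_type; adding the two mixes signedness, which clang-tidy's sign-conversion checks flag, hence the explicit cast. A minimal illustration with std::vector rather than the c10::List internals:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
      std::vector<int> v{10, 20, 30, 40};
      std::size_t pos = 2;  // unsigned index, like List<T>::size_type

      // v.begin() + pos compiles, but the unsigned pos is implicitly converted
      // to the signed difference_type; clang-tidy prefers making that explicit.
      auto it = v.begin() +
          static_cast<std::vector<int>::difference_type>(pos);
      std::cout << *it << '\n';  // 30
    }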

aten/src/ATen/native/ForeachUtils.h

Lines changed: 22 additions & 18 deletions

@@ -20,14 +20,14 @@
 namespace at::native {
 namespace {
 // Check if tensor list has either a boolean tensor or a integer tensor
-bool has_integral_tensor(TensorList tensors, const bool includeBool) {
+inline bool has_integral_tensor(TensorList tensors, const bool includeBool) {
   return std::any_of(
       tensors.begin(), tensors.end(), [&includeBool](const auto& t) {
         return at::isIntegralType(t.scalar_type(), includeBool);
       });
 }
 // check if tensor list has bool tensors
-bool has_bool_tensor(TensorList tensors) {
+inline bool has_bool_tensor(TensorList tensors) {
   return std::any_of(tensors.begin(), tensors.end(), [](const auto& t) -> bool {
     return t.scalar_type() == ScalarType::Bool;
   });

@@ -37,11 +37,11 @@ bool has_bool_tensor(TensorList tensors) {
 // - Tensor lists must be non-empty.
 // - All TensorLists and ScalarLists must have the same number of elements.
 // - Corresponding tensors must have the same size.
-void check_foreach_api_restrictions(TensorList tensors) {
+inline void check_foreach_api_restrictions(TensorList tensors) {
   TORCH_CHECK(!tensors.empty(), "Tensor list must have at least one tensor.");
 }

-void check_foreach_api_restrictions(
+inline void check_foreach_api_restrictions(
     TensorList tensors,
     ArrayRef<Scalar> scalars) {
   check_foreach_api_restrictions(tensors);

@@ -50,7 +50,9 @@ void check_foreach_api_restrictions(
       "Tensor list must have same number of elements as scalar list.");
 }

-void check_foreach_api_restrictions(TensorList tensors1, TensorList tensors2) {
+inline void check_foreach_api_restrictions(
+    TensorList tensors1,
+    TensorList tensors2) {
   TORCH_CHECK(!tensors1.empty(), "Tensor list must have at least one tensor.");
   TORCH_CHECK(!tensors2.empty(), "Tensor list must have at least one tensor.");
   TORCH_CHECK(

@@ -61,7 +63,7 @@ void check_foreach_api_restrictions(TensorList tensors1, TensorList tensors2) {
       tensors2.size());
 }

-void check_foreach_api_restrictions(
+inline void check_foreach_api_restrictions(
     TensorList tensors1,
     TensorList tensors2,
     TensorList tensors3) {

@@ -82,7 +84,7 @@ void check_foreach_api_restrictions(
       tensors3.size());
 }

-void check_foreach_api_restrictions(
+inline void check_foreach_api_restrictions(
     TensorList tensors1,
     TensorList tensors2,
     TensorList tensors3,

@@ -99,7 +101,8 @@ void check_foreach_api_restrictions(
 // Helper function called in check_fast_path_restrictions to check whether all
 // corresponding tensors (aligning in index across the tensorLists) share the
 // same device and dtype.
-bool _check_tensors_share_device_and_dtype(ArrayRef<TensorList> tensorLists) {
+inline bool _check_tensors_share_device_and_dtype(
+    ArrayRef<TensorList> tensorLists) {
   const auto expected_dtype = tensorLists[0][0].dtype();
   const auto expected_device = tensorLists[0][0].device();

@@ -122,7 +125,8 @@ bool _check_tensors_share_device_and_dtype(ArrayRef<TensorList> tensorLists) {

 // Helper function called in check_fast_path_restrictions to check if
 // corresponding tensors in tensor lists have the same sizes and strides.
-bool _check_tensors_share_sizes_and_strides(ArrayRef<TensorList> tensorLists) {
+inline bool _check_tensors_share_sizes_and_strides(
+    ArrayRef<TensorList> tensorLists) {
   for (const auto i : c10::irange(1, tensorLists.size())) {
     for (const auto j : c10::irange(tensorLists[0].size())) {
       if (tensorLists[0][j].sizes() != tensorLists[i][j].sizes() ||

@@ -140,7 +144,7 @@ bool _check_tensors_share_sizes_and_strides(ArrayRef<TensorList> tensorLists) {
 // function assumes that _check_tensors_share_device_and_dtype has already been
 // called so that all corresponding tensors in tensorLists have the same dtype.
 // Then, it is sufficient to check the type promotion with just one tensorList.
-bool _check_tensors_do_type_promotion_with_scalars(
+inline bool _check_tensors_do_type_promotion_with_scalars(
     TensorList tensorList,
     ArrayRef<Scalar> scalarList = {},
     bool does_op_promote_integer_inputs_to_float = false) {

@@ -176,7 +180,7 @@ bool _check_tensors_do_type_promotion_with_scalars(

 // Please, make sure to call check_foreach_api_restrictions before calling this
 // method. There is a set of preconditions that have to be satisfied.
-bool check_fast_path_restrictions(
+inline bool check_fast_path_restrictions(
     ArrayRef<TensorList> tensorLists,
     ArrayRef<Scalar> scalarList = {},
     bool does_op_promote_integer_inputs_to_float = false) {

@@ -188,7 +192,7 @@ bool check_fast_path_restrictions(
       does_op_promote_integer_inputs_to_float);
 }

-std::vector<c10::Scalar> convert_tensor_to_scalar_list(
+inline std::vector<c10::Scalar> convert_tensor_to_scalar_list(
     const Tensor& scalarList_,
     int64_t expect_length) {
   std::vector<c10::Scalar> scalarList;

@@ -221,21 +225,21 @@ std::vector<c10::Scalar> convert_tensor_to_scalar_list(
         scalarList_.size(0),
         " instead.");
     for (int64_t i = 0; i < scalarList_.size(0); i++) {
-      scalarList.push_back(c10::Scalar(scalar_data[i]));
+      scalarList.emplace_back(scalar_data[i]);
     }
   });
   return scalarList;
 }

-bool can_use_fast_route(
+inline bool can_use_fast_route(
     ArrayRef<TensorList> tensorLists,
     ArrayRef<Scalar> scalarList = {},
     bool does_op_promote_integer_inputs_to_float = false) {
   return check_fast_path_restrictions(
       tensorLists, scalarList, does_op_promote_integer_inputs_to_float);
 }

-bool can_use_fast_route(
+inline bool can_use_fast_route(
     TensorList tensors1,
     TensorList tensors2,
     bool does_op_promote_integer_inputs_to_float = false) {

@@ -253,13 +257,13 @@ using FlatMap = std::unordered_map<
     TensorsAndIndicesT,
     ParamsHash<DeviceDtypeKey>>;

-FlatMap _group_tensors_by_first_tensors_device_and_dtype(
+inline FlatMap _group_tensors_by_first_tensors_device_and_dtype(
     const nested_optional_tensorvec_t& nested_tensorlist,
     const bool with_indices) {
   FlatMap grouped_tensors_with_indices;

-  TORCH_CHECK(nested_tensorlist.size() > 0);
-  TORCH_CHECK(nested_tensorlist[0].size() > 0);
+  TORCH_CHECK(!nested_tensorlist.empty());
+  TORCH_CHECK(!nested_tensorlist[0].empty());
   const auto num_lists = nested_tensorlist.size();
   const auto num_tensors = nested_tensorlist[0].size();
