-
Notifications
You must be signed in to change notification settings - Fork 247
Expand file tree
/
Copy pathtensorBase.cpp
More file actions
109 lines (97 loc) · 3.89 KB
/
tensorBase.cpp
File metadata and controls
109 lines (97 loc) · 3.89 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
#include "tensorBase.hpp"
#include "uTensor/core/context.hpp"
#include "memoryManagementInterface.hpp"
#include "uTensor/core/uTensor_util.hpp"
namespace uTensor {
// Error raised when operator delete is invoked on a null tensor pointer.
DEFINE_ERROR(NullTensorDeleteError);
// Every tensor registers itself with the active context on construction so
// the runtime can keep track of live tensors.
TensorBase::TensorBase() {
  Context* ctx = Context::get_default_context();
  ctx->register_tensor(this);
}
// No cleanup here: metadata storage is reclaimed through the custom
// operator delete below rather than from the destructor.
// (A direct metadata-allocator deallocate call used to live here.)
TensorBase::~TensorBase() {}
// Tensor metadata is carved out of its own allocator, kept separate from the
// data scratch-pad heaps managed elsewhere by the context.
void* TensorBase::operator new(size_t sz) {
  auto* meta_alloc = Context::get_default_context()->get_metadata_allocator();
  return meta_alloc->allocate(sz);
}
// Return tensor metadata storage to the metadata allocator.
// Deleting a null tensor is reported as an error via the context; in that
// case the null pointer must NOT be forwarded to the allocator.
void TensorBase::operator delete(void* p) {
  if (p == nullptr) {
    Context::get_default_context()->throwError(new NullTensorDeleteError);
    // Bug fix: previously control fell through and called deallocate(nullptr).
    return;
  }
  Context::get_default_context()->get_metadata_allocator()->deallocate(p);
}
// Element-type tag carried by this tensor.
ttype TensorInterface::get_type() const {
  return _type;
}

// Mutable view of the tensor's shape descriptor.
TensorShape& TensorInterface::get_shape() {
  return _shape;
}

// Read-only view of the tensor's shape descriptor.
const TensorShape& TensorInterface::get_shape() const {
  return _shape;
}

// Number of elements described by the current shape.
uint32_t TensorInterface::num_elems() const {
  return _shape.num_elems();
}
// Default: an untyped, shapeless tensor with no quantization parameters.
TensorInterface::TensorInterface()
    : TensorBase(),
      _shape(0),
      _type(undefined),
      _type_size(0),
      _qnt_params(nullptr) {}
// Construct a typed tensor with an empty shape.
// _type_size is now derived in the mem-initializer list, matching the default
// constructor's style instead of assigning in the body. Inside the
// initializer parentheses the name `_type` resolves to the constructor
// parameter, so type_size(_type) reads the argument regardless of member
// declaration order.
TensorInterface::TensorInterface(ttype _type)
    : TensorBase(),
      _shape(0),
      _type(_type),
      _type_size(type_size(_type)),
      _qnt_params(nullptr) {}
// Construct a tensor with an explicit shape and element type.
// _type_size is now derived in the mem-initializer list, matching the default
// constructor's style instead of assigning in the body; `_type` in the
// initializer parentheses names the constructor parameter.
TensorInterface::TensorInterface(const TensorShape& _shape, ttype _type)
    : TensorBase(),
      _shape(_shape),
      _type(_type),
      _type_size(type_size(_type)),
      _qnt_params(nullptr) {}
// Nothing to release here; storage lifetime is handled by TensorBase's
// custom operator new/delete and the owning context.
TensorInterface::~TensorInterface() {}
// Element accessors, usable like mTensor(1) = 5 or mTensor(2, 2) = 5.

// Read-only element access through up-to-4D coordinates.
const IntegralValue TensorInterface::operator()(uint16_t i, uint16_t j,
                                                uint16_t k, uint16_t l) const {
  // TODO: validate coordinates against _shape before dereferencing
  const uint32_t flat = _shape.linear_index(i, j, k, l);
  return read(flat);
}

// Writable element access through up-to-4D coordinates.
IntegralValue TensorInterface::operator()(uint16_t i, uint16_t j, uint16_t k,
                                          uint16_t l) {
  // TODO: validate coordinates against _shape before dereferencing
  const uint32_t flat = _shape.linear_index(i, j, k, l);
  return write(flat);
}
// Read-only access by flat element index (no bounds checking yet).
const IntegralValue TensorInterface::operator()(uint32_t linear_index) const {
  // TODO: bounds-check linear_index against num_elems()
  return read(linear_index);
}

// Writable access by flat element index (no bounds checking yet).
IntegralValue TensorInterface::operator()(uint32_t linear_index) {
  // TODO: bounds-check linear_index against num_elems()
  return write(linear_index);
}
// Access this tensor's quantization metadata.
// NOTE(review): _qnt_params is nullptr-initialized in every constructor, so
// calling this before params are attached dereferences null — verify callers.
const QuantizationParams& TensorInterface::get_quantization_params() const {
  return **_qnt_params;
}
// Default hook for block reads: a plain tensor is not block-optimizable, so
// landing here means an optimized kernel was given the wrong tensor kind.
size_t TensorInterface::_get_readable_block(const void*& buffer,
                                            uint16_t req_read_size,
                                            uint32_t linear_index) const {
  uTensor_printf(
      "ERROR, Optimized op attempted to read access non-optimizable tensor\n");
  Context::get_default_context()->throwError(
      new InvalidOptimizableTensorError());
  return static_cast<size_t>(-1);  // SIZE_MAX sentinel, mirrors public API
}
// Default hook for block writes: a plain tensor is not block-optimizable, so
// landing here means an optimized kernel was given the wrong tensor kind.
size_t TensorInterface::_get_writeable_block(void*& buffer,
                                             uint16_t req_write_size,
                                             uint32_t linear_index) {
  uTensor_printf(
      "ERROR, Optimized op attempted to write access non-optimizable tensor\n");
  Context::get_default_context()->throwError(
      new InvalidOptimizableTensorError());
  return static_cast<size_t>(-1);  // SIZE_MAX sentinel, mirrors public API
}
// Public entry point for block reads: reject a request larger than the whole
// tensor, then defer to the subclass hook.
// NOTE(review): the guard ignores linear_index, so a request starting near
// the end of the buffer can still pass — confirm subclasses re-check.
size_t TensorInterface::get_readable_block(const void*& buffer, uint16_t req_read_size,
                                           uint32_t linear_index) const {
  const bool too_large = req_read_size > _type_size * _shape.get_linear_size();
  if (too_large) {
    return -1;
  }
  return _get_readable_block(buffer, req_read_size, linear_index);
}
// Public entry point for block writes: reject a request larger than the whole
// tensor, then defer to the subclass hook.
// NOTE(review): the guard ignores linear_index, so a request starting near
// the end of the buffer can still pass — confirm subclasses re-check.
size_t TensorInterface::get_writeable_block(void*& buffer,
                                            uint16_t req_write_size,
                                            uint32_t linear_index) {
  const bool too_large = req_write_size > _type_size * _shape.get_linear_size();
  if (too_large) {
    return -1;
  }
  return _get_writeable_block(buffer, req_write_size, linear_index);
}
} // namespace uTensor