-
Notifications
You must be signed in to change notification settings - Fork 247
Expand file tree
/
Copy path memoryManagementInterface.hpp
More file actions
133 lines (115 loc) · 3.65 KB
/
memoryManagementInterface.hpp
File metadata and controls
133 lines (115 loc) · 3.65 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
// Include guard. Identifiers beginning with a double underscore are reserved
// for the implementation ([lex.name]/3), so the guard must not start with
// "__"; the closing #endif at the end of the file is unaffected.
#ifndef UTENSOR_MEMORY_MANAGEMENT_IFC_H
#define UTENSOR_MEMORY_MANAGEMENT_IFC_H
// Don't see where cstring is used here
//#include <cstring>
#include <cstddef>  // std::size_t
//#include "stdint.h" //avr
// Pulled to global scope so the declarations below can spell `size_t`
// unqualified (matches existing usage throughout this header).
using std::size_t;
namespace uTensor {
class AllocatorInterface;
// TODO Add support for Moving Handles without copies, Must be careful with
// binding
//
// A Handle is a stable identity for a piece of allocator-managed data: the
// allocator may relocate the underlying bytes and then rewrite `_ptr` (via
// its friendship) without invalidating user-held Handles.
class Handle {
 private:
  // Handles cannot be copied; there exists one unless explicitly deep copied.
  // `= delete` (the file already uses C++11 move references elsewhere) turns
  // accidental copies into a clear compile-time error instead of the old
  // private-declared-but-undefined link-time failure.
  // TODO write Handle deep_copy(const Handle& that);
  Handle(const Handle& that) = delete;
  // Handles cannot be allocated directly on the heap; their storage is
  // expected to be managed alongside the owning allocator.
  void* operator new(size_t sz) = delete;
  void operator delete(void* p) = delete;
  // KEY BIT: allocators need access to _ptr to retarget handles when data
  // moves (see AllocatorInterface::update_hndl).
  friend class AllocatorInterface;
  friend void* operator*(const Handle& that);

 public:
  Handle();
  // NOTE(review): implicit conversion from void* appears intentional here;
  // confirm before marking this constructor explicit.
  Handle(void* p);
  // return the data directly (looks pointer like)
  void* operator*();
  // Allow users to check if handle is not valid
  bool operator!() const;
  operator bool() const;

 protected:
  void* _ptr;  // current data location; rewritten by the bound allocator
};
// Free-function dereference for const Handles: returns the raw data pointer
// without requiring a mutable object (granted access by the friend
// declaration inside Handle; definition lives in the implementation file).
void* operator*(const Handle& that);
/**
 * Expecting the Handle copies to contain knowledge of their underlying
 * types/copy info is not reasonable. This class is designed to reference a
 * singleton-like handle for some data.
 *
 * Copy/move operations duplicate only the pointer to the shared Handle,
 * never the Handle itself (Handle copying is disallowed by design).
 * All members are declarations only; definitions live in the .cpp.
 */
class HandleReference {
 protected:
  // Non-owning pointer to the referenced Handle; may be null, in which case
  // operator bool / operator! report the reference as invalid.
  Handle* _ref;

 public:
  HandleReference();                              // empty (unbound) reference
  HandleReference(Handle* ref);                   // refer to an existing Handle
  HandleReference(const Handle& ref);             // NOTE(review): presumably takes the address of `ref` — confirm in .cpp
  HandleReference(const HandleReference& that);   // shares the same Handle
  HandleReference(HandleReference&& that);        // NOTE(review): likely nulls `that._ref` — confirm in .cpp
  HandleReference& operator=(Handle* ref);
  HandleReference& operator=(const HandleReference& that);
  HandleReference& operator=(HandleReference&& that);
  // Delegate functions
  // return the data directly (looks pointer like)
  void* operator*();
  // Allow users to check if handle is not valid
  bool operator!() const;
  operator bool() const;
};
/**
 * Allocators are expected to maintain a mapping of Tensor handles to data
 * regions. This allows the allocator to move around the underlying data
 * without breaking the user interface.
 */
class AllocatorInterface {
  // Allocators must implement these functions
 protected:
  virtual void _bind(void* ptr, Handle* hndl) = 0;
  virtual void _unbind(void* ptr, Handle* hndl) = 0;
  virtual bool _is_bound(void* ptr, Handle* hndl) = 0;
  virtual bool _has_handle(Handle* hndl) = 0;
  virtual void* _allocate(size_t sz) = 0;
  virtual void _deallocate(void* ptr) = 0;

 public:
  // This is a polymorphic base (pure virtual interface): a virtual destructor
  // is required so deleting a concrete allocator through an
  // AllocatorInterface* is well-defined (C++ Core Guidelines C.127).
  virtual ~AllocatorInterface() = default;
  /*
   * Public interface for updating a Tensor Handle reference
   */
  void update_hndl(Handle* h, void* new_ptr);
  /**
   * Bind/Unbind data to Tensor Handle
   */
  void bind(void* ptr, Handle* hndl);
  void unbind(void* ptr, Handle* hndl);
  /**
   * Check if a pointer is associated with a Tensor
   */
  bool is_bound(void* ptr, Handle* hndl);
  /**
   * Returns the amount of space available in the Memory Manager
   */
  virtual size_t available() = 0;
  /**
   * Update Tensor handles to point to new regions.
   * This is useful if the data moves around inside the memory manager,
   * for example if the data is compressed/decompressed dynamically.
   */
  virtual bool
  rebalance() = 0;  // KEY. This call updates all the Tensor data references
  /**
   * Allocate sz bytes in the memory manager
   */
  void* allocate(size_t sz);
  /**
   * Deallocate all data associated with pointer
   */
  void deallocate(void* ptr);
  /** Unbind invalidates a handle, so rather than forcing users to store a
   * temporary, this convenience function does it for them.
   */
  void unbind_and_deallocate(Handle* hndl);
};
// Free-function conveniences pairing a Handle with an allocator. Each returns
// a bool — presumably success/failure of the operation; definitions live in
// the implementation file (TODO confirm the exact contract there).
bool bind(Handle& hndl, AllocatorInterface& allocator);
bool unbind(Handle& hndl, AllocatorInterface& allocator);
bool is_bound(Handle& hndl, AllocatorInterface& allocator);
} // namespace uTensor
#endif