Commit 7eeef5f
std.mem.Allocator: introduce remap function to the interface
This one changes the size of an allocation, allowing it to be relocated. However, the implementation will still return `null` if doing so would be equivalent to `new = alloc(); memcpy(new, old); free(old)`.

Mainly this prepares for taking advantage of `mremap`, which I thought would be a bigger deal but apparently is only available on Linux. Still, we should use it on Linux.
1 parent dd2fa4f commit 7eeef5f
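For orientation, here is a minimal caller-side sketch of the new API as the diffs below use it. `growBuffer` is a hypothetical helper; the public `Allocator.remap` is assumed to mirror `resize` except that it returns the possibly-relocated slice instead of a `bool`, with `null` meaning the caller should fall back to the explicit copy described above.

const std = @import("std");

// Hypothetical helper: grow `old` to `new_len` bytes using the new remap call.
fn growBuffer(gpa: std.mem.Allocator, old: []u8, new_len: usize) ![]u8 {
    // remap may extend the allocation in place or relocate it; either way the
    // returned slice is the allocation to use from now on.
    if (gpa.remap(old, new_len)) |relocated| return relocated;

    // The allocator declined because it would amount to alloc + memcpy + free,
    // so the caller performs that fallback itself, as ArrayList does below.
    const new_buf = try gpa.alloc(u8, new_len);
    @memcpy(new_buf[0..old.len], old);
    gpa.free(old);
    return new_buf;
}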

File tree: 7 files changed, +389 -217 lines changed


lib/std/array_list.zig

Lines changed: 18 additions & 19 deletions
@@ -105,21 +105,19 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
             return result;
         }
 
-        /// The caller owns the returned memory. Empties this ArrayList,
-        /// Its capacity is cleared, making deinit() safe but unnecessary to call.
+        /// The caller owns the returned memory. Empties this ArrayList.
+        /// Its capacity is cleared, making `deinit` safe but unnecessary to call.
         pub fn toOwnedSlice(self: *Self) Allocator.Error!Slice {
             const allocator = self.allocator;
 
             const old_memory = self.allocatedSlice();
-            if (allocator.resize(old_memory, self.items.len)) {
-                const result = self.items;
+            if (allocator.remap(old_memory, self.items.len)) |new_items| {
                 self.* = init(allocator);
-                return result;
+                return new_items;
             }
 
             const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
             @memcpy(new_memory, self.items);
-            @memset(self.items, undefined);
             self.clearAndFree();
             return new_memory;
         }
@@ -185,8 +183,9 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
             // extra capacity.
             const new_capacity = growCapacity(self.capacity, new_len);
             const old_memory = self.allocatedSlice();
-            if (self.allocator.resize(old_memory, new_capacity)) {
-                self.capacity = new_capacity;
+            if (self.allocator.remap(old_memory, new_capacity)) |new_memory| {
+                self.items.ptr = new_memory.ptr;
+                self.capacity = new_memory.len;
                 return addManyAtAssumeCapacity(self, index, count);
             }
 
@@ -468,8 +467,9 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
             // the allocator implementation would pointlessly copy our
             // extra capacity.
             const old_memory = self.allocatedSlice();
-            if (self.allocator.resize(old_memory, new_capacity)) {
-                self.capacity = new_capacity;
+            if (self.allocator.remap(old_memory, new_capacity)) |new_memory| {
+                self.items.ptr = new_memory.ptr;
+                self.capacity = new_memory.len;
             } else {
                 const new_memory = try self.allocator.alignedAlloc(T, alignment, new_capacity);
                 @memcpy(new_memory[0..self.items.len], self.items);
@@ -707,15 +707,13 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) type {
        /// Its capacity is cleared, making deinit() safe but unnecessary to call.
        pub fn toOwnedSlice(self: *Self, allocator: Allocator) Allocator.Error!Slice {
            const old_memory = self.allocatedSlice();
-            if (allocator.resize(old_memory, self.items.len)) {
-                const result = self.items;
+            if (allocator.remap(old_memory, self.items.len)) |new_items| {
                 self.* = .empty;
-                return result;
+                return new_items;
             }
 
             const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
             @memcpy(new_memory, self.items);
-            @memset(self.items, undefined);
             self.clearAndFree(allocator);
             return new_memory;
         }
@@ -1031,9 +1029,9 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) type {
             }
 
             const old_memory = self.allocatedSlice();
-            if (allocator.resize(old_memory, new_len)) {
-                self.capacity = new_len;
-                self.items.len = new_len;
+            if (allocator.remap(old_memory, new_len)) |new_items| {
+                self.capacity = new_items.len;
+                self.items = new_items;
                 return;
             }
 
@@ -1099,8 +1097,9 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) type {
             // the allocator implementation would pointlessly copy our
             // extra capacity.
             const old_memory = self.allocatedSlice();
-            if (allocator.resize(old_memory, new_capacity)) {
-                self.capacity = new_capacity;
+            if (allocator.remap(old_memory, new_capacity)) |new_memory| {
+                self.items.ptr = new_memory.ptr;
+                self.capacity = new_memory.len;
             } else {
                 const new_memory = try allocator.alignedAlloc(T, alignment, new_capacity);
                 @memcpy(new_memory[0..self.items.len], self.items);
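A hedged usage note on the ArrayList changes above (test name and sizes are arbitrary): because a successful `remap` may relocate the backing memory, element pointers taken before a growth operation must be treated as invalidated, just as with the existing alloc-and-copy fallback.

const std = @import("std");

test "growth may relocate the items buffer" {
    var list = std.ArrayList(u32).init(std.testing.allocator);
    defer list.deinit();

    try list.append(1);
    const first = &list.items[0]; // points into the current allocation

    // ensureTotalCapacity may succeed via remap, which is allowed to move
    // the allocation, so `first` must not be dereferenced afterwards.
    try list.ensureTotalCapacity(4096);
    _ = first; // re-read through list.items instead
    try std.testing.expectEqual(@as(u32, 1), list.items[0]);
}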

lib/std/heap/FixedBufferAllocator.zig

Lines changed: 21 additions & 9 deletions
@@ -9,7 +9,7 @@ end_index: usize,
 buffer: []u8,
 
 pub fn init(buffer: []u8) FixedBufferAllocator {
-    return FixedBufferAllocator{
+    return .{
         .buffer = buffer,
         .end_index = 0,
     };
@@ -22,6 +22,7 @@ pub fn allocator(self: *FixedBufferAllocator) Allocator {
         .vtable = &.{
             .alloc = alloc,
             .resize = resize,
+            .remap = remap,
             .free = free,
         },
     };
@@ -36,6 +37,7 @@ pub fn threadSafeAllocator(self: *FixedBufferAllocator) Allocator {
         .vtable = &.{
             .alloc = threadSafeAlloc,
             .resize = Allocator.noResize,
+            .remap = Allocator.noRemap,
             .free = Allocator.noFree,
         },
     };
@@ -57,10 +59,10 @@ pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool {
     return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
 }
 
-pub fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
+pub fn alloc(ctx: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
     const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
     _ = ra;
-    const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
+    const ptr_align = alignment.toByteUnits();
     const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse return null;
     const adjusted_index = self.end_index + adjust_off;
     const new_end_index = adjusted_index + n;
@@ -72,12 +74,12 @@ pub fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
 pub fn resize(
     ctx: *anyopaque,
     buf: []u8,
-    log2_buf_align: u8,
+    alignment: mem.Alignment,
     new_size: usize,
     return_address: usize,
 ) bool {
     const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
-    _ = log2_buf_align;
+    _ = alignment;
     _ = return_address;
     assert(@inComptime() or self.ownsSlice(buf));
 
@@ -99,14 +101,24 @@ pub fn resize(
     return true;
 }
 
+pub fn remap(
+    context: *anyopaque,
+    memory: []u8,
+    alignment: mem.Alignment,
+    new_len: usize,
+    return_address: usize,
+) ?[*]u8 {
+    return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null;
+}
+
 pub fn free(
     ctx: *anyopaque,
     buf: []u8,
-    log2_buf_align: u8,
+    alignment: mem.Alignment,
     return_address: usize,
 ) void {
     const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
-    _ = log2_buf_align;
+    _ = alignment;
     _ = return_address;
     assert(@inComptime() or self.ownsSlice(buf));
 
@@ -115,10 +127,10 @@ pub fn free(
     }
 }
 
-fn threadSafeAlloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
+fn threadSafeAlloc(ctx: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
     const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
     _ = ra;
-    const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
+    const ptr_align = alignment.toByteUnits();
     var end_index = @atomicLoad(usize, &self.end_index, .seq_cst);
     while (true) {
         const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse return null;
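To make the behavior above concrete, a sketch of what a caller can expect from FixedBufferAllocator, assuming the public `Allocator.remap` wrapper forwards to the vtable entry added here: since `remap` just delegates to `resize`, it never relocates, so a successful call hands back the same address.

const std = @import("std");

test "FixedBufferAllocator remap resizes in place or not at all" {
    var buffer: [64]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buffer);
    const a = fba.allocator();

    const slice = try a.alloc(u8, 16);
    // Growing the most recent allocation fits within the fixed buffer, so
    // remap can succeed, and the address must be unchanged when it does.
    if (a.remap(slice, 32)) |grown| {
        try std.testing.expectEqual(@intFromPtr(slice.ptr), @intFromPtr(grown.ptr));
        try std.testing.expectEqual(@as(usize, 32), grown.len);
    }
}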

lib/std/heap/PageAllocator.zig

Lines changed: 60 additions & 39 deletions
@@ -12,18 +12,18 @@ const page_size_min = std.heap.page_size_min;
 pub const vtable: Allocator.VTable = .{
     .alloc = alloc,
     .resize = resize,
+    .remap = remap,
     .free = free,
 };
 
-fn alloc(context: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
-    const requested_alignment: mem.Alignment = @enumFromInt(log2_align);
+fn alloc(context: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
     _ = context;
     _ = ra;
     assert(n > 0);
 
     const page_size = std.heap.pageSize();
     if (n >= maxInt(usize) - page_size) return null;
-    const alignment_bytes = requested_alignment.toByteUnits();
+    const alignment_bytes = alignment.toByteUnits();
 
     if (native_os == .windows) {
         // According to official documentation, VirtualAlloc aligns to page
@@ -103,22 +103,52 @@ fn alloc(context: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
 
 fn resize(
     context: *anyopaque,
-    buf_unaligned: []u8,
-    log2_buf_align: u8,
-    new_size: usize,
+    memory: []u8,
+    alignment: mem.Alignment,
+    new_len: usize,
     return_address: usize,
 ) bool {
     _ = context;
-    _ = log2_buf_align;
+    _ = alignment;
     _ = return_address;
+    return realloc(memory, new_len, false) != null;
+}
+
+pub fn remap(
+    context: *anyopaque,
+    memory: []u8,
+    alignment: mem.Alignment,
+    new_len: usize,
+    return_address: usize,
+) ?[*]u8 {
+    _ = context;
+    _ = alignment;
+    _ = return_address;
+    return realloc(memory, new_len, true);
+}
+
+fn free(context: *anyopaque, slice: []u8, alignment: mem.Alignment, return_address: usize) void {
+    _ = context;
+    _ = alignment;
+    _ = return_address;
+
+    if (native_os == .windows) {
+        windows.VirtualFree(slice.ptr, 0, windows.MEM_RELEASE);
+    } else {
+        const buf_aligned_len = mem.alignForward(usize, slice.len, std.heap.pageSize());
+        posix.munmap(@alignCast(slice.ptr[0..buf_aligned_len]));
+    }
+}
+
+fn realloc(memory: []u8, new_len: usize, may_move: bool) ?[*]u8 {
     const page_size = std.heap.pageSize();
-    const new_size_aligned = mem.alignForward(usize, new_size, page_size);
+    const new_size_aligned = mem.alignForward(usize, new_len, page_size);
 
     if (native_os == .windows) {
-        if (new_size <= buf_unaligned.len) {
-            const base_addr = @intFromPtr(buf_unaligned.ptr);
-            const old_addr_end = base_addr + buf_unaligned.len;
-            const new_addr_end = mem.alignForward(usize, base_addr + new_size, page_size);
+        if (new_len <= memory.len) {
+            const base_addr = @intFromPtr(memory.ptr);
+            const old_addr_end = base_addr + memory.len;
+            const new_addr_end = mem.alignForward(usize, base_addr + new_len, page_size);
             if (old_addr_end > new_addr_end) {
                 // For shrinking that is not releasing, we will only decommit
                 // the pages not needed anymore.
@@ -128,40 +158,31 @@ fn resize(
                     windows.MEM_DECOMMIT,
                 );
             }
-            return true;
+            return memory.ptr;
         }
-        const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, page_size);
+        const old_size_aligned = mem.alignForward(usize, memory.len, page_size);
         if (new_size_aligned <= old_size_aligned) {
-            return true;
+            return memory.ptr;
         }
-        return false;
+        return null;
     }
 
-    const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, page_size);
-    if (new_size_aligned == buf_aligned_len)
-        return true;
+    const page_aligned_len = mem.alignForward(usize, memory.len, page_size);
+    if (new_size_aligned == page_aligned_len)
+        return memory.ptr;
 
-    if (new_size_aligned < buf_aligned_len) {
-        const ptr = buf_unaligned.ptr + new_size_aligned;
-        // TODO: if the next_mmap_addr_hint is within the unmapped range, update it
-        posix.munmap(@alignCast(ptr[0 .. buf_aligned_len - new_size_aligned]));
-        return true;
+    const mremap_available = false; // native_os == .linux;
+    if (mremap_available) {
+        // TODO: if the next_mmap_addr_hint is within the remapped range, update it
+        return posix.mremap(memory, new_len, .{ .MAYMOVE = may_move }, null) catch return null;
    }
 
-    // TODO: call mremap
-    // TODO: if the next_mmap_addr_hint is within the remapped range, update it
-    return false;
-}
-
-fn free(context: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) void {
-    _ = context;
-    _ = log2_buf_align;
-    _ = return_address;
-
-    if (native_os == .windows) {
-        windows.VirtualFree(slice.ptr, 0, windows.MEM_RELEASE);
-    } else {
-        const buf_aligned_len = mem.alignForward(usize, slice.len, std.heap.pageSize());
-        posix.munmap(@alignCast(slice.ptr[0..buf_aligned_len]));
+    if (new_size_aligned < page_aligned_len) {
+        const ptr = memory.ptr + new_size_aligned;
+        // TODO: if the next_mmap_addr_hint is within the unmapped range, update it
+        posix.munmap(@alignCast(ptr[0 .. page_aligned_len - new_size_aligned]));
+        return memory.ptr;
     }
+
+    return null;
 }
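Finally, a hedged illustration of what the disabled Linux branch above is preparing for (allocator choice and sizes are arbitrary): once `mremap` is wired up, growing a large page-level allocation through `remap` can be satisfied by remapping pages rather than copying bytes, while the explicit copy remains the caller's fallback everywhere else.

const std = @import("std");

pub fn main() !void {
    const gpa = std.heap.page_allocator;
    var buf = try gpa.alloc(u8, 8 * 1024 * 1024);
    defer gpa.free(buf);

    // Either the allocator extends or relocates the mapping cheaply, or it
    // returns null and the caller does the copy itself.
    if (gpa.remap(buf, 16 * 1024 * 1024)) |bigger| {
        buf = bigger;
    } else {
        const bigger = try gpa.alloc(u8, 16 * 1024 * 1024);
        @memcpy(bigger[0..buf.len], buf);
        gpa.free(buf);
        buf = bigger;
    }
}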
