-
Notifications
You must be signed in to change notification settings - Fork 507
Expand file tree
/
Copy pathBufferChunk.java
More file actions
105 lines (91 loc) · 3.17 KB
/
BufferChunk.java
File metadata and controls
105 lines (91 loc) · 3.17 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.hive.common.io.DiskRange;
import org.apache.hadoop.hive.common.io.DiskRangeList;
import java.nio.ByteBuffer;
/**
* The sections of stripe that we have read.
* This might not match diskRange - 1 disk range can be multiple buffer chunks,
* depending on DFS block boundaries.
* @since 1.1.0
*/
public class BufferChunk extends DiskRangeList {

  /** Backing buffer for this range; {@code null} until the data has been read. */
  private ByteBuffer chunk;

  /**
   * Creates a placeholder chunk covering {@code [offset, offset + length)}
   * with no data attached yet; attach it later via {@link #setChunk(ByteBuffer)}.
   *
   * @param offset starting file offset of the range
   * @param length number of bytes in the range
   */
  public BufferChunk(long offset, int length) {
    super(offset, offset + length);
    chunk = null;
  }

  /**
   * Creates a chunk wrapping already-read data.
   *
   * @param chunk  buffer whose remaining bytes are the range's data
   * @param offset starting file offset of the range; the end is derived from
   *               {@code chunk.remaining()}
   */
  public BufferChunk(ByteBuffer chunk, long offset) {
    super(offset, offset + chunk.remaining());
    this.chunk = chunk;
  }

  /**
   * Attaches the data buffer to a previously created placeholder chunk.
   *
   * @param chunk the buffer holding this range's bytes
   */
  public void setChunk(ByteBuffer chunk) {
    this.chunk = chunk;
  }

  /** @return true once a data buffer has been attached */
  @Override
  public boolean hasData() {
    return chunk != null;
  }

  @Override
  public final String toString() {
    if (chunk == null) {
      // FIX: added the missing space ("data range[") so this branch formats
      // consistently with the data-bearing branch below.
      return "data range [" + offset + ", " + end + ")";
    } else {
      // Flag ("(!)") when the buffer's size disagrees with the declared range.
      boolean makesSense = chunk.remaining() == (end - offset);
      return "data range [" + offset + ", " + end + "), size: " + chunk.remaining()
          + (makesSense ? "" : "(!)") + " type: " +
          (chunk.isDirect() ? "direct" : "array-backed");
    }
  }

  /**
   * Returns a zero-copy view of the sub-range {@code [offset, end)} of this
   * chunk, re-based to {@code offset + shiftBy}.
   *
   * <p>NOTE(review): assumes {@link #hasData()} is true — calling this on a
   * placeholder chunk throws NullPointerException at {@code chunk.slice()};
   * confirm callers guarantee the data was read first.
   *
   * @param offset  absolute start of the sub-range (within this chunk)
   * @param end     absolute end of the sub-range (within this chunk)
   * @param shiftBy amount to add to {@code offset} for the new chunk's position
   * @return a new BufferChunk sharing this chunk's underlying storage
   */
  @Override
  public DiskRange sliceAndShift(long offset, long end, long shiftBy) {
    assert offset <= end && offset >= this.offset && end <= this.end;
    assert offset + shiftBy >= 0;
    // slice() keeps the data shared; only the view's position/limit change.
    ByteBuffer sliceBuf = chunk.slice();
    int newPos = (int) (offset - this.offset);
    int newLimit = newPos + (int) (end - offset);
    try {
      sliceBuf.position(newPos);
      sliceBuf.limit(newLimit);
    } catch (Throwable t) {
      // Wrap with full range/buffer context so a bad slice is diagnosable.
      throw new RuntimeException(
          "Failed to slice buffer chunk with range" + " [" + this.offset + ", " + this.end
              + "), position: " + chunk.position() + " limit: " + chunk.limit() + ", "
              + (chunk.isDirect() ? "direct" : "array") + "; to [" + offset + ", " + end + ") "
              + t.getClass(), t);
    }
    return new BufferChunk(sliceBuf, offset + shiftBy);
  }

  /**
   * Equality is defined by the data buffer only (ByteBuffer content equality);
   * the range's offsets are deliberately not compared, matching the original
   * contract.
   */
  @Override
  public boolean equals(Object other) {
    if (other == null || other.getClass() != getClass()) {
      return false;
    }
    BufferChunk ob = (BufferChunk) other;
    // FIX: null-safe — chunk is null for a placeholder chunk (see the
    // (long, int) constructor), so the old chunk.equals(ob.chunk) threw
    // NullPointerException. Two data-less chunks now compare equal.
    return chunk == null ? ob.chunk == null : chunk.equals(ob.chunk);
  }

  /** Hash code consistent with {@link #equals(Object)}; null-safe (FIX). */
  @Override
  public int hashCode() {
    return chunk == null ? 0 : chunk.hashCode();
  }

  /** @return the backing buffer, or {@code null} if the data was not read yet */
  @Override
  public ByteBuffer getData() {
    return chunk;
  }
}