This article collects code examples for the Java class org.apache.hadoop.record.Buffer and shows how the Buffer class is used in practice. The snippets were extracted from selected projects hosted on GitHub, Stack Overflow, Maven, and similar platforms, so they serve as useful references. Details of the Buffer class:

Package: org.apache.hadoop.record.Buffer
Class name: Buffer

A byte sequence that is used as a Java native type for buffer. It is resizable and distinguishes between the count of the sequence and the current capacity.
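Before the individual snippets, a minimal self-contained sketch of typical Buffer usage may help; the class name, byte values, and printed results are illustrative assumptions, not taken from the quoted projects:

import org.apache.hadoop.record.Buffer;

public class BufferBasics {
  public static void main(String[] args) throws Exception {
    Buffer buffer = new Buffer();
    buffer.append(new byte[] { 'h', 'i' });       // count becomes 2
    buffer.append(new byte[] { '!', '?' }, 0, 1); // append a one-byte slice; count becomes 3
    // count is the logical length; capacity is the size of the backing array
    System.out.println(buffer.getCount());        // 3
    System.out.println(buffer.getCapacity());     // 3: append grows capacity to exactly count + length
    System.out.println(buffer.toString("UTF-8")); // hi!
  }
}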
Code example source: ch.cern.hadoop/hadoop-common

@Override
public Object clone() throws CloneNotSupportedException {
  Buffer result = (Buffer) super.clone();
  // Give the clone its own copy of the backing bytes so the two buffers
  // do not share storage.
  result.copy(this.get(), 0, this.getCount());
  return result;
}
Code example source: ch.cern.hadoop/hadoop-common

/**
 * Append specified bytes to the buffer.
 *
 * @param bytes byte array to be appended
 */
public void append(byte[] bytes) {
  append(bytes, 0, bytes.length);
}
Code example source: com.facebook.hadoop/hadoop-core

public boolean equals(Object other) {
  // Content-based equality: two distinct Buffers are equal when compareTo
  // reports their byte contents as identical.
  if (other instanceof Buffer && this != other) {
    return compareTo(other) == 0;
  }
  return (this == other);
}
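As a quick illustration of this equality contract (an assumed example, not from the quoted source), two buffers with identical contents compare equal even though they wrap different arrays:

import org.apache.hadoop.record.Buffer;

public class BufferEquality {
  public static void main(String[] args) {
    Buffer a = new Buffer(new byte[] { 1, 2, 3 });
    Buffer b = new Buffer(new byte[] { 1, 2, 3 });
    System.out.println(a.equals(b));    // true: byte contents match
    System.out.println(a.compareTo(b)); // 0
  }
}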
Code example source: io.hops/hadoop-common

/**
 * Convert the byte buffer to a string using a specific character encoding.
 *
 * @param charsetName Valid Java Character Set Name
 */
public String toString(String charsetName)
    throws UnsupportedEncodingException {
  return new String(this.get(), 0, this.getCount(), charsetName);
}
Code example source: com.github.jiayuhan-it/hadoop-common

/**
 * Append specified bytes to the buffer.
 *
 * @param bytes byte array to be appended
 * @param offset offset into byte array
 * @param length length of data
 */
public void append(byte[] bytes, int offset, int length) {
  // Grow the backing array to exactly count + length, then copy the new bytes in.
  setCapacity(count + length);
  System.arraycopy(bytes, offset, this.get(), count, length);
  count = count + length;
}
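A short usage sketch of the three-argument overload (class name and values assumed): only the requested slice of the source array is copied, and because setCapacity(count + length) resizes to the exact new size, each append reallocates the backing array:

import org.apache.hadoop.record.Buffer;

public class AppendSlice {
  public static void main(String[] args) {
    byte[] src = { 10, 20, 30, 40, 50 };
    Buffer buffer = new Buffer();
    buffer.append(src, 1, 3); // copies 20, 30, 40
    System.out.println(buffer.getCount()); // 3
    System.out.println(buffer.get()[0]);   // 20
  }
}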
Code example source: org.apache.hadoop/hadoop-streaming

/**
 * Reads the raw bytes following a <code>Type.LIST</code> code.
 * @return the obtained bytes sequence
 * @throws IOException
 */
public byte[] readRawList() throws IOException {
  // Re-emit the LIST type code, copy each raw element until the end of
  // the list, then close the sequence with a MARKER code.
  Buffer buffer = new Buffer(new byte[] { (byte) Type.LIST.code });
  byte[] bytes = readRaw();
  while (bytes != null) {
    buffer.append(bytes);
    bytes = readRaw();
  }
  buffer.append(new byte[] { (byte) Type.MARKER.code });
  return buffer.get();
}
Code example source: org.apache.hadoop/hadoop-common-test

/**
 * Test of append method, of class org.apache.hadoop.record.Buffer.
 */
public void testAppend() {
  final byte[] bytes = new byte[100];
  final int offset = 0;
  final int length = 100;
  for (int idx = 0; idx < 100; idx++) {
    bytes[idx] = (byte) (100 - idx);
  }
  final Buffer instance = new Buffer();
  instance.append(bytes, offset, length);
  assertEquals("Buffer size mismatch", 100, instance.getCount());
  for (int idx = 0; idx < 100; idx++) {
    assertEquals("Buffer contents corrupted", 100 - idx, instance.get()[idx]);
  }
}
Code example source: org.jvnet.hudson.hadoop/hadoop-core

/**
 * Change the capacity of the backing storage.
 * The data is preserved if newCapacity >= getCount().
 * @param newCapacity The new capacity in bytes.
 */
public void setCapacity(int newCapacity) {
  if (newCapacity < 0) {
    throw new IllegalArgumentException("Invalid capacity argument " + newCapacity);
  }
  if (newCapacity == 0) {
    this.bytes = null;
    this.count = 0;
    return;
  }
  if (newCapacity != getCapacity()) {
    byte[] data = new byte[newCapacity];
    // Shrinking below the current count truncates the logical length.
    if (newCapacity < count) {
      count = newCapacity;
    }
    if (count != 0) {
      System.arraycopy(this.get(), 0, data, 0, count);
    }
    bytes = data;
  }
}
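A small sketch (assumed example) of how setCapacity interacts with count: growing preserves the data, while shrinking below the current count truncates it:

import org.apache.hadoop.record.Buffer;

public class CapacityDemo {
  public static void main(String[] args) {
    Buffer buffer = new Buffer(new byte[] { 1, 2, 3, 4, 5 }); // count == 5
    buffer.setCapacity(10); // grow: data preserved, count still 5
    System.out.println(buffer.getCount() + " / " + buffer.getCapacity()); // 5 / 10
    buffer.setCapacity(3);  // shrink below count: count is clamped to 3
    System.out.println(buffer.getCount() + " / " + buffer.getCapacity()); // 3 / 3
  }
}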
Code example source: org.apache.hadoop/hadoop-common-test

/**
 * Test of copy method, of class org.apache.hadoop.record.Buffer.
 */
public void testCopy() {
  final byte[] bytes = new byte[10];
  final int offset = 6;
  final int length = 3;
  for (int idx = 0; idx < 10; idx++) {
    bytes[idx] = (byte) idx;
  }
  final Buffer instance = new Buffer();
  instance.copy(bytes, offset, length);
  assertEquals("copy failed", 3, instance.getCapacity());
  assertEquals("copy failed", 3, instance.get().length);
  for (int idx = 0; idx < 3; idx++) {
    assertEquals("Buffer content corrupted", idx + 6, instance.get()[idx]);
  }
}
Code example source: apache/chukwa

public void add(String key, String value) {
  // Lazily create the map under the lock; note that in this source the
  // put itself happens outside the synchronized block.
  synchronized (this) {
    if (this.mapFields == null) {
      this.mapFields = new TreeMap<String, Buffer>();
    }
  }
  // Store the value as UTF-8 bytes wrapped in a Buffer.
  this.mapFields.put(key, new Buffer(value.getBytes(Charset.forName("UTF-8"))));
}
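Reading such a value back (an assumed example, not part of the chukwa source) should decode only the first getCount() bytes rather than the whole backing array, since capacity may exceed the logical size:

import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.record.Buffer;

public class DecodeValue {
  public static void main(String[] args) throws UnsupportedEncodingException {
    Buffer value = new Buffer("hello".getBytes(StandardCharsets.UTF_8));
    // Decode only the logical contents of the buffer.
    String text = new String(value.get(), 0, value.getCount(), "UTF-8");
    System.out.println(text); // hello
  }
}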
Code example source: org.apache.hadoop/hadoop-common-test

/**
 * Test of truncate method, of class org.apache.hadoop.record.Buffer.
 */
public void testTruncate() {
  final Buffer instance = new Buffer();
  instance.setCapacity(100);
  assertEquals("setCapacity failed", 100, instance.getCapacity());
  instance.truncate();
  assertEquals("truncate failed", 0, instance.getCapacity());
}
Code example source: org.apache.hadoop/hadoop-common-test

/**
 * Test of getCapacity method, of class org.apache.hadoop.record.Buffer.
 */
public void testGetCapacity() {
  final Buffer instance = new Buffer();
  final int expResult = 0;
  final int result = instance.getCapacity();
  assertEquals("getCapacity failed", expResult, result);
  instance.setCapacity(100);
  assertEquals("setCapacity failed", 100, instance.getCapacity());
}
Code example source: org.apache.hadoop/hadoop-common-test

/**
 * Test of set method, of class org.apache.hadoop.record.Buffer.
 */
public void testSet() {
  final byte[] bytes = new byte[10];
  final Buffer instance = new Buffer();
  instance.set(bytes);
  // set() stores the array reference itself, so get() returns the same
  // object and the reference-based assertEquals passes.
  assertEquals("set failed", bytes, instance.get());
}
Code example source: org.apache.hadoop/hadoop-common-test

/**
 * Test of getCount method, of class org.apache.hadoop.record.Buffer.
 */
public void testGetCount() {
  final Buffer instance = new Buffer();
  final int expResult = 0;
  final int result = instance.getCount();
  assertEquals("getCount failed", expResult, result);
}
Code example source: org.jvnet.hudson.hadoop/hadoop-core

/**
 * Get the capacity, which is the maximum count that could be handled without
 * resizing the backing storage.
 *
 * @return The number of bytes
 */
public int getCapacity() {
  return this.get().length;
}
Code example source: ch.cern.hadoop/hadoop-common

/**
 * Serialize a Buffer to its string form for XML output.
 *
 * @param s the buffer to serialize
 * @return the string representation of the buffer
 */
static String toXMLBuffer(Buffer s) {
  return s.toString();
}
Code example source: io.hops/hadoop-common

/**
 * Create a Buffer using the byte range as the initial value.
 *
 * @param bytes Copy of this array becomes the backing storage for the object.
 * @param offset offset into byte array
 * @param length length of data
 */
public Buffer(byte[] bytes, int offset, int length) {
  copy(bytes, offset, length);
}
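A brief sketch (assumed example) showing that this constructor copies the byte range, so later changes to the source array do not affect the buffer:

import org.apache.hadoop.record.Buffer;

public class SliceConstructor {
  public static void main(String[] args) {
    byte[] src = { 0, 1, 2, 3, 4 };
    Buffer buffer = new Buffer(src, 1, 3); // copies bytes 1, 2, 3
    src[1] = 99; // mutate the source after construction
    System.out.println(buffer.get()[0]);   // still 1: the buffer owns its own copy
    System.out.println(buffer.getCount()); // 3
  }
}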
Code example source: com.facebook.hadoop/hadoop-core

/**
 * Reset the buffer to 0 size.
 */
public void reset() {
  setCapacity(0);
}