This article collects code examples of the Java method org.apache.hadoop.io.IOUtils.readFully() and shows how IOUtils.readFully() is used in practice. The examples are extracted from selected projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they should be of practical reference value. Details of the IOUtils.readFully() method are as follows:
Package path: org.apache.hadoop.io.IOUtils
Class name: IOUtils
Method name: readFully
Reads len bytes in a loop.
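Before the project examples, here is the signature for reference: readFully(InputStream in, byte[] buf, int off, int len). Unlike a single InputStream.read() call, which may return fewer bytes than requested, readFully keeps reading until len bytes have been stored into buf starting at offset off, and it throws an IOException if the stream ends before that. The following minimal sketch is not taken from any of the projects below; the file path and buffer size are made-up values for illustration only.

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.io.IOUtils;

public class ReadFullyExample {
  public static void main(String[] args) throws IOException {
    // Hypothetical input file, used only to demonstrate the call.
    try (InputStream in = new FileInputStream("/tmp/example.bin")) {
      byte[] header = new byte[16];
      // Fills all 16 bytes, or throws IOException on a premature end of stream;
      // a single in.read(header) could legally return fewer bytes.
      IOUtils.readFully(in, header, 0, header.length);
    }
  }
}

This read-until-full behavior is what the examples below rely on whenever they allocate a byte[] of a known size and expect it to be completely filled.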
Code example origin: org.apache.hadoop/hadoop-common
public void write(InputStream in, int len) throws IOException {
  int newcount = count + len;
  if (newcount > buf.length) {
    byte newbuf[] = new byte[Math.max(buf.length << 1, newcount)];
    System.arraycopy(buf, 0, newbuf, 0, count);
    buf = newbuf;
  }
  IOUtils.readFully(in, buf, count, len);
  count = newcount;
}
Code example origin: org.apache.hadoop/hadoop-common
private void fillReservoir(int min) {
  if (pos >= reservoir.length - min) {
    try {
      if (stream == null) {
        stream = new FileInputStream(new File(randomDevPath));
      }
      IOUtils.readFully(stream, reservoir, 0, reservoir.length);
    } catch (IOException e) {
      throw new RuntimeException("failed to fill reservoir", e);
    }
    pos = 0;
  }
}
Code example origin: apache/hbase
FSDataInputStream s = fs.open(versionFile);
try {
  IOUtils.readFully(s, content, 0, content.length);
  if (ProtobufUtil.isPBMagicPrefix(content)) {
    version = parseVersionFrom(content);
Code example origin: apache/hbase
/**
 * Create a KeyValue reading from the raw InputStream. Named
 * <code>createKeyValueFromInputStream</code> so it doesn't clash with {@link #create(DataInput)}
 * @param in inputStream to read.
 * @param withTags whether the keyvalue should include tags or not
 * @return Created KeyValue OR if we find a length of zero, we will return null, which can be
 *         useful for marking a stream as done.
 * @throws IOException
 */
public static KeyValue createKeyValueFromInputStream(InputStream in, boolean withTags)
    throws IOException {
  byte[] intBytes = new byte[Bytes.SIZEOF_INT];
  int bytesRead = 0;
  while (bytesRead < intBytes.length) {
    int n = in.read(intBytes, bytesRead, intBytes.length - bytesRead);
    if (n < 0) {
      if (bytesRead == 0) {
        throw new EOFException();
      }
      throw new IOException("Failed read of int, read " + bytesRead + " bytes");
    }
    bytesRead += n;
  }
  byte[] bytes = new byte[Bytes.toInt(intBytes)];
  IOUtils.readFully(in, bytes, 0, bytes.length);
  return withTags ? new KeyValue(bytes, 0, bytes.length)
      : new NoTagsKeyValue(bytes, 0, bytes.length);
}
Code example origin: apache/hbase
private int readIntoArray(byte[] to, int offset, Dictionary dict) throws IOException {
  byte status = (byte)in.read();
  if (status == Dictionary.NOT_IN_DICTIONARY) {
    // status byte indicating that data to be read is not in dictionary.
    // if this isn't in the dictionary, we need to add to the dictionary.
    int length = StreamUtils.readRawVarint32(in);
    IOUtils.readFully(in, to, offset, length);
    dict.addEntry(to, offset, length);
    return length;
  } else {
    // the status byte also acts as the higher order byte of the dictionary entry.
    short dictIdx = StreamUtils.toShort(status, (byte)in.read());
    byte[] entry = dict.getEntry(dictIdx);
    if (entry == null) {
      throw new IOException("Missing dictionary entry for index " + dictIdx);
    }
    // now we write the uncompressed value.
    Bytes.putBytes(to, offset, entry, 0, entry.length);
    return entry.length;
  }
}
Code example origin: apache/hbase
bufferedBoundedStream, decompressor, 0);
IOUtils.readFully(is, dest, destOffset, uncompressedSize);
is.close();
} finally {
Code example origin: apache/hbase
int tagLen = StreamUtils.readRawVarint32(src);
offset = Bytes.putAsShort(dest, offset, tagLen);
IOUtils.readFully(src, dest, offset, tagLen);
tagDict.addEntry(dest, offset, tagLen);
offset += tagLen;
Code example origin: apache/hbase
IOUtils.readFully(istream, dest, destOffset, size);
return -1;
Code example origin: org.apache.hadoop/hadoop-hdfs
public void readDataFully(byte[] buf, int off, int len)
    throws IOException {
  IOUtils.readFully(dataIn, buf, off, len);
}
Code example origin: org.apache.hadoop/hadoop-hdfs
public void readChecksumFully(byte[] buf, int off, int len)
    throws IOException {
  IOUtils.readFully(checksumIn, buf, off, len);
}
Code example origin: apache/hbase
  tsTypeValLen = tsTypeValLen - tagsLength - KeyValue.TAGS_LENGTH_SIZE;
  IOUtils.readFully(in, backingArray, pos, tsTypeValLen);
  pos += tsTypeValLen;
  compression.tagCompressionContext.uncompressTags(in, backingArray, pos, tagsLength);
} else {
  IOUtils.readFully(in, backingArray, pos, tagsLength);
Code example origin: apache/hbase
int size = responseHeader.getCellBlockMeta().getLength();
byte[] cellBlock = new byte[size];
IOUtils.readFully(this.in, cellBlock, 0, cellBlock.length);
cellBlockScanner = this.rpcClient.cellBlockBuilder.createCellScanner(this.codec,
    this.compressor, cellBlock);
Code example origin: org.apache.hadoop/hadoop-hdfs
private static byte[][] loadINodeSection(InputStream in)
    throws IOException {
  FsImageProto.INodeSection s = FsImageProto.INodeSection
      .parseDelimitedFrom(in);
  LOG.info("Loading " + s.getNumInodes() + " inodes.");
  final byte[][] inodes = new byte[(int) s.getNumInodes()][];
  for (int i = 0; i < s.getNumInodes(); ++i) {
    int size = CodedInputStream.readRawVarint32(in.read(), in);
    byte[] bytes = new byte[size];
    IOUtils.readFully(in, bytes, 0, size);
    inodes[i] = bytes;
  }
  LOG.debug("Sorting inodes");
  Arrays.sort(inodes, INODE_BYTES_COMPARATOR);
  LOG.debug("Finished sorting inodes");
  return inodes;
}
Code example origin: apache/metron
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
  if (!processed) {
    byte[] contents = new byte[(int) fileSplit.getLength()];
    Path file = fileSplit.getPath();
    FileSystem fs = file.getFileSystem(conf);
    FSDataInputStream in = null;
    try {
      in = fs.open(file);
      IOUtils.readFully(in, contents, 0, contents.length);
      value.set(contents, 0, contents.length);
    } finally {
      IOUtils.closeStream(in);
    }
    processed = true;
    return true;
  }
  return false;
}
Code example origin: org.apache.hadoop/hadoop-hdfs
public void load(File file, boolean requireSameLayoutVersion)
    throws IOException {
  Preconditions.checkState(impl == null, "Image already loaded!");
  FileInputStream is = null;
  try {
    is = new FileInputStream(file);
    byte[] magic = new byte[FSImageUtil.MAGIC_HEADER.length];
    IOUtils.readFully(is, magic, 0, magic.length);
    if (Arrays.equals(magic, FSImageUtil.MAGIC_HEADER)) {
      FSImageFormatProtobuf.Loader loader = new FSImageFormatProtobuf.Loader(
          conf, fsn, requireSameLayoutVersion);
      impl = loader;
      loader.load(file);
    } else {
      Loader loader = new Loader(conf, fsn);
      impl = loader;
      loader.load(file);
    }
  } finally {
    IOUtils.cleanupWithLogger(LOG, is);
  }
}
Code example origin: org.apache.hadoop/hadoop-hdfs
for (int rem = opLength - CHECKSUM_LENGTH; rem > 0;) {
  int toRead = Math.min(temp.length, rem);
  IOUtils.readFully(in, temp, 0, toRead);
  checksum.update(temp, 0, toRead);
  rem -= toRead;
Code example origin: org.apache.hadoop/hadoop-hdfs
/**
 * Calculate partial block checksum.
 *
 * @return the partial block checksum, or null if there is no partial block
 * @throws IOException
 */
byte[] crcPartialBlock() throws IOException {
  int partialLength = (int) (requestLength % getBytesPerCRC());
  if (partialLength > 0) {
    byte[] buf = new byte[partialLength];
    final InputStream blockIn = getBlockInputStream(block,
        requestLength - partialLength);
    try {
      // Get the CRC of the partialLength.
      IOUtils.readFully(blockIn, buf, 0, partialLength);
    } finally {
      IOUtils.closeStream(blockIn);
    }
    checksum.update(buf, 0, partialLength);
    byte[] partialCrc = new byte[getChecksumSize()];
    checksum.writeValue(partialCrc, 0, true);
    return partialCrc;
  }
  return null;
}
Code example origin: ch.cern.hadoop/hadoop-common
public byte[] readFile(Path path, int len) throws IOException {
  DataInputStream dis = fs.open(path);
  byte[] buffer = new byte[len];
  IOUtils.readFully(dis, buffer, 0, len);
  dis.close();
  return buffer;
}
Code example origin: dremio/dremio-oss
private void checkFileData(String location) throws Exception {
  Path serverFileDirPath = new Path(location);
  assertTrue(fs.exists(serverFileDirPath));
  FileStatus[] statuses = fs.listStatus(serverFileDirPath);
  assertEquals(1, statuses.length);
  int fileSize = (int)statuses[0].getLen();
  final byte[] data = new byte[fileSize];
  FSDataInputStream inputStream = fs.open(statuses[0].getPath());
  org.apache.hadoop.io.IOUtils.readFully(inputStream, data, 0, fileSize);
  inputStream.close();
  assertEquals("{\"person_id\": 1, \"salary\": 10}", new String(data));
}