Usage of the org.apache.hadoop.io.file.tfile.Utils.readVInt() method, with code examples


This article collects a number of Java code examples for the org.apache.hadoop.io.file.tfile.Utils.readVInt() method, showing how it is used in practice. The examples are extracted from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as a practical reference. Details of Utils.readVInt() are as follows:
Package path: org.apache.hadoop.io.file.tfile.Utils
Class name: Utils
Method name: readVInt

Introduction to Utils.readVInt

Decodes a variable-length integer. Synonymous with (int)Utils#readVLong(in).
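For orientation, here is a minimal round-trip sketch, assuming only the public static writeVInt/readVInt pair from the same Utils class; the stream setup is illustrative and not taken from any of the examples below:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.file.tfile.Utils;

public class VIntRoundTrip {
 public static void main(String[] args) throws IOException {
  // Encode an int using the variable-length encoding.
  ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  DataOutputStream out = new DataOutputStream(bytes);
  Utils.writeVInt(out, 12345);
  out.flush();

  // Decode it back; small values occupy fewer bytes than a fixed 4-byte int.
  DataInputStream in =
    new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
  System.out.println(Utils.readVInt(in)); // prints 12345
 }
}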

Code examples

Code example source: org.apache.hadoop/hadoop-common

/**
 * Read the length of the next chunk.
 * 
 * @throws java.io.IOException
 *           when no more data is available.
 */
private void readLength() throws IOException {
 remain = Utils.readVInt(in);
 if (remain >= 0) {
  lastChunk = true;
 } else {
  remain = -remain;
 }
}
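The sign of the decoded value doubles as the chunk-framing flag: a non-negative length marks the last chunk of the current value, while an intermediate chunk is stored with its length negated. A minimal write-side sketch of that convention follows (writeChunkLength is a hypothetical helper, not TFile's actual chunk encoder):

// Hypothetical helper illustrating the framing convention that readLength()
// decodes: the final chunk stores +length, intermediate chunks store -length.
static void writeChunkLength(DataOutput out, int length, boolean lastChunk)
  throws IOException {
 Utils.writeVInt(out, lastChunk ? length : -length);
}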

Code example source: org.apache.hadoop/hadoop-common

public TFileIndexEntry(DataInput in) throws IOException {
 int len = Utils.readVInt(in);
 key = new byte[len];
 in.readFully(key, 0, len);
 kvEntries = Utils.readVLong(in);
}

Code example source: org.apache.hadoop/hadoop-common

/**
 * Read a String as a VInt n, followed by n bytes in Text format.
 * 
 * @param in
 *          The input stream.
 * @return The string
 * @throws IOException
 */
public static String readString(DataInput in) throws IOException {
 int length = readVInt(in);
 if (length == -1) return null;
 byte[] buffer = new byte[length];
 in.readFully(buffer);
 return Text.decode(buffer);
}
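The companion writer, Utils.writeString, emits the VInt length (or -1 for null) followed by the Text-encoded bytes, so the two methods round-trip. A short sketch (the stream setup is illustrative):

ByteArrayOutputStream bytes = new ByteArrayOutputStream();
DataOutputStream out = new DataOutputStream(bytes);
Utils.writeString(out, "hello"); // VInt length, then Text-encoded bytes
Utils.writeString(out, null);    // null is encoded as VInt -1

DataInputStream in =
  new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
String s1 = Utils.readString(in); // "hello"
String s2 = Utils.readString(in); // null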

Code example source: org.apache.hadoop/hadoop-common

public MetaIndex(DataInput in) throws IOException {
 int count = Utils.readVInt(in);
 index = new TreeMap<String, MetaIndexEntry>();
 for (int nx = 0; nx < count; nx++) {
  MetaIndexEntry indexEntry = new MetaIndexEntry(in);
  index.put(indexEntry.getMetaName(), indexEntry);
 }
}

Code example source: org.apache.hadoop/hadoop-common

public DataIndex(DataInput in) throws IOException {
 defaultCompressionAlgorithm =
   Compression.getCompressionAlgorithmByName(Utils.readString(in));
 int n = Utils.readVInt(in);
 listRegions = new ArrayList<BlockRegion>(n);
 for (int i = 0; i < n; i++) {
  BlockRegion region = new BlockRegion(in);
  listRegions.add(region);
 }
}
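Both MetaIndex and DataIndex deserialize with the same count-prefixed pattern: a VInt element count followed by that many self-describing entries. The write side mirrors it, as in this sketch (writeRegions is a hypothetical helper; it assumes BlockRegion exposes a write(DataOutput) method):

// Hypothetical write side of the count-prefixed collection pattern above.
static void writeRegions(DataOutput out, List<BlockRegion> regions)
  throws IOException {
 Utils.writeVInt(out, regions.size()); // element count first
 for (BlockRegion region : regions) {
  region.write(out);                   // then each serialized entry
 }
}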

Code example source: org.apache.hadoop/hadoop-common

index = new ArrayList<TFileIndexEntry>(entryCount);
recordNumIndex = new ArrayList<Long>(entryCount);
int size = Utils.readVInt(in); // size for the first key entry.
if (size > 0) {
 byte[] buffer = new byte[size];
 in.readFully(buffer);
 DataInputStream firstKeyInputStream =
   new DataInputStream(new ByteArrayInputStream(buffer, 0, size));
 int firstKeyLength = Utils.readVInt(firstKeyInputStream);
 firstKey = new ByteArray(new byte[firstKeyLength]);
 firstKeyInputStream.readFully(firstKey.buffer());
 // For each index entry: read its length, grow the shared buffer if
 // needed, then deserialize the entry from the buffer.
 for (int i = 0; i < entryCount; i++) {
  size = Utils.readVInt(in);
  if (buffer.length < size) {
   buffer = new byte[size];
  }
  // ... (the rest of the loop body is truncated in the source excerpt)
 }
}

Code example source: org.apache.hadoop/hadoop-common

/**
 * Check whether we have already successfully obtained the key. It also
 * initializes the valueInputStream.
 */
void checkKey() throws IOException {
 if (klen >= 0) return;
 if (atEnd()) {
  throw new EOFException("No key-value to read");
 }
 klen = -1;
 vlen = -1;
 valueChecked = false;
 klen = Utils.readVInt(blkReader);
 blkReader.readFully(keyBuffer, 0, klen);
 valueBufferInputStream.reset(blkReader);
 if (valueBufferInputStream.isLastChunk()) {
  vlen = valueBufferInputStream.getRemain();
 }
}
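Note the sentinel convention: a non-negative klen means the key has already been read, so repeated calls return immediately, and both lengths are reset to -1 before the read so that a failure mid-read leaves the entry in the "unread" state.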
