Usage and code examples of the org.apache.hadoop.hbase.io.hfile.HFile.createReader() method


This article collects a number of Java code examples for the org.apache.hadoop.hbase.io.hfile.HFile.createReader() method and shows how it is used in practice. The examples are drawn mainly from selected projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as a useful reference. Details of the HFile.createReader() method:
Package path: org.apache.hadoop.hbase.io.hfile.HFile
Class name: HFile
Method name: createReader

HFile.createReader overview

Creates a reader with cache configuration disabled.
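
Before the excerpts from real projects, here is a minimal end-to-end sketch of the typical call sequence: open a reader, load the file info, scan the cells, and close the reader. It is not taken from any of the quoted projects; the path /tmp/example.hfile is hypothetical, and the sketch assumes the same HBase 2.x-era API used in the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.util.Bytes;

public class HFileReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical path; point this at an HFile produced by a flush or bulk load.
    Path path = new Path("/tmp/example.hfile");
    // The (fs, path, conf) overload opens the reader with the cache configuration disabled.
    HFile.Reader reader = HFile.createReader(fs, path, conf);
    try {
      reader.loadFileInfo(); // file info must be loaded before the scanner is used
      HFileScanner scanner = reader.getScanner(false, false); // no block caching, no pread
      if (scanner.seekTo()) { // position at the first cell; returns false for an empty file
        do {
          Cell cell = scanner.getCell();
          System.out.println(Bytes.toStringBinary(CellUtil.cloneRow(cell)));
        } while (scanner.next());
      }
    } finally {
      reader.close();
    }
  }
}

If block caching is needed, use one of the overloads that takes a CacheConfig, as most of the examples below do.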

Code examples

Code example source: apache/hbase

public StoreFileReader(FileSystem fs, Path path, CacheConfig cacheConf,
  boolean primaryReplicaStoreFile, AtomicInteger refCount, boolean shared, Configuration conf)
  throws IOException {
 this(HFile.createReader(fs, path, cacheConf, primaryReplicaStoreFile, conf), refCount, shared);
}

Code example source: apache/hbase

public StoreFileReader(FileSystem fs, Path path, FSDataInputStreamWrapper in, long size,
  CacheConfig cacheConf, boolean primaryReplicaStoreFile, AtomicInteger refCount,
  boolean shared, Configuration conf) throws IOException {
 this(HFile.createReader(fs, path, in, size, cacheConf, primaryReplicaStoreFile, conf), refCount,
   shared);
}

Code example source: apache/hbase

/**
* Creates reader with cache configuration disabled
* @param fs filesystem
* @param path Path to file to read
* @return an active Reader instance
* @throws IOException Will throw a CorruptHFileException
* (DoNotRetryIOException subtype) if hfile is corrupt/invalid.
*/
public static Reader createReader(FileSystem fs, Path path, Configuration conf)
  throws IOException {
 // The primaryReplicaReader is mainly used for constructing block cache key, so if we do not use
 // block cache then it is OK to set it as any value. We use true here.
 return createReader(fs, path, CacheConfig.DISABLED, true, conf);
}

Code example source: apache/hbase

@Override
public void initialize(InputSplit split, TaskAttemptContext context)
  throws IOException, InterruptedException {
 FileSplit fileSplit = (FileSplit) split;
 conf = context.getConfiguration();
 Path path = fileSplit.getPath();
 FileSystem fs = path.getFileSystem(conf);
 LOG.info("Initialize HFileRecordReader for {}", path);
 this.in = HFile.createReader(fs, path, conf);
 // The file info must be loaded before the scanner can be used.
 // This seems like a bug in HBase, but it's easily worked around.
 this.in.loadFileInfo();
 this.scanner = in.getScanner(false, false);
}

Code example source: apache/hbase

@Override
 public void bulkHFile(ColumnFamilyDescriptorBuilder builder, FileStatus hfileStatus)
   throws IOException {
  Path hfile = hfileStatus.getPath();
  try (HFile.Reader reader =
    HFile.createReader(fs, hfile, CacheConfig.DISABLED, true, getConf())) {
   if (builder.getCompressionType() != reader.getFileContext().getCompression()) {
    builder.setCompressionType(reader.getFileContext().getCompression());
    LOG.info("Setting compression " + reader.getFileContext().getCompression().name() +
      " for family " + builder.getNameAsString());
   }
   reader.loadFileInfo();
   byte[] first = reader.getFirstRowKey().get();
   byte[] last = reader.getLastRowKey().get();
   LOG.info("Trying to figure out region boundaries hfile=" + hfile + " first=" +
     Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last));
   // To eventually infer start key-end key boundaries
   Integer value = map.containsKey(first) ? map.get(first) : 0;
   map.put(first, value + 1);
   value = map.containsKey(last) ? map.get(last) : 0;
   map.put(last, value - 1);
  }
 }
});

Code example source: apache/hbase

@Override
void setUp() throws Exception {
 reader = HFile.createReader(this.fs, this.mf, new CacheConfig(this.conf), true, this.conf);
 this.reader.loadFileInfo();
}

Code example source: apache/hbase

/**
 * Create 0-length hfile and show that it fails
 */
@Test
public void testCorrupt0LengthHFile() throws IOException {
 Path f = new Path(ROOT_DIR, testName.getMethodName());
 FSDataOutputStream fsos = fs.create(f);
 fsos.close();
 try {
  Reader r = HFile.createReader(fs, f, cacheConf, true, conf);
 } catch (CorruptHFileException che) {
  // Expected failure
  return;
 }
 fail("Should have thrown exception");
}

Code example source: apache/hbase

private void readStoreFile(Path storeFilePath) throws Exception {
 // Open the file reader with block cache disabled.
 HFile.Reader reader = HFile.createReader(fs, storeFilePath, conf);
 long offset = 0;
 while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
  HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, null);
  offset += block.getOnDiskSizeWithHeader();
 }
}

Code example source: apache/hbase

/**
 * Read a storefile in the same manner as a scanner -- using non-positional reads and
 * without waiting for prefetch to complete.
 */
private void readStoreFileLikeScanner(Path storeFilePath) throws Exception {
 // Open the file
 HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConf, true, conf);
 do {
  long offset = 0;
  while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
   HFileBlock block = reader.readBlock(offset, -1, false, /*pread=*/false,
     false, true, null, null);
   offset += block.getOnDiskSizeWithHeader();
  }
 } while (!reader.prefetchComplete());
}

Code example source: apache/hbase

private int verifyHFile(Path p) throws IOException {
 Configuration conf = util.getConfiguration();
 HFile.Reader reader =
   HFile.createReader(p.getFileSystem(conf), p, new CacheConfig(conf), true, conf);
 reader.loadFileInfo();
 HFileScanner scanner = reader.getScanner(false, false);
 scanner.seekTo();
 int count = 0;
 do {
  count++;
 } while (scanner.next());
 assertTrue(count > 0);
 reader.close();
 return count;
}

Code example source: apache/hbase

private static byte[] extractHFileKey(Path path) throws Exception {
 HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
  new CacheConfig(conf), true, conf);
 try {
  reader.loadFileInfo();
  Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
  assertNotNull("Reader has a null crypto context", cryptoContext);
  Key key = cryptoContext.getKey();
  if (key == null) {
   return null;
  }
  return key.getEncoded();
 } finally {
  reader.close();
 }
}

Code example source: apache/hbase

private byte[] extractHFileKey(Path path) throws Exception {
 HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
  new CacheConfig(conf), true, conf);
 try {
  reader.loadFileInfo();
  Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
  assertNotNull("Reader has a null crypto context", cryptoContext);
  Key key = cryptoContext.getKey();
  assertNotNull("Crypto context has no key", key);
  return key.getEncoded();
 } finally {
  reader.close();
 }
}

Code example source: apache/hbase

private static byte[] extractHFileKey(Path path) throws Exception {
 HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
  new CacheConfig(conf), true, conf);
 try {
  reader.loadFileInfo();
  Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
  assertNotNull("Reader has a null crypto context", cryptoContext);
  Key key = cryptoContext.getKey();
  assertNotNull("Crypto context has no key", key);
  return key.getEncoded();
 } finally {
  reader.close();
 }
}

Code example source: apache/hbase

@Test
public void testNullMetaBlocks() throws Exception {
 for (Compression.Algorithm compressAlgo :
   HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
  Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo)
            .withBlockSize(minBlockSize).build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
    .withOutputStream(fout)
    .withFileContext(meta)
    .create();
  KeyValue kv = new KeyValue("foo".getBytes(), "f1".getBytes(), null, "value".getBytes());
  writer.append(kv);
  writer.close();
  fout.close();
  Reader reader = HFile.createReader(fs, mFile, cacheConf, true, conf);
  reader.loadFileInfo();
  assertNull(reader.getMetaBlock("non-existant", false));
 }
}

Code example source: apache/hbase

/**
 * Create a truncated hfile and verify that exception thrown.
 */
@Test
public void testCorruptTruncatedHFile() throws IOException {
 Path f = new Path(ROOT_DIR, testName.getMethodName());
 HFileContext  context = new HFileContextBuilder().build();
 Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f)
   .withFileContext(context).create();
 writeSomeRecords(w, 0, 100, false);
 w.close();
 Path trunc = new Path(f.getParent(), "trucated");
 truncateFile(fs, w.getPath(), trunc);
 try {
  Reader r = HFile.createReader(fs, trunc, cacheConf, true, conf);
 } catch (CorruptHFileException che) {
  // Expected failure
  return;
 }
 fail("Should have thrown exception");
}

Code example source: apache/hbase

/**
 * Test empty HFile.
 * Test all features work reasonably when hfile is empty of entries.
 * @throws IOException
 */
@Test
public void testEmptyHFile() throws IOException {
 Path f = new Path(ROOT_DIR, testName.getMethodName());
 HFileContext context = new HFileContextBuilder().withIncludesTags(false).build();
 Writer w =
   HFile.getWriterFactory(conf, cacheConf).withPath(fs, f).withFileContext(context).create();
 w.close();
 Reader r = HFile.createReader(fs, f, cacheConf, true, conf);
 r.loadFileInfo();
 assertFalse(r.getFirstKey().isPresent());
 assertFalse(r.getLastKey().isPresent());
}

Code example source: apache/hbase

/**
 * Method returns the total KVs in given hfile
 * @param fs File System
 * @param p HFile path
 * @return KV count in the given hfile
 * @throws IOException
 */
private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException {
 Configuration conf = util.getConfiguration();
 HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), true, conf);
 reader.loadFileInfo();
 HFileScanner scanner = reader.getScanner(false, false);
 scanner.seekTo();
 int count = 0;
 do {
  count++;
 } while (scanner.next());
 reader.close();
 return count;
}

Code example source: apache/hbase

private void readStoreFile(Path storeFilePath) throws Exception {
 // Open the file
 HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConf, true, conf);
 while (!reader.prefetchComplete()) {
  // Sleep for a bit
  Thread.sleep(1000);
 }
 // Check that all of the data blocks were preloaded
 BlockCache blockCache = cacheConf.getBlockCache().get();
 long offset = 0;
 while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
  HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, null);
  BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset);
  boolean isCached = blockCache.getBlock(blockCacheKey, true, false, true) != null;
  if (block.getBlockType() == BlockType.DATA ||
    block.getBlockType() == BlockType.ROOT_INDEX ||
    block.getBlockType() == BlockType.INTERMEDIATE_INDEX) {
   assertTrue(isCached);
  }
  offset += block.getOnDiskSizeWithHeader();
 }
}

Code example source: apache/hbase

/**
  * Method returns the total KVs in given hfile
  * @param fs File System
  * @param p HFile path
  * @return KV count in the given hfile
  * @throws IOException
  */
 private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException {
  Configuration conf = util.getConfiguration();
  HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), true, conf);
  reader.loadFileInfo();
  HFileScanner scanner = reader.getScanner(false, false);
  scanner.seekTo();
  int count = 0;
  do {
   count++;
  } while (scanner.next());
  reader.close();
  return count;
 }
}

Code example source: apache/hbase

protected void testSeekToInternals(TagUsage tagUsage) throws IOException {
 Path p = makeNewFile(tagUsage);
 FileSystem fs = TEST_UTIL.getTestFileSystem();
 Configuration conf = TEST_UTIL.getConfiguration();
 HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), true, conf);
 reader.loadFileInfo();
 assertEquals(2, reader.getDataBlockIndexReader().getRootBlockCount());
 HFileScanner scanner = reader.getScanner(false, true);
 // lies before the start of the file.
 assertEquals(-1, scanner.seekTo(toKV("a", tagUsage)));
 assertEquals(1, scanner.seekTo(toKV("d", tagUsage)));
 assertEquals("c", toRowStr(scanner.getCell()));
 // Across a block boundary now.
 // 'h' does not exist so we will get a '1' back for not found.
 assertEquals(0, scanner.seekTo(toKV("i", tagUsage)));
 assertEquals("i", toRowStr(scanner.getCell()));
 assertEquals(1, scanner.seekTo(toKV("l", tagUsage)));
 assertEquals("k", toRowStr(scanner.getCell()));
 reader.close();
 deleteTestDir(fs);
}
