org.apache.hadoop.hbase.regionserver.HStore.getStorefilesSize()方法的使用及代码示例

x33g5p2x  于2022-01-20 转载在 其他  
字(4.2k)|赞(0)|评价(0)|浏览(113)

本文整理了Java中org.apache.hadoop.hbase.regionserver.HStore.getStorefilesSize()方法的一些代码示例,展示了HStore.getStorefilesSize()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。HStore.getStorefilesSize()方法的具体详情如下:
包路径:org.apache.hadoop.hbase.regionserver.HStore
类名称:HStore
方法名:getStorefilesSize

HStore.getStorefilesSize介绍

暂无

代码示例

代码示例来源:origin: apache/hbase

@Override
public long getHFilesSize() {
 // Sum the sizes of only those StoreFiles that are actual HFiles.
 return getStorefilesSize(this.storeEngine.getStoreFileManager().getStorefiles(),
   storeFile -> storeFile.isHFile());
}

代码示例来源:origin: apache/hbase

@Override
public long getStorefilesSize() {
 // Every StoreFile is counted; the predicate accepts all files.
 return getStorefilesSize(this.storeEngine.getStoreFileManager().getStorefiles(),
   storeFile -> true);
}

代码示例来源:origin: apache/hbase

/**
 * Writes Puts to the table and flushes few times.
 * @return {@link Pair} of (throughput, duration).
 */
/**
 * Writes Puts to the table and flushes a few times, timing only the flushes.
 * @param table the table to write to
 * @return {@link Pair} of (throughput in bytes per second, total flush duration in nanoseconds).
 * @throws IOException if a put or flush fails
 */
private Pair<Double, Long> generateAndFlushData(Table table) throws IOException {
 // Internally, throughput is controlled after every cell write, so keep value size less for
 // better control.
 final int NUM_FLUSHES = 3, NUM_PUTS = 50, VALUE_SIZE = 200 * 1024;
 Random rand = new Random();
 long duration = 0;
 for (int i = 0; i < NUM_FLUSHES; i++) {
  // Write about 10M (10 times of throughput rate) per iteration.
  for (int j = 0; j < NUM_PUTS; j++) {
   byte[] value = new byte[VALUE_SIZE];
   rand.nextBytes(value);
   // NOTE(review): with NUM_PUTS=50, row key i * 10 + j collides across iterations
   // (e.g. i=0,j=10 equals i=1,j=0); i * NUM_PUTS + j was possibly intended — confirm.
   table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
  }
  // Only the flush itself is timed; the puts above are excluded from the duration.
  long startTime = System.nanoTime();
  hbtu.getAdmin().flush(tableName);
  duration += System.nanoTime() - startTime;
 }
 HStore store = getStoreWithName(tableName);
 assertEquals(NUM_FLUSHES, store.getStorefilesCount());
 // Convert nanoseconds to fractional seconds before dividing. The previous
 // TimeUnit.NANOSECONDS.toSeconds(duration) truncated to whole seconds, producing a
 // divide-by-zero (Infinity throughput) whenever all flushes completed in under a second.
 double seconds = duration / 1e9;
 double throughput = store.getStorefilesSize() / seconds;
 return new Pair<>(throughput, duration);
}

代码示例来源:origin: apache/hbase

// Bytes per second, assuming `duration` is in milliseconds — TODO confirm units at the call site.
double throughput = (double) store.getStorefilesSize() / duration * 1000;

代码示例来源:origin: apache/hbase

// Update the store's aggregate size counters for the incoming files, then register
// the files with the store file manager. Order preserved from the original snippet.
this.storeSize.addAndGet(getStorefilesSize(hStoreFiles, sf -> true));
this.totalUncompressedBytes.addAndGet(getTotalUmcompressedBytes(hStoreFiles));
this.storeEngine.getStoreFileManager().loadFiles(hStoreFiles);

代码示例来源:origin: apache/hbase

byte[] regionName = region.getRegionInfo().getRegionName();
HStore store = region.getStore(Bytes.toBytes("f"));
// Null-check BEFORE dereferencing: the original called store.getStorefilesSize() first,
// so a null store would throw NullPointerException instead of a clean assertion failure.
Assert.assertNotNull(store);
long expectedStoreFilesSize = store.getStorefilesSize();
Assert.assertEquals(expectedStoreFilesSize, store.getSize());

代码示例来源:origin: apache/hbase

// Expect the store files to total more than ~15 KB after the preceding writes.
assertTrue(s.getStorefilesSize() > 15*1000);

代码示例来源:origin: apache/hbase

// getSize() is expected to report the same value as getStorefilesSize().
assertEquals(store.getSize(), store.getStorefilesSize());

代码示例来源:origin: apache/hbase

// Accumulate per-store metrics; byte sizes are converted to whole megabytes (truncating).
storefiles += store.getStorefilesCount();
storeUncompressedSizeMB += (int) (store.getStoreSizeUncompressed() / 1024 / 1024);
storefileSizeMB += (int) (store.getStorefilesSize() / 1024 / 1024);

代码示例来源:origin: org.apache.hbase/hbase-server

/**
 * Writes Puts to the table and flushes few times.
 * @return {@link Pair} of (throughput, duration).
 */
/**
 * Writes Puts to the table and flushes a few times, timing only the flushes.
 * @param table the table to write to
 * @return {@link Pair} of (throughput in bytes per second, total flush duration in nanoseconds).
 * @throws IOException if a put or flush fails
 */
private Pair<Double, Long> generateAndFlushData(Table table) throws IOException {
 // Internally, throughput is controlled after every cell write, so keep value size less for
 // better control.
 final int NUM_FLUSHES = 3, NUM_PUTS = 50, VALUE_SIZE = 200 * 1024;
 Random rand = new Random();
 long duration = 0;
 for (int i = 0; i < NUM_FLUSHES; i++) {
  // Write about 10M (10 times of throughput rate) per iteration.
  for (int j = 0; j < NUM_PUTS; j++) {
   byte[] value = new byte[VALUE_SIZE];
   rand.nextBytes(value);
   // NOTE(review): with NUM_PUTS=50, row key i * 10 + j collides across iterations
   // (e.g. i=0,j=10 equals i=1,j=0); i * NUM_PUTS + j was possibly intended — confirm.
   table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
  }
  // Only the flush itself is timed; the puts above are excluded from the duration.
  long startTime = System.nanoTime();
  hbtu.getAdmin().flush(tableName);
  duration += System.nanoTime() - startTime;
 }
 HStore store = getStoreWithName(tableName);
 assertEquals(NUM_FLUSHES, store.getStorefilesCount());
 // Convert nanoseconds to fractional seconds before dividing. The previous
 // TimeUnit.NANOSECONDS.toSeconds(duration) truncated to whole seconds, producing a
 // divide-by-zero (Infinity throughput) whenever all flushes completed in under a second.
 double seconds = duration / 1e9;
 double throughput = store.getStorefilesSize() / seconds;
 return new Pair<>(throughput, duration);
}

代码示例来源:origin: org.apache.hbase/hbase-server

// Bytes per second, assuming `duration` is in milliseconds — TODO confirm units at the call site.
double throughput = (double) store.getStorefilesSize() / duration * 1000;

代码示例来源:origin: org.apache.hbase/hbase-server

byte[] regionName = region.getRegionInfo().getRegionName();
HStore store = region.getStore(Bytes.toBytes("f"));
// Null-check BEFORE dereferencing: the original called store.getStorefilesSize() first,
// so a null store would throw NullPointerException instead of a clean assertion failure.
Assert.assertNotNull(store);
long expectedStoreFilesSize = store.getStorefilesSize();
Assert.assertEquals(expectedStoreFilesSize, store.getSize());

代码示例来源:origin: org.apache.hbase/hbase-server

// Expect the store files to total more than ~15 KB after the preceding writes.
assertTrue(s.getStorefilesSize() > 15*1000);

代码示例来源:origin: org.apache.hbase/hbase-server

// getSize() is expected to report the same value as getStorefilesSize().
assertEquals(store.getSize(), store.getStorefilesSize());

相关文章

HStore类方法