本文整理了 Java 中 org.apache.hadoop.hbase.regionserver.HStore.getStorefilesCount()
方法的一些代码示例,展示了 HStore.getStorefilesCount()
的具体用法。这些代码示例主要来源于 Github
/Stackoverflow
/Maven
等平台,从一些精选开源项目中提取而来,具有较强的参考意义,能在一定程度上帮助到你。HStore.getStorefilesCount()
方法的具体详情如下:
包路径:org.apache.hadoop.hbase.regionserver.HStore
类名称:HStore
方法名:getStorefilesCount
(该方法暂无进一步的文档说明)
代码示例来源:origin: apache/hbase
/**
 * Reports whether this store has accumulated more store files than the
 * configured blocking threshold ({@code blockingFileCount}).
 *
 * @return {@code true} if the current store file count exceeds the threshold
 */
@Override
public boolean hasTooManyStoreFiles() {
  final int currentCount = getStorefilesCount();
  return currentCount > this.blockingFileCount;
}
代码示例来源:origin: apache/hbase
/**
 * Waiting predicate: satisfied once the store has been compacted down to a
 * single store file.
 */
@Override
public boolean evaluate() throws Exception {
  final int fileCount = store.getStorefilesCount();
  return fileCount == 1;
}
代码示例来源:origin: apache/hbase
/**
 * Produces the diagnostic message shown when the waiting predicate times out,
 * reporting how many store files are still present.
 */
@Override
public String explainFailure() throws Exception {
  final int fileCount = store.getStorefilesCount();
  return "The store file count " + fileCount + " is still greater than 1";
}
});
代码示例来源:origin: apache/hbase
/**
 * Sums the number of store files across every column family store of this
 * region.
 *
 * @return total store file count over all stores
 */
public int countStoreFiles() {
  return stores.values().stream()
      .mapToInt(HStore::getStorefilesCount)
      .sum();
}
}
代码示例来源:origin: apache/hbase
/**
 * Polls the given store until it reports exactly {@code count} store files or
 * the timeout elapses, then asserts the expected count was reached.
 *
 * @param store   store to poll
 * @param count   expected number of store files
 * @param timeout maximum time to wait, in milliseconds
 * @throws InterruptedException if the polling sleep is interrupted
 */
private void waitForStoreFileCount(HStore store, int count, int timeout)
    throws InterruptedException {
  final long start = System.currentTimeMillis();
  final long deadline = start + timeout;
  while (System.currentTimeMillis() < deadline && store.getStorefilesCount() != count) {
    Thread.sleep(100);
  }
  System.out.println("start=" + start + ", now=" + System.currentTimeMillis() + ", cur=" +
      store.getStorefilesCount());
  assertEquals(count, store.getStorefilesCount());
}
代码示例来源:origin: apache/hbase
/**
 * Busy-waits (100 ms polling) until the store holds {@code count} store files
 * or {@code timeout} milliseconds have passed, then asserts the final count.
 *
 * @param store   store whose file count is awaited
 * @param count   expected number of store files
 * @param timeout maximum wait in milliseconds
 * @throws InterruptedException if interrupted while sleeping
 */
private void waitForStoreFileCount(HStore store, int count, int timeout)
    throws InterruptedException {
  long start = System.currentTimeMillis();
  for (;;) {
    boolean timedOut = start + timeout <= System.currentTimeMillis();
    if (timedOut || store.getStorefilesCount() == count) {
      break;
    }
    Thread.sleep(100);
  }
  System.out.println("start=" + start + ", now=" + System.currentTimeMillis() + ", cur=" +
      store.getStorefilesCount());
  assertEquals(count, store.getStorefilesCount());
}
代码示例来源:origin: apache/hbase
/**
 * Counts the store files of the region's {@code COLUMN_FAMILY} store.
 *
 * @return the store file count for that single column family
 * @throws IOException if the store lookup fails
 */
private int countStoreFiles() throws IOException {
  return region.getStore(COLUMN_FAMILY).getStorefilesCount();
}
代码示例来源:origin: apache/hbase
/**
 * Measures how long a major compaction takes when throughput limiting is
 * disabled: starts a one-node mini cluster, prepares store files, compacts
 * them down to a single file, and returns the elapsed wall-clock time.
 *
 * @return major compaction duration in milliseconds
 * @throws Exception if the mini cluster or compaction fails
 */
private long testCompactionWithoutThroughputLimit() throws Exception {
Configuration conf = TEST_UTIL.getConfiguration();
conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
// Wide min/max compaction bounds so all prepared files fit in one compaction.
conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200);
// Very high blocking file count so writes are never throttled during setup.
conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
// The point of the test: no throughput limiting on compaction.
conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
NoLimitThroughputController.class.getName());
TEST_UTIL.startMiniCluster(1);
try {
HStore store = prepareData();
// prepareData() is expected to leave exactly 10 store files behind.
assertEquals(10, store.getStorefilesCount());
long startTime = System.currentTimeMillis();
TEST_UTIL.getAdmin().majorCompact(tableName);
// Poll until the major compaction collapses the store to one file.
while (store.getStorefilesCount() != 1) {
Thread.sleep(20);
}
return System.currentTimeMillis() - startTime;
} finally {
// Always tear the mini cluster down, even if the compaction wait fails.
TEST_UTIL.shutdownMiniCluster();
}
}
代码示例来源:origin: apache/hbase
// NOTE(review): truncated snippet — the enclosing method and its closing
// braces are not shown in this excerpt; it mirrors the try block of the
// throughput-limit compaction test above.
try {
HStore store = prepareData();
// prepareData() is expected to leave exactly 10 store files behind.
assertEquals(10, store.getStorefilesCount());
long startTime = System.currentTimeMillis();
TEST_UTIL.getAdmin().majorCompact(tableName);
// Poll until the major compaction collapses the store to one file.
while (store.getStorefilesCount() != 1) {
Thread.sleep(20);
代码示例来源:origin: apache/hbase
/**
 * Seeds the given region with two HFiles for {@code family}, then runs a
 * major compaction so the replaced files end up in the archive.
 *
 * @param region region to load and compact
 * @param family column family to populate
 * @throws IOException if flushing or compacting fails
 */
private void loadFlushAndCompact(HRegion region, byte[] family) throws IOException {
  // Flush twice so the store holds at least two HFiles.
  createHFileInRegion(region, family);
  createHFileInRegion(region, family);
  HStore targetStore = region.getStore(family);
  int fileCount = targetStore.getStorefilesCount();
  assertTrue("Don't have the expected store files, wanted >= 2 store files, but was:" + fileCount,
      fileCount >= 2);
  // Major-compact the files into one so the originals get archived.
  LOG.debug("Compacting stores");
  region.compact(true);
}
代码示例来源:origin: apache/hbase
/**
 * Verifies that {@code refreshStoreFiles()} only swaps in a new file set when
 * the files on disk actually changed: the first refresh (after a file was
 * added behind the store's back) must call {@code replaceStoreFiles}, while a
 * second refresh with no on-disk changes must not trigger another replacement.
 */
@Test
public void testRefreshStoreFilesNotChanged() throws IOException {
  init(name.getMethodName());
  assertEquals(0, this.store.getStorefilesCount());
  // add some data, flush
  this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null), null);
  flush(1);
  // add one more file directly on the filesystem, bypassing the store
  addStoreFile();
  HStore spiedStore = spy(store);
  // call first time after files changed
  spiedStore.refreshStoreFiles();
  assertEquals(2, this.store.getStorefilesCount());
  verify(spiedStore, times(1)).replaceStoreFiles(any(), any());
  // call second time; nothing changed on disk in between
  spiedStore.refreshStoreFiles();
  // BUGFIX: the original `verify(spiedStore, times(0)).replaceStoreFiles(null, null)`
  // was vacuous — the earlier real invocation used non-null collections, so a
  // times(0) check against literal nulls passes regardless of whether a second
  // replacement happened. Asserting the total matched-call count is still 1
  // actually detects an unexpected extra replacement.
  verify(spiedStore, times(1)).replaceStoreFiles(any(), any());
}
代码示例来源:origin: apache/hbase
/**
 * Writes Puts to the table and flushes few times.
 *
 * @param table table to write to
 * @return {@link Pair} of (throughput in bytes per second, flush duration in
 *         nanoseconds)
 * @throws IOException if a put or flush fails
 */
private Pair<Double, Long> generateAndFlushData(Table table) throws IOException {
  // Internally, throughput is controlled after every cell write, so keep value size less for
  // better control.
  final int NUM_FLUSHES = 3, NUM_PUTS = 50, VALUE_SIZE = 200 * 1024;
  Random rand = new Random();
  long duration = 0;
  for (int i = 0; i < NUM_FLUSHES; i++) {
    // Write about 10M (10 times of throughput rate) per iteration.
    for (int j = 0; j < NUM_PUTS; j++) {
      byte[] value = new byte[VALUE_SIZE];
      rand.nextBytes(value);
      table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
    }
    // Only the flush itself is timed; the puts above are excluded.
    long startTime = System.nanoTime();
    hbtu.getAdmin().flush(tableName);
    duration += System.nanoTime() - startTime;
  }
  HStore store = getStoreWithName(tableName);
  assertEquals(NUM_FLUSHES, store.getStorefilesCount());
  // BUGFIX: TimeUnit.NANOSECONDS.toSeconds(duration) truncates to whole
  // seconds, losing precision and yielding Infinity (division by zero) when
  // the flushes take under one second; use fractional seconds instead.
  double seconds = duration / 1e9;
  double throughput = store.getStorefilesSize() / seconds;
  return new Pair<>(throughput, duration);
}
代码示例来源:origin: apache/hbase
// NOTE(review): fragment — 's' is an HStore declared outside this excerpt.
// Snapshot the store file count before the (elided) operation under test...
int initialFiles = s.getStorefilesCount();
// ...and assert the operation left the count unchanged.
assertEquals(initialFiles, s.getStorefilesCount());
代码示例来源:origin: apache/hbase
// NOTE(review): fragment of a refreshStoreFiles test — the statements that
// add or remove store files between these assertions are elided in this
// excerpt, so the changing expected counts reflect off-screen mutations.
init(name.getMethodName());
assertEquals(0, this.store.getStorefilesCount());
assertEquals(0, this.store.getStorefilesCount());
assertEquals(1, this.store.getStorefilesCount());
assertEquals(1, this.store.getStorefilesCount());
// refresh picks up a file added behind the store's back
store.refreshStoreFiles();
assertEquals(2, this.store.getStorefilesCount());
addStoreFile();
// count unchanged until the next refresh observes the new file
assertEquals(2, this.store.getStorefilesCount());
store.refreshStoreFiles();
assertEquals(5, this.store.getStorefilesCount());
assertEquals(5, this.store.getStorefilesCount());
store.refreshStoreFiles();
// refresh can also drop files removed on disk
assertEquals(4, this.store.getStorefilesCount());
assertEquals(4, this.store.getStorefilesCount());
store.refreshStoreFiles();
assertEquals(1, this.store.getStorefilesCount());
assertEquals(0, this.store.getStorefilesCount());
代码示例来源:origin: apache/hbase
/**
 * Increments with periodic pauses so in-memory compaction can keep up, then
 * verifies the aggregated sum and that nothing was ever flushed to disk.
 */
@Test
public void test() throws Exception {
  // sleep every 10 loops to give memstore compaction enough time to finish before reaching the
  // flush size.
  doIncrement(10);
  assertSum();
  HRegion firstRegion = UTIL.getHBaseCluster().findRegionsForTable(NAME).get(0);
  HStore store = firstRegion.getStore(FAMILY);
  // should have no store files created as we have done aggregating all in memory
  assertEquals(0, store.getStorefilesCount());
}
}
代码示例来源:origin: apache/hbase
/**
 * Blocks until every store of the given table has completed a major
 * compaction: each store must first report exactly one store file, and that
 * file must not be marked as compacted-away.
 *
 * NOTE(review): this polls with no overall timeout — if compaction never
 * finishes the caller hangs; consider confirming a surrounding test timeout.
 *
 * @param tableName table whose regions/stores are awaited
 * @throws IOException if region or store access fails
 * @throws InterruptedException if a polling sleep is interrupted
 */
private static void waitForCompaction(TableName tableName)
throws IOException, InterruptedException {
boolean compacted = false;
for (Region region : TEST_UTIL.getRSForFirstRegionInTable(tableName)
.getRegions(tableName)) {
for (HStore store : ((HRegion) region).getStores()) {
// reset per store: each store must individually reach the compacted state
compacted = false;
while (!compacted) {
if (store.getStorefiles() != null) {
// first wait until the store is down to a single file
while (store.getStorefilesCount() != 1) {
Thread.sleep(100);
}
// then wait for a live (not compacted-away) file to be visible
for (HStoreFile storefile : store.getStorefiles()) {
if (!storefile.isCompactedAway()) {
compacted = true;
break;
}
// this file is still marked compacted-away; pause before re-checking
Thread.sleep(100);
}
} else {
// no store files at all: nothing to wait for in this store
break;
}
}
}
}
}
代码示例来源:origin: apache/hbase
/**
 * Loads ten store files, fires a major compaction, and waits up to 30 seconds
 * for the expired files to be purged down to a single store file.
 */
@Test
public void testPurgeExpiredFiles() throws Exception {
  final HStore store = prepareData();
  assertEquals(10, store.getStorefilesCount());
  TEST_UTIL.getAdmin().majorCompact(tableName);
  // Named predicate: compaction is done once a single store file remains.
  ExplainingPredicate<Exception> compactedToOne = new ExplainingPredicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return store.getStorefilesCount() == 1;
    }

    @Override
    public String explainFailure() throws Exception {
      return "The store file count " + store.getStorefilesCount() + " is still greater than 1";
    }
  };
  TEST_UTIL.waitFor(30000, compactedToOne);
}
代码示例来源:origin: apache/hbase
/**
 * Exercises time-to-live style expiry: rows older than the expire-before
 * watermark must disappear after flush/major compaction, while newer rows
 * survive.
 *
 * NOTE(review): relies on real wall-clock sleeps (5s each) for the expiry
 * watermark to take effect — timing-sensitive by design.
 */
@Test
public void test() throws IOException, KeeperException, InterruptedException {
long now = System.currentTimeMillis();
// rows 0..100 written 10s in the past
put(0, 100, now - 10000);
assertValueEquals(0, 100);
// expire everything older than 5s ago, i.e. all rows written above
setExpireBefore(now - 5000);
Thread.sleep(5000);
UTIL.getAdmin().flush(NAME);
assertNotExists(0, 100);
// re-populate with fresher timestamps across two flushes
put(0, 50, now - 1000);
UTIL.getAdmin().flush(NAME);
put(50, 100, now - 100);
UTIL.getAdmin().flush(NAME);
assertValueEquals(0, 100);
// move the watermark so only the older half (0..50) expires
setExpireBefore(now - 500);
Thread.sleep(5000);
UTIL.getAdmin().majorCompact(NAME);
// wait (up to 30s) for the major compaction to leave a single store file
UTIL.waitFor(30000, () -> UTIL.getHBaseCluster().getRegions(NAME).iterator().next()
.getStore(FAMILY).getStorefilesCount() == 1);
assertNotExists(0, 50);
assertValueEquals(50, 100);
}
}
代码示例来源:origin: apache/hbase
// NOTE(review): isolated assertion from an elided test — two store files are
// expected at this point in the surrounding (off-screen) scenario.
assertEquals(2, this.store.getStorefilesCount());
代码示例来源:origin: apache/hbase
/**
 * Increments without pauses, confirms the raw (un-aggregated) cells are still
 * visible to a scan, then flushes and major-compacts until a single store
 * file remains and only the aggregated cells survive.
 */
@Test
public void test() throws Exception {
  doIncrement(0);
  assertSum();
  // we do not hack scan operation so using scan we could get the original values added into the
  // table.
  try (ResultScanner scanner = TABLE.getScanner(new Scan().withStartRow(ROW)
      .withStopRow(ROW, true).addFamily(FAMILY).readAllVersions().setAllowPartialResults(true))) {
    Result result = scanner.next();
    assertTrue(result.rawCells().length > 2);
  }
  UTIL.flush(NAME);
  HRegion region = UTIL.getHBaseCluster().findRegionsForTable(NAME).get(0);
  HStore store = region.getStore(FAMILY);
  // keep major-compacting until everything collapses into one store file
  do {
    region.compact(true);
  } while (store.getStorefilesCount() != 1);
  assertSum();
  // Should only have two cells after flush and major compaction
  try (ResultScanner scanner = TABLE.getScanner(new Scan().withStartRow(ROW)
      .withStopRow(ROW, true).addFamily(FAMILY).readAllVersions().setAllowPartialResults(true))) {
    Result result = scanner.next();
    assertEquals(2, result.rawCells().length);
  }
}
}
内容来源于网络,如有侵权,请联系作者删除!