本文整理了Java中org.apache.hadoop.hbase.HBaseTestingUtility.compact()
方法的一些代码示例,展示了HBaseTestingUtility.compact()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。HBaseTestingUtility.compact()
方法的具体详情如下:
包路径:org.apache.hadoop.hbase.HBaseTestingUtility
类名称:HBaseTestingUtility
方法名:compact
[英]Compact all of a table's regions in the mini hbase cluster
[中]在迷你hbase集群中压缩表的所有区域
代码示例来源:origin: apache/hbase
@Override
public void run() {
  // Background stress loop: while the shared flag stays set, randomly
  // either flush the primary region or request a table compaction.
  try {
    while (running.get()) {
      final boolean doFlush = random.nextBoolean();
      if (doFlush) {
        TestRegionServerNoMaster.flushRegion(HTU, hriPrimary);
      } else {
        // Second coin flip picks the boolean passed to compact();
        // presumably it selects major vs. minor compaction — per the
        // HBaseTestingUtility.compact(TableName, boolean) contract.
        HTU.compact(table.getName(), random.nextBoolean());
      }
    }
  } catch (Exception ex) {
    // Record only the first failure so the driving thread can surface it.
    LOG.warn(ex.toString(), ex);
    exceptions[1].compareAndSet(null, ex);
  }
}
};
代码示例来源:origin: apache/hbase
@Test
public void test() throws IOException, InterruptedException {
  // Write into NAME1, persist with a flush, major-compact, then wait
  // briefly so the JMX metrics have a chance to be published.
  try (Table t = UTIL.getConnection().getTable(NAME1)) {
    writeData(t);
    UTIL.flush(NAME1);
    UTIL.compact(NAME1, true);
    Thread.sleep(2000);
  }
  Set<String> jmxMetrics = readJmxMetricsWithRetry();
  assertNotNull(jmxMetrics);
  // Exactly five per-table metrics are expected for NAME1.
  long name1TableMetricsCount = 0;
  for (String metric : jmxMetrics) {
    if (metric.contains("MetaTable_table_" + NAME1)) {
      name1TableMetricsCount++;
    }
  }
  assertEquals(5L, name1TableMetricsCount);
  // Exactly five per-client put-request metrics are expected.
  String putWithClientMetricNameRegex = "MetaTable_client_.+_put_request.*";
  long putWithClientMetricsCount = 0;
  for (String metric : jmxMetrics) {
    if (metric.matches(putWithClientMetricNameRegex)) {
      putWithClientMetricsCount++;
    }
  }
  assertEquals(5L, putWithClientMetricsCount);
}
代码示例来源:origin: apache/hbase
@Test
public void test() throws IOException, InterruptedException {
  // Write, flush, then major-compact; afterwards scan the table and
  // confirm every row survived with the replaced value in place.
  try (Table t = UTIL.getConnection().getTable(NAME)) {
    writeData(t);
    UTIL.flush(NAME);
    UTIL.compact(NAME, true);
    Scan s = new Scan();
    s.addColumn(FAMILY, QUALIFIER);
    try (ResultScanner scanner = t.getScanner(s)) {
      for (int row = 0; row < NUM_ROWS; row++) {
        Result result = scanner.next();
        assertNotNull("The " + (row + 1) + "th result was unexpectedly null", result);
        // Each row should carry exactly one cell in FAMILY,
        // keyed by the row number and holding the replaced value.
        assertEquals(1, result.getFamilyMap(FAMILY).size());
        assertArrayEquals(Bytes.toBytes(row + 1), result.getRow());
        assertArrayEquals(Bytes.toBytes(replacedValue), result.getValue(FAMILY, QUALIFIER));
      }
      // No rows beyond NUM_ROWS may remain after the compaction.
      assertNull(scanner.next());
    }
  }
}
}
代码示例来源:origin: apache/hbase
TEST_UTIL.compact(true);
for (int i = 20; i < 300; i++) {
byte[] row = Bytes.toBytes("key" + Integer.toString(i));
代码示例来源:origin: apache/hbase
admin.flush(tn);
LOG.info("Synchronously compacting the table");
TEST_UTIL.compact(tn, true);
代码示例来源:origin: apache/hbase
@Test
public void testRegionReplicaReplicationForFlushAndCompaction() throws Exception {
  // Creates a table with region replication 3, writes batches of rows while
  // triggering flushes and compactions, and verifies the data is readable
  // from the replicas. It does NOT verify that the replicas actually pick
  // up flushed files or apply the compaction to their own stores.
  int regionReplication = 3;
  final TableName tableName = TableName.valueOf(name.getMethodName());
  HTableDescriptor htd = HTU.createTableDescriptor(tableName);
  htd.setRegionReplication(regionReplication);
  HTU.getAdmin().createTable(htd);
  // try-with-resources closes table first, then connection — same order
  // as the original explicit finally block.
  try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
      Table table = connection.getTable(tableName)) {
    // Load 6000 rows in batches of 1000, flushing and minor-compacting
    // after every batch.
    for (int start = 0; start < 6000; start += 1000) {
      LOG.info("Writing data from " + start + " to " + (start + 1000));
      HTU.loadNumericRows(table, HBaseTestingUtility.fam1, start, start + 1000);
      LOG.info("flushing table");
      HTU.flush(tableName);
      LOG.info("compacting table");
      HTU.compact(tableName, false);
    }
    verifyReplication(tableName, regionReplication, 0, 1000);
  }
}
代码示例来源:origin: apache/hbase
util.compact(tableName, true);
assertEquals(1, region.getStoreFileList(new byte[][]{familyNameBytes}).size());
assertEquals(refSFCount+1,
代码示例来源:origin: apache/hbase
TEST_UTIL.compact(tableName, true);
table.put(p);
代码示例来源:origin: apache/hbase
admin.flush(tn);
LOG.info("Synchronously compacting the table");
TEST_UTIL.compact(tn, true);
代码示例来源:origin: apache/hbase
util.compact(tableName, true);
assertNotNull(preadScanner.next());
assertNotNull(streamScanner.next());
代码示例来源:origin: apache/hbase
TEST_UTIL.compact(tableName, true);
代码示例来源:origin: apache/hbase
TEST_UTIL.compact(tableName, true);
TEST_UTIL.compact(tableName, true);
代码示例来源:origin: apache/hbase
UTIL2.compact(TABLE_NAME, true);
代码示例来源:origin: apache/hbase
HTU.compact(table.getName(), true);
代码示例来源:origin: apache/hbase
TEST_UTIL.compact(tn, false);
long numFilesAfterMinorCompaction = getNumHFilesForRegion(region);
代码示例来源:origin: apache/hbase
TEST_UTIL.compact(tn, true);
代码示例来源:origin: opencb/opencga
public void flush(String name) throws Exception {
  // Flush the named table and then force a major compaction so the
  // freshly flushed data is merged into the store files.
  final TableName tableName = TableName.valueOf(name);
  utility.get().flush(tableName);
  utility.get().compact(tableName, true);
}
}
代码示例来源:origin: org.apache.hbase/hbase-server
@Override
// Background worker: repeatedly flushes or compacts until the shared
// `running` flag is cleared; the first failure is stashed in `exceptions[1]`
// for the driving thread to inspect.
public void run() {
try {
while (running.get()) {
// Coin flip: flush the primary region, or compact the table.
if (random.nextBoolean()) {
TestRegionServerNoMaster.flushRegion(HTU, hriPrimary);
} else {
// Second coin flip is the boolean passed to compact() — presumably
// major vs. minor compaction; see HBaseTestingUtility.compact.
HTU.compact(table.getName(), random.nextBoolean());
}
}
} catch (Exception ex) {
// Log and record only the first exception (compareAndSet keeps the earliest).
LOG.warn(ex.toString(), ex);
exceptions[1].compareAndSet(null, ex);
}
}
};
代码示例来源:origin: org.apache.hbase/hbase-examples
@Test
// Writes rows, flushes and major-compacts the table, then scans it to
// verify each row still holds exactly one cell with the replaced value.
public void test() throws IOException, InterruptedException {
try (Table t = UTIL.getConnection().getTable(NAME)) {
writeData(t);
// Flush the data so it lands in store files
UTIL.flush(NAME);
// Issue a (major) compaction over those files
UTIL.compact(NAME, true);
Scan s = new Scan();
s.addColumn(FAMILY, QUALIFIER);
try (ResultScanner scanner = t.getScanner(s)) {
for (int i = 0; i < NUM_ROWS; i++) {
Result result = scanner.next();
assertNotNull("The " + (i + 1) + "th result was unexpectedly null", result);
// Each row should carry exactly one cell in FAMILY.
assertEquals(1, result.getFamilyMap(FAMILY).size());
// Row key is the 1-based row number; value is the replaced value.
assertArrayEquals(Bytes.toBytes(i + 1), result.getRow());
assertArrayEquals(Bytes.toBytes(replacedValue), result.getValue(FAMILY, QUALIFIER));
}
// No rows beyond NUM_ROWS should remain.
assertNull(scanner.next());
}
}
}
}
代码示例来源:origin: org.apache.hbase/hbase-server
@Test
public void testRegionReplicaReplicationForFlushAndCompaction() throws Exception {
// Tests a table with region replication 3. Writes some data, and causes flushes and
// compactions. Verifies that the data is readable from the replicas. Note that this
// does not test whether the replicas actually pick up flushed files and apply compaction
// to their stores
int regionReplication = 3;
final TableName tableName = TableName.valueOf(name.getMethodName());
HTableDescriptor htd = HTU.createTableDescriptor(tableName);
htd.setRegionReplication(regionReplication);
HTU.getAdmin().createTable(htd);
Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
Table table = connection.getTable(tableName);
try {
// load the data to the table: 6000 rows total, in batches of 1000,
// flushing and minor-compacting after every batch
for (int i = 0; i < 6000; i += 1000) {
LOG.info("Writing data from " + i + " to " + (i+1000));
HTU.loadNumericRows(table, HBaseTestingUtility.fam1, i, i+1000);
LOG.info("flushing table");
HTU.flush(tableName);
LOG.info("compacting table");
// false => not forced to be a major compaction
HTU.compact(tableName, false);
}
// NOTE(review): only rows 0..1000 are verified here — confirm whether
// checking the first batch alone is intentional.
verifyReplication(tableName, regionReplication, 0, 1000);
} finally {
// Close in acquisition-reverse order: table before connection.
table.close();
connection.close();
}
}
内容来源于网络,如有侵权,请联系作者删除!