This article collects code examples of the Java method org.apache.hadoop.hbase.HBaseTestingUtility.getNumHFiles(), showing how HBaseTestingUtility.getNumHFiles() is used in practice. The examples were extracted from curated open-source projects on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of HBaseTestingUtility.getNumHFiles() are as follows:
Package: org.apache.hadoop.hbase
Class: HBaseTestingUtility
Method: getNumHFiles
Description: none provided by the source.
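Judging from the tests below (and from the HBaseTestingUtility implementation, which sums each region's open store-file count across the region servers), getNumHFiles(TableName tableName, byte[] family) returns the total number of store files (HFiles) the mini cluster's region servers currently have open for the given table and column family. A minimal usage sketch follows; the class name GetNumHFilesExample, the table "demo", and the family "f" are illustrative assumptions, not part of the API:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetNumHFilesExample {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();                          // in-process HBase cluster for tests
    TableName tableName = TableName.valueOf("demo");  // hypothetical table name
    byte[] family = Bytes.toBytes("f");               // hypothetical column family
    Table table = util.createTable(tableName, family);

    // Each flush persists the memstore as one new HFile per region.
    for (int i = 0; i < 3; i++) {
      util.loadRandomRows(table, family, 10, 100);
      util.flush(tableName);
    }
    table.close();

    // One region, three flushes: expect 3 HFiles (assuming no
    // background compaction has merged any files yet).
    int numHFiles = util.getNumHFiles(tableName, family);
    System.out.println("HFiles for family 'f': " + numHFiles);

    util.shutdownMiniCluster();
  }
}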
Code example source: apache/hbase
@Test
public void testRefreshRegionHFilesEndpoint() throws Exception {
  setUp(HRegion.class.getName());
  addHFilesToRegions();
  assertEquals(2, HTU.getNumHFiles(TABLE_NAME, FAMILY));
  callRefreshRegionHFilesEndPoint();
  assertEquals(4, HTU.getNumHFiles(TABLE_NAME, FAMILY));
}
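A note on the expected counts: getNumHFiles() reports the store files the region servers currently have open, not the raw contents of the filesystem. addHFilesToRegions() writes one new HFile directly into each region's family directory on disk, so the servers still report the original 2 files; only after callRefreshRegionHFilesEndPoint() forces the stores to re-scan their directories do all 4 files become visible.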
Code example source: apache/hbase
@Test
public void testRefreshHFilesClient() throws Exception {
  addHFilesToRegions();
  assertEquals(2, HTU.getNumHFiles(TABLE_NAME, FAMILY));
  RefreshHFilesClient tool = new RefreshHFilesClient(HTU.getConfiguration());
  assertEquals(0, ToolRunner.run(tool, new String[] { TABLE_NAME.getNameAsString() }));
  assertEquals(4, HTU.getNumHFiles(TABLE_NAME, FAMILY));
}
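This variant triggers the same refresh through the client tool instead of calling the endpoint directly. RefreshHFilesClient implements the Hadoop Tool interface (which is why it can be driven through ToolRunner.run()), taking the table name as its single command-line argument; the asserted return value of 0 is the conventional exit code for success.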
Code example source: apache/hbase (the identical example also appears in org.apache.hbase/hbase-server)
@Test
public void testCompactingATable() throws Exception {
  TableName tableName = TableName.valueOf("TestMajorCompactor");
  utility.createMultiRegionTable(tableName, FAMILY, 5);
  utility.waitTableAvailable(tableName);
  Connection connection = utility.getConnection();
  Table table = connection.getTable(tableName);
  // Write data and flush repeatedly so each region accumulates multiple store files.
  for (int i = 0; i < 5; i++) {
    utility.loadRandomRows(table, FAMILY, 50, 100);
    utility.flush(tableName);
  }
  table.close();
  int numberOfRegions = utility.getAdmin().getRegions(tableName).size();
  int numHFiles = utility.getNumHFiles(tableName, FAMILY);
  // Before the major compaction, the table should have more store files than regions.
  assertTrue(numberOfRegions < numHFiles);
  MajorCompactor compactor =
      new MajorCompactor(utility.getConfiguration(), tableName,
          Sets.newHashSet(Bytes.toString(FAMILY)), 1, System.currentTimeMillis(), 200);
  compactor.initializeWorkQueues();
  compactor.compactAllRegions();
  compactor.shutdown();
  // Verify that every store has been completely major compacted.
  numberOfRegions = utility.getAdmin().getRegions(tableName).size();
  numHFiles = utility.getNumHFiles(tableName, FAMILY);
  assertEquals(numHFiles, numberOfRegions);
}
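The closing assertion relies on the defining property of a major compaction: each store is rewritten down to a single HFile. With one column family, a fully compacted table therefore holds exactly one HFile per region, so the HFile count reported by getNumHFiles() must equal the region count.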
Code example source: com.aliyun.hbase/alihbase-examples (identical code also appears in org.apache.hbase/hbase-examples)
@Test
public void testRefreshRegionHFilesEndpoint() throws Exception {
  setUp(HRegion.class.getName());
  MasterFileSystem mfs = HTU.getMiniHBaseCluster().getMaster().getMasterFileSystem();
  Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), TABLE_NAME);
  for (Region region : cluster.getRegions(TABLE_NAME)) {
    Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName());
    Path familyDir = new Path(regionDir, Bytes.toString(FAMILY));
    HFileTestUtil.createHFile(HTU.getConfiguration(), HTU.getTestFileSystem(),
        new Path(familyDir, HFILE_NAME), FAMILY, QUALIFIER,
        Bytes.toBytes("50"), Bytes.toBytes("60"), NUM_ROWS);
  }
  assertEquals(2, HTU.getNumHFiles(TABLE_NAME, FAMILY));
  callRefreshRegionHFilesEndPoint();
  assertEquals(4, HTU.getNumHFiles(TABLE_NAME, FAMILY));
}
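This is the same endpoint test as the first apache/hbase example, but with the body of addHFilesToRegions() inlined: HFileTestUtil.createHFile() writes a fresh HFile covering the key range "50" to "60" directly into each region's family directory, bypassing the region server entirely, which is why the refresh is needed before the new files are counted.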