org.apache.hadoop.hbase.HBaseTestingUtility.restartHBaseCluster()方法的使用及代码示例

x33g5p2x  于2022-01-20 转载在 其他  
字(8.6k)|赞(0)|评价(0)|浏览(88)

本文整理了Java中org.apache.hadoop.hbase.HBaseTestingUtility.restartHBaseCluster()方法的一些代码示例,展示了HBaseTestingUtility.restartHBaseCluster()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。HBaseTestingUtility.restartHBaseCluster()方法的具体详情如下:
包路径:org.apache.hadoop.hbase.HBaseTestingUtility
类名称:HBaseTestingUtility
方法名:restartHBaseCluster

HBaseTestingUtility.restartHBaseCluster介绍

[英]Starts the hbase cluster up again after shutting it down previously in a test. Use this if you want to keep dfs/zk up and just stop/start hbase.
[中]在测试中先前关闭hbase集群后,再次将其启动。如果您希望保持dfs/zk持续运行、仅停止/启动hbase,请使用此方法。

代码示例

代码示例来源:origin: apache/hbase

/**
 * Brings the mini HBase cluster back up after it was shut down earlier in a
 * test, leaving the underlying DFS and ZooKeeper services running. Delegates
 * to the two-argument overload with no fixed ports.
 *
 * @param servers number of region servers to start
 * @throws IOException if the cluster fails to start
 * @throws InterruptedException if the restart is interrupted
 */
public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
 restartHBaseCluster(servers, null);
}

代码示例来源:origin: apache/hbase

/**
 * Stops the mini HBase cluster and restarts it with one fewer region server
 * than the base slave count, then re-runs test initialization.
 * NOTE(review): assumes TEST_UTIL's mini cluster is already running — confirm
 * against the enclosing test's setup.
 */
private void restartHBaseCluster() throws Exception {
  LOG.info("\n\nShutting down cluster");
  TEST_UTIL.shutdownMiniHBaseCluster();
  // Brief fixed pause before restarting; the 2s value is arbitrary.
  LOG.info("\n\nSleeping a bit");
  Thread.sleep(2000);
  TEST_UTIL.restartHBaseCluster(NUM_SLAVES_BASE - 1);
  initialize();
 }
}

代码示例来源:origin: apache/hbase

/**
 * Verifies that a cached {@link Connection} survives a full HBase cluster
 * restart: region locations cached before the restart are re-discovered
 * afterwards without recreating the connection.
 */
@Test
public void testConnectionRideOverClusterRestart() throws IOException, InterruptedException {
 Configuration config = new Configuration(TEST_UTIL.getConfiguration());
 final TableName tableName = TableName.valueOf(name.getMethodName());
 TEST_UTIL.createTable(tableName, new byte[][] {FAM_NAM}).close();
 // try-with-resources: the original leaked the connection and table if a
 // get() or the restart threw before the explicit close() calls at the end.
 try (Connection connection = ConnectionFactory.createConnection(config);
      Table table = connection.getTable(tableName)) {
  // this will cache the meta location and table's region location
  table.get(new Get(Bytes.toBytes("foo")));
  // restart HBase while keeping dfs/zk up
  TEST_UTIL.shutdownMiniHBaseCluster();
  TEST_UTIL.restartHBaseCluster(2);
  // this should be able to discover new locations for meta and table's region
  table.get(new Get(Bytes.toBytes("foo")));
 }
 TEST_UTIL.deleteTable(tableName);
}

代码示例来源:origin: apache/hbase

private void mimicSyncUpAfterPut() throws Exception {
 LOG.debug("mimicSyncUpAfterPut");
 utility1.restartHBaseCluster(1);
 utility2.shutdownMiniHBaseCluster();
 utility2.restartHBaseCluster(1);
   if (rowCount_ht1TargetAtPeer1 != 100 || rowCount_ht2TargetAtPeer1 != 200) {
    utility1.restartHBaseCluster(1);
    rowCount_ht1Source = utility1.countRows(ht1Source);
    LOG.debug("t1_syncup should have 102 rows at source, and it is " + rowCount_ht1Source);

代码示例来源:origin: apache/hbase

utility2.restartHBaseCluster(1);
  if (rowCount_ht1TargetAtPeer1 != 50 || rowCount_ht2TargetAtPeer1 != 100) {
   utility1.restartHBaseCluster(1);
   rowCount_ht1Source = utility1.countRows(ht1Source);
   LOG.debug("t1_syncup should have 51 rows at source, and it is " + rowCount_ht1Source);

代码示例来源:origin: apache/hbase

/**
 * Verifies that the per-region flushed sequence ids tracked by the master's
 * ServerManager are persisted on cluster shutdown and reloaded on restart:
 * the map captured before the restart must equal the map after.
 */
@Test
public void testFlushedSequenceIdPersistLoad() throws Exception {
 Configuration conf = TEST_UTIL.getConfiguration();
 int msgInterval = conf.getInt("hbase.regionserver.msginterval", 100);
 // insert some data into META
 TableName tableName = TableName.valueOf("testFlushSeqId");
 HTableDescriptor desc = new HTableDescriptor(tableName);
 desc.addFamily(new HColumnDescriptor(Bytes.toBytes("cf")));
 Table table = TEST_UTIL.createTable(desc, null);
 // try/finally: the original never closed the Table handle (resource leak).
 try {
  // flush META region
  TEST_UTIL.flush(TableName.META_TABLE_NAME);
  // wait for regionserver report; msginterval controls the report period
  Threads.sleep(msgInterval * 2);
  // record flush seqid before cluster shutdown
  Map<byte[], Long> regionMapBefore =
    TEST_UTIL.getHBaseCluster().getMaster().getServerManager()
      .getFlushedSequenceIdByRegion();
  // restart hbase cluster which will cause flushed sequence id persist and reload
  TEST_UTIL.getMiniHBaseCluster().shutdown();
  TEST_UTIL.restartHBaseCluster(2);
  TEST_UTIL.waitUntilNoRegionsInTransition();
  // check equality after reloading flushed sequence id map
  Map<byte[], Long> regionMapAfter =
    TEST_UTIL.getHBaseCluster().getMaster().getServerManager()
      .getFlushedSequenceIdByRegion();
  assertTrue(regionMapBefore.equals(regionMapAfter));
 } finally {
  table.close();
 }
}

代码示例来源:origin: apache/hbase

UTIL.getConfiguration().set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
  FavoredStochasticBalancer.class.getName());
UTIL.restartHBaseCluster(SLAVES);
while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
 Threads.sleep(1);

代码示例来源:origin: apache/hbase

UTIL.restartHBaseCluster(3);

代码示例来源:origin: apache/hbase

utility2.restartHBaseCluster(1);
  if (rowCount_ht1TargetAtPeer1 != 200 || rowCount_ht2TargetAtPeer1 != 400) {
   utility1.restartHBaseCluster(1);
   rowCount_ht1Source = utility1.countRows(ht1Source);
   LOG.debug("t1_syncup should have 206 rows at source, and it is " + rowCount_ht1Source);

代码示例来源:origin: apache/drill

/**
 * Checks that the Drill HBase connection manager transparently recovers from
 * both a closed connection and a full HBase cluster restart: the same query
 * must keep returning the expected row count across all three runs.
 */
@Test
public void testHBaseConnectionManager() throws Exception {
 setColumnWidth(8);
 // The identical query is issued three times; build it once.
 final String query = "SELECT\n"
   + "row_key\n"
   + "FROM\n"
   + "  hbase.`[TABLE_NAME]` tableName";
 runHBaseSQLVerifyCount(query, 8);
 /*
  * Simulate HBase connection close and ensure that the connection
  * will be reestablished automatically.
  */
 storagePlugin.getConnection().close();
 runHBaseSQLVerifyCount(query, 8);
 /*
  * Simulate HBase cluster restart and ensure that running query against
  * HBase does not require Drill cluster restart.
  */
 HBaseTestsSuite.getHBaseTestingUtility().shutdownMiniHBaseCluster();
 HBaseTestsSuite.getHBaseTestingUtility().restartHBaseCluster(1);
 runHBaseSQLVerifyCount(query, 8);
}

代码示例来源:origin: apache/hbase

UTIL.getHBaseCluster().waitUntilShutDown();
LOG.info("Starting cluster the second time");
UTIL.restartHBaseCluster(3, ports);
UTIL.waitFor(10000, () -> UTIL.getHBaseCluster().getMaster().isInitialized());
serverNode = UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates()

代码示例来源:origin: org.apache.hbase/hbase-rsgroup

/**
 * Stops the mini HBase cluster and restarts it with one fewer region server
 * than the base slave count, then re-runs test initialization.
 * NOTE(review): assumes TEST_UTIL's mini cluster is already running — confirm
 * against the enclosing test's setup.
 */
private void restartHBaseCluster() throws Exception {
  LOG.info("\n\nShutting down cluster");
  TEST_UTIL.shutdownMiniHBaseCluster();
  // Brief fixed pause before restarting; the 2s value is arbitrary.
  LOG.info("\n\nSleeping a bit");
  Thread.sleep(2000);
  TEST_UTIL.restartHBaseCluster(NUM_SLAVES_BASE - 1);
  initialize();
 }
}

代码示例来源:origin: org.apache.hbase/hbase-server

/**
 * Verifies that a cached {@link Connection} survives a full HBase cluster
 * restart: region locations cached before the restart are re-discovered
 * afterwards without recreating the connection.
 */
@Test
public void testConnectionRideOverClusterRestart() throws IOException, InterruptedException {
 Configuration config = new Configuration(TEST_UTIL.getConfiguration());
 final TableName tableName = TableName.valueOf(name.getMethodName());
 TEST_UTIL.createTable(tableName, new byte[][] {FAM_NAM}).close();
 // try-with-resources: the original leaked the connection and table if a
 // get() or the restart threw before the explicit close() calls at the end.
 try (Connection connection = ConnectionFactory.createConnection(config);
      Table table = connection.getTable(tableName)) {
  // this will cache the meta location and table's region location
  table.get(new Get(Bytes.toBytes("foo")));
  // restart HBase while keeping dfs/zk up
  TEST_UTIL.shutdownMiniHBaseCluster();
  TEST_UTIL.restartHBaseCluster(2);
  // this should be able to discover new locations for meta and table's region
  table.get(new Get(Bytes.toBytes("foo")));
 }
 TEST_UTIL.deleteTable(tableName);
}

代码示例来源:origin: org.apache.hbase/hbase-server

private void mimicSyncUpAfterPut() throws Exception {
 LOG.debug("mimicSyncUpAfterPut");
 utility1.restartHBaseCluster(1);
 utility2.shutdownMiniHBaseCluster();
 utility2.restartHBaseCluster(1);
   if (rowCount_ht1TargetAtPeer1 != 100 || rowCount_ht2TargetAtPeer1 != 200) {
    utility1.restartHBaseCluster(1);
    rowCount_ht1Source = utility1.countRows(ht1Source);
    LOG.debug("t1_syncup should have 102 rows at source, and it is " + rowCount_ht1Source);

代码示例来源:origin: org.apache.hbase/hbase-server

utility2.restartHBaseCluster(1);
  if (rowCount_ht1TargetAtPeer1 != 50 || rowCount_ht2TargetAtPeer1 != 100) {
   utility1.restartHBaseCluster(1);
   rowCount_ht1Source = utility1.countRows(ht1Source);
   LOG.debug("t1_syncup should have 51 rows at source, and it is " + rowCount_ht1Source);

代码示例来源:origin: org.apache.hbase/hbase-server

UTIL.restartHBaseCluster(3);

代码示例来源:origin: org.apache.hbase/hbase-server

UTIL.getConfiguration().set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
  FavoredStochasticBalancer.class.getName());
UTIL.restartHBaseCluster(SLAVES);
while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
 Threads.sleep(1);

代码示例来源:origin: org.apache.hbase/hbase-endpoint

utility2.restartHBaseCluster(1);
  if (rowCount_ht1TargetAtPeer1 != 200 || rowCount_ht2TargetAtPeer1 != 400) {
   utility1.restartHBaseCluster(1);
   rowCount_ht1Source = utility1.countRows(ht1Source);
   LOG.debug("t1_syncup should have 206 rows at source, and it is " + rowCount_ht1Source);

代码示例来源:origin: com.aliyun.hbase/alihbase-endpoint

utility2.restartHBaseCluster(1);
  if (rowCount_ht1TargetAtPeer1 != 200 || rowCount_ht2TargetAtPeer1 != 400) {
   utility1.restartHBaseCluster(1);
   rowCount_ht1Source = utility1.countRows(ht1Source);
   LOG.debug("t1_syncup should have 206 rows at source, and it is " + rowCount_ht1Source);

代码示例来源:origin: dremio/dremio-oss

HBaseTestsSuite.getHBaseTestingUtility().restartHBaseCluster(1);
runHBaseSQLVerifyCount("SELECT\n"
  + "row_key\n"

相关文章

HBaseTestingUtility类方法