This article collects code examples of the Java method org.apache.hadoop.hbase.HBaseTestingUtility.getHBaseClusterInterface() and shows how HBaseTestingUtility.getHBaseClusterInterface() is used in practice. The examples are extracted from selected projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of HBaseTestingUtility.getHBaseClusterInterface() are as follows:
Package: org.apache.hadoop.hbase
Class: HBaseTestingUtility
Method: getHBaseClusterInterface
Returns the HBaseCluster instance.
The returned object can be any subclass of HBaseCluster, and tests referring to it should not assume that the cluster is a mini cluster or a distributed one. If a test only works against a mini cluster, the specific method #getMiniHBaseCluster() can be used instead, without any need to type-cast.
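Before the collected examples, here is a minimal usage sketch (an illustration, not taken from the examples below; the method name restartActiveMaster and the mini-cluster setup are assumptions): a test obtains the cluster through getHBaseClusterInterface() and drives it through the generic HBaseCluster API, so the same logic works whether the backing cluster is a mini cluster or a distributed one.

// Minimal usage sketch (hypothetical test helper, not part of the collected examples below).
public void restartActiveMaster() throws Exception {
  HBaseTestingUtility util = new HBaseTestingUtility();
  util.startMiniCluster();                                 // here the backing cluster happens to be a mini cluster
  HBaseCluster cluster = util.getHBaseClusterInterface();  // do not assume a concrete subclass
  ServerName master = cluster.getClusterMetrics().getMasterName();
  cluster.stopMaster(master);                              // stop the active master ...
  cluster.waitForMasterToStop(master, 60000);              // ... and wait up to 60s for it to go down
  cluster.startMaster(master.getHostname(), 0);            // port 0, mirroring the tests below
  cluster.waitForActiveAndReadyMaster();                   // block until a master is active again
  util.shutdownMiniCluster();
}

If a test genuinely needs mini-cluster-only behaviour, util.getMiniHBaseCluster() returns the MiniHBaseCluster directly, as noted in the description above.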
Code example source: apache/hbase (also available in the org.apache.hbase/hbase-rsgroup artifact)
public Map<TableName, Map<ServerName, List<String>>> getTableServerRegionMap()
    throws IOException {
  Map<TableName, Map<ServerName, List<String>>> map = Maps.newTreeMap();
  ClusterMetrics status = TEST_UTIL.getHBaseClusterInterface().getClusterMetrics();
  for (Map.Entry<ServerName, ServerMetrics> entry : status.getLiveServerMetrics().entrySet()) {
    ServerName serverName = entry.getKey();
    for (RegionMetrics rl : entry.getValue().getRegionMetrics().values()) {
      TableName tableName = null;
      try {
        tableName = RegionInfo.getTable(rl.getRegionName());
      } catch (IllegalArgumentException e) {
        LOG.warn("Failed parse a table name from regionname=" +
          Bytes.toStringBinary(rl.getRegionName()));
        continue;
      }
      if (!map.containsKey(tableName)) {
        map.put(tableName, new TreeMap<>());
      }
      if (!map.get(tableName).containsKey(serverName)) {
        map.get(tableName).put(serverName, new LinkedList<>());
      }
      map.get(tableName).get(serverName).add(rl.getNameAsString());
    }
  }
  return map;
}
Code example source: apache/hbase (also available in the org.apache.hbase/hbase-server artifact)
private void stopMasterAndValidateReplicaCount(final int originalReplicaCount,
    final int newReplicaCount) throws Exception {
  ServerName sn = TEST_UTIL.getHBaseClusterInterface().getClusterMetrics().getMasterName();
  TEST_UTIL.getHBaseClusterInterface().stopMaster(sn);
  TEST_UTIL.getHBaseClusterInterface().waitForMasterToStop(sn, 60000);
  List<String> metaZnodes = TEST_UTIL.getZooKeeperWatcher().getMetaReplicaNodes();
  assert(metaZnodes.size() == originalReplicaCount); // we should have what was configured before
  TEST_UTIL.getHBaseClusterInterface().getConf().setInt(HConstants.META_REPLICAS_NUM,
    newReplicaCount);
  if (TEST_UTIL.getHBaseCluster().countServedRegions() < newReplicaCount) {
    TEST_UTIL.getHBaseCluster().startRegionServer();
  }
  TEST_UTIL.getHBaseClusterInterface().startMaster(sn.getHostname(), 0);
  TEST_UTIL.getHBaseClusterInterface().waitForActiveAndReadyMaster();
  TEST_UTIL.waitFor(10000, predicateMetaHasReplicas(newReplicaCount));
  // also check if hbck returns without errors
  TEST_UTIL.getConfiguration().setInt(HConstants.META_REPLICAS_NUM,
    newReplicaCount);
  HBaseFsck hbck = HbckTestingUtil.doFsck(TEST_UTIL.getConfiguration(), false);
  HbckTestingUtil.assertNoErrors(hbck);
}
Code example source: apache/hbase (also available in the org.apache.hbase/hbase-it artifact)
@Before
public void beforeMethod() throws Exception {
  if (!initialized) {
    LOG.info("Setting up IntegrationTestRSGroup");
    LOG.info("Initializing cluster with " + NUM_SLAVES_BASE + " servers");
    TEST_UTIL = new IntegrationTestingUtility();
    TEST_UTIL.getConfiguration().set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
      RSGroupBasedLoadBalancer.class.getName());
    TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      RSGroupAdminEndpoint.class.getName());
    ((IntegrationTestingUtility) TEST_UTIL).initializeCluster(NUM_SLAVES_BASE);
    // set shared configs
    admin = TEST_UTIL.getAdmin();
    cluster = TEST_UTIL.getHBaseClusterInterface();
    rsGroupAdmin = new VerifyingRSGroupAdminClient(new RSGroupAdminClient(TEST_UTIL.getConnection()),
      TEST_UTIL.getConfiguration());
    LOG.info("Done initializing cluster");
    initialized = true;
    // cluster may not be clean
    // cleanup when initializing
    afterMethod();
  }
}
Code example source: apache/hbase (also available in the org.apache.hbase/hbase-server artifact)
ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + "," +
    Bytes.toLong(startCode));
if (!getHBaseClusterInterface().isDistributedCluster() &&
    getHBaseCluster().isKilledRS(serverName)) {
  return false;
Code example source: apache/hbase (also available in the org.apache.hbase/hbase-server artifact)
@Test
public void testShutdownOfReplicaHolder() throws Exception {
  // checks that when the server holding a meta replica is shut down, the meta replica
  // can be recovered
  try (ClusterConnection conn = (ClusterConnection)
      ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) {
    RegionLocations rl =
        conn.locateRegion(TableName.META_TABLE_NAME, Bytes.toBytes(""), false, true);
    HRegionLocation hrl = rl.getRegionLocation(1);
    ServerName oldServer = hrl.getServerName();
    TEST_UTIL.getHBaseClusterInterface().killRegionServer(oldServer);
    int i = 0;
    do {
      LOG.debug("Waiting for the replica " + hrl.getRegionInfo() + " to come up");
      Thread.sleep(10000); // wait for the detection/recovery
      rl = conn.locateRegion(TableName.META_TABLE_NAME, Bytes.toBytes(""), false, true);
      hrl = rl.getRegionLocation(1);
      i++;
    } while ((hrl == null || hrl.getServerName().equals(oldServer)) && i < 3);
    assertTrue(i != 3);
  }
}
Code example source: apache/hbase (also available in the org.apache.hbase/hbase-server artifact)
master = util.getHBaseClusterInterface().getClusterMetrics().getMasterName();
util.getHBaseClusterInterface().stopMaster(master);
util.getHBaseClusterInterface().waitForMasterToStop(master, 60000);
LOG.info("Master " + master + " stopped!");
if (!master.equals(primary)) {
  util.getHBaseClusterInterface().killRegionServer(primary);
  util.getHBaseClusterInterface().waitForRegionServerToStop(primary, 60000);
}
util.getHBaseClusterInterface().startMaster(master.getHostname(), 0);
util.getHBaseClusterInterface().startRegionServer(primary.getHostname(), 0);
util.getHBaseClusterInterface().waitForActiveAndReadyMaster();
LOG.info("Master active!");
((ClusterConnection) c).clearRegionCache();
Code example source: apache/hbase (also available in the org.apache.hbase/hbase-server artifact)
if (!getHBaseClusterInterface().isDistributedCluster()) {
Code example source: apache/hbase (also available in the org.apache.hbase/hbase-server artifact)
ServerName master = TEST_UTIL.getHBaseClusterInterface().getClusterMetrics().getMasterName();
TEST_UTIL.getHBaseClusterInterface().stopMaster(master);
TEST_UTIL.getHBaseClusterInterface().waitForMasterToStop(master, 30000);
TEST_UTIL.getHBaseClusterInterface().startMaster(master.getHostname(), master.getPort());
TEST_UTIL.getHBaseClusterInterface().waitForActiveAndReadyMaster();
for (int i = 0; i < numRegions; i++) {
  for (int j = 0; j < numReplica; j++) {