org.apache.hadoop.hbase.client.Admin.getRegionMetrics()方法的使用及代码示例

x33g5p2x  于2022-01-15 转载在 其他  
字(10.2k)|赞(0)|评价(0)|浏览(268)

本文整理了Java中org.apache.hadoop.hbase.client.Admin.getRegionMetrics()方法的一些代码示例，展示了Admin.getRegionMetrics()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台，是从一些精选项目中提取出来的代码，具有较强的参考意义，能在一定程度上帮助到你。Admin.getRegionMetrics()方法的具体详情如下：
包路径:org.apache.hadoop.hbase.client.Admin
类名称:Admin
方法名:getRegionMetrics

Admin.getRegionMetrics介绍

[英]Get RegionMetrics of all regions hosted on a regionserver.
[中]获取regionserver上承载的所有区域的RegionMetrics。

代码示例

代码示例来源:origin: apache/hbase

/**
 * Get {@link RegionMetrics} of all regions hosted on a regionserver.
 * <p>
 * Convenience overload: delegates to the two-argument {@code getRegionMetrics(serverName, table)}
 * with a {@code null} table, i.e. metrics for every region on the server are returned.
 *
 * @param serverName region server from which {@link RegionMetrics} is required.
 * @return a {@link RegionMetrics} list of all regions hosted on a region server
 * @throws IOException if a remote or network exception occurs
 */
default List<RegionMetrics> getRegionMetrics(ServerName serverName) throws IOException {
 return getRegionMetrics(serverName, null);
}

代码示例来源:origin: apache/hbase

/**
 * Builds a Mockito {@link Admin} stub whose region-metrics lookup for the test table
 * "sizeTestTable" on server {@code sn} yields exactly the supplied {@link RegionMetrics}.
 */
private Admin mockAdmin(RegionMetrics... regionLoadArray) throws Exception {
 List<RegionMetrics> metricsList = new ArrayList<>();
 for (RegionMetrics metrics : regionLoadArray) {
  metricsList.add(metrics);
 }
 Admin admin = Mockito.mock(Admin.class);
 when(admin.getRegionMetrics(sn, TableName.valueOf("sizeTestTable")))
   .thenReturn(metricsList);
 when(admin.getConfiguration()).thenReturn(configuration);
 return admin;
}

代码示例来源:origin: apache/hbase

/**
 * Fills {@code sizeMap} with the store-file size, in bytes, of each region of the table
 * served by {@code regionLocator}. Does nothing when disabled via configuration or when
 * the table is a system table.
 */
private void init(RegionLocator regionLocator, Admin admin)
  throws IOException {
 if (!enabled(admin.getConfiguration())) {
  LOG.info("Region size calculation disabled.");
  return;
 }
 if (regionLocator.getName().isSystemTable()) {
  LOG.info("Region size calculation disabled for system tables.");
  return;
 }
 LOG.info("Calculating region sizes for table \"" + regionLocator.getName() + "\".");
 // Query each server hosting a region of this table for its per-region metrics.
 for (ServerName server : getRegionServersOfTable(regionLocator)) {
  for (RegionMetrics metrics : admin.getRegionMetrics(server, regionLocator.getName())) {
   // Store-file size is reported in megabytes; convert to bytes before storing.
   long sizeBytes = ((long) metrics.getStoreFileSize().get(Size.Unit.MEGABYTE)) * MEGABYTE;
   sizeMap.put(metrics.getRegionName(), sizeBytes);
   if (LOG.isDebugEnabled()) {
    LOG.debug("Region " + metrics.getNameAsString() + " has size " + sizeBytes);
   }
  }
 }
 LOG.debug("Region sizes calculated");
}

代码示例来源:origin: apache/hbase

List<RegionInfo> regions = admin.getRegions(serverName);
Collection<RegionMetrics> regionMetricsList =
  admin.getRegionMetrics(serverName);
checkRegionsAndRegionMetrics(regions, regionMetricsList);
for (ServerName serverName : admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
  .getLiveServerMetrics().keySet()) {
 regionMetrics.addAll(admin.getRegionMetrics(serverName, table));
ServerName serverName = entry.getKey();
ServerMetrics serverMetrics = entry.getValue();
List<RegionMetrics> regionMetrics = admin.getRegionMetrics(serverName);
LOG.debug("serverName=" + serverName + ", getRegionLoads=" +
 serverMetrics.getRegionMetrics().keySet().stream().map(r -> Bytes.toString(r)).

代码示例来源:origin: apache/hbase

LOG.info("serverName=" + serverName + ", regions=" +
   regions.stream().map(r -> r.getRegionNameAsString()).collect(Collectors.toList()));
 Collection<RegionLoad> regionLoads = admin.getRegionMetrics(serverName)
  .stream().map(r -> new RegionLoad(r)).collect(Collectors.toList());
 LOG.info("serverName=" + serverName + ", regionLoads=" +
 for (ServerName serverName : admin
   .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet()) {
  regionLoads.addAll(admin.getRegionMetrics(serverName, table)
   .stream().map(r -> new RegionLoad(r)).collect(Collectors.toList()));
for (ServerName serverName : clusterStatus.getServers()) {
 ServerLoad serverLoad = clusterStatus.getLoad(serverName);
 Map<byte[], RegionLoad> regionLoads = admin.getRegionMetrics(serverName).stream()
  .collect(Collectors.toMap(e -> e.getRegionName(), e -> new RegionLoad(e),
   (v1, v2) -> {

代码示例来源:origin: org.apache.hbase/hbase-client

/**
 * Get {@link RegionMetrics} of all regions hosted on a regionserver.
 * <p>
 * Convenience overload: delegates to the two-argument {@code getRegionMetrics(serverName, table)}
 * with a {@code null} table, i.e. metrics for every region on the server are returned.
 *
 * @param serverName region server from which {@link RegionMetrics} is required.
 * @return a {@link RegionMetrics} list of all regions hosted on a region server
 * @throws IOException if a remote or network exception occurs
 */
default List<RegionMetrics> getRegionMetrics(ServerName serverName) throws IOException {
 return getRegionMetrics(serverName, null);
}

代码示例来源:origin: hugegraph/hugegraph

/**
 * Sums the on-disk store-file size and in-memory memstore size of every region of the
 * given table, across all region servers.
 *
 * @param table table name within {@code this.namespace}
 * @return total size in bytes (store files + memstores)
 * @throws IOException if a remote or network exception occurs
 */
public long storeSize(String table) throws IOException {
    long total = 0;
    // The table name is loop-invariant; resolve it once instead of per region server.
    TableName tableName = TableName.valueOf(this.namespace, table);
    try (Admin admin = this.hbase.getAdmin()) {
        for (ServerName rs : admin.getRegionServers()) {
            // NOTE: we can use getLoad() before hbase 2.0
            //ServerLoad load = admin.getClusterStatus().getLoad(rs);
            //total += load.getStorefileSizeMB() * Bytes.MB;
            //total += load.getMemStoreSizeMB() * Bytes.MB;
            for (RegionMetrics m : admin.getRegionMetrics(rs, tableName)) {
                total += m.getStoreFileSize().getLongValue();
                total += m.getMemStoreSize().getLongValue();
            }
        }
    }
    return total;
}

代码示例来源:origin: com.aliyun.hbase/alihbase-client

/**
 * Get {@link RegionMetrics} of all regions hosted on a regionserver.
 * <p>
 * Convenience overload: delegates to the two-argument {@code getRegionMetrics(serverName, table)}
 * with a {@code null} table, i.e. metrics for every region on the server are returned.
 *
 * @param serverName region server from which {@link RegionMetrics} is required.
 * @return a {@link RegionMetrics} list of all regions hosted on a region server
 * @throws IOException if a remote or network exception occurs
 */
default List<RegionMetrics> getRegionMetrics(ServerName serverName) throws IOException {
 return getRegionMetrics(serverName, null);
}

代码示例来源:origin: com.aliyun.hbase/alihbase-mapreduce

/**
 * Builds a Mockito {@link Admin} stub whose region-metrics lookup for the test table
 * "sizeTestTable" on server {@code sn} yields exactly the supplied {@link RegionMetrics}.
 */
private Admin mockAdmin(RegionMetrics... regionLoadArray) throws Exception {
 List<RegionMetrics> metricsList = new ArrayList<>();
 for (RegionMetrics metrics : regionLoadArray) {
  metricsList.add(metrics);
 }
 Admin admin = Mockito.mock(Admin.class);
 when(admin.getRegionMetrics(sn, TableName.valueOf("sizeTestTable")))
   .thenReturn(metricsList);
 when(admin.getConfiguration()).thenReturn(configuration);
 return admin;
}

代码示例来源:origin: org.apache.hbase/hbase-mapreduce

/**
 * Builds a Mockito {@link Admin} stub whose region-metrics lookup for the test table
 * "sizeTestTable" on server {@code sn} yields exactly the supplied {@link RegionMetrics}.
 */
private Admin mockAdmin(RegionMetrics... regionLoadArray) throws Exception {
 List<RegionMetrics> metricsList = new ArrayList<>();
 for (RegionMetrics metrics : regionLoadArray) {
  metricsList.add(metrics);
 }
 Admin admin = Mockito.mock(Admin.class);
 when(admin.getRegionMetrics(sn, TableName.valueOf("sizeTestTable")))
   .thenReturn(metricsList);
 when(admin.getConfiguration()).thenReturn(configuration);
 return admin;
}

代码示例来源:origin: com.aliyun.hbase/alihbase-mapreduce

/**
 * Fills {@code sizeMap} with the store-file size, in bytes, of each region of the table
 * served by {@code regionLocator}. Does nothing when disabled via configuration or when
 * the table is a system table.
 */
private void init(RegionLocator regionLocator, Admin admin)
  throws IOException {
 if (!enabled(admin.getConfiguration())) {
  LOG.info("Region size calculation disabled.");
  return;
 }
 if (regionLocator.getName().isSystemTable()) {
  LOG.info("Region size calculation disabled for system tables.");
  return;
 }
 LOG.info("Calculating region sizes for table \"" + regionLocator.getName() + "\".");
 // Query each server hosting a region of this table for its per-region metrics.
 for (ServerName server : getRegionServersOfTable(regionLocator)) {
  for (RegionMetrics metrics : admin.getRegionMetrics(server, regionLocator.getName())) {
   // Store-file size is reported in megabytes; convert to bytes before storing.
   long sizeBytes = ((long) metrics.getStoreFileSize().get(Size.Unit.MEGABYTE)) * MEGABYTE;
   sizeMap.put(metrics.getRegionName(), sizeBytes);
   if (LOG.isDebugEnabled()) {
    LOG.debug("Region " + metrics.getNameAsString() + " has size " + sizeBytes);
   }
  }
 }
 LOG.debug("Region sizes calculated");
}

代码示例来源:origin: org.apache.hbase/hbase-mapreduce

/**
 * Fills {@code sizeMap} with the store-file size, in bytes, of each region of the table
 * served by {@code regionLocator}. Does nothing when disabled via configuration or when
 * the table is a system table.
 */
private void init(RegionLocator regionLocator, Admin admin)
  throws IOException {
 if (!enabled(admin.getConfiguration())) {
  LOG.info("Region size calculation disabled.");
  return;
 }
 if (regionLocator.getName().isSystemTable()) {
  LOG.info("Region size calculation disabled for system tables.");
  return;
 }
 LOG.info("Calculating region sizes for table \"" + regionLocator.getName() + "\".");
 // Query each server hosting a region of this table for its per-region metrics.
 for (ServerName server : getRegionServersOfTable(regionLocator)) {
  for (RegionMetrics metrics : admin.getRegionMetrics(server, regionLocator.getName())) {
   // Store-file size is reported in megabytes; convert to bytes before storing.
   long sizeBytes = ((long) metrics.getStoreFileSize().get(Size.Unit.MEGABYTE)) * MEGABYTE;
   sizeMap.put(metrics.getRegionName(), sizeBytes);
   if (LOG.isDebugEnabled()) {
    LOG.debug("Region " + metrics.getNameAsString() + " has size " + sizeBytes);
   }
  }
 }
 LOG.debug("Region sizes calculated");
}

代码示例来源:origin: org.apache.hbase/hbase-server

List<RegionInfo> regions = admin.getRegions(serverName);
Collection<RegionMetrics> regionMetricsList =
  admin.getRegionMetrics(serverName);
checkRegionsAndRegionMetrics(regions, regionMetricsList);
for (ServerName serverName : admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
  .getLiveServerMetrics().keySet()) {
 regionMetrics.addAll(admin.getRegionMetrics(serverName, table));
ServerName serverName = entry.getKey();
ServerMetrics serverMetrics = entry.getValue();
List<RegionMetrics> regionMetrics = admin.getRegionMetrics(serverName);
LOG.debug("serverName=" + serverName + ", getRegionLoads=" +
 serverMetrics.getRegionMetrics().keySet().stream().map(r -> Bytes.toString(r)).

代码示例来源:origin: org.apache.hbase/hbase-server

LOG.info("serverName=" + serverName + ", regions=" +
   regions.stream().map(r -> r.getRegionNameAsString()).collect(Collectors.toList()));
 Collection<RegionLoad> regionLoads = admin.getRegionMetrics(serverName)
  .stream().map(r -> new RegionLoad(r)).collect(Collectors.toList());
 LOG.info("serverName=" + serverName + ", regionLoads=" +
 for (ServerName serverName : admin
   .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet()) {
  regionLoads.addAll(admin.getRegionMetrics(serverName, table)
   .stream().map(r -> new RegionLoad(r)).collect(Collectors.toList()));
for (ServerName serverName : clusterStatus.getServers()) {
 ServerLoad serverLoad = clusterStatus.getLoad(serverName);
 Map<byte[], RegionLoad> regionLoads = admin.getRegionMetrics(serverName).stream()
  .collect(Collectors.toMap(e -> e.getRegionName(), e -> new RegionLoad(e),
   (v1, v2) -> {

相关文章

Admin类方法