Usage of the org.apache.hadoop.hbase.client.Connection.getRegionLocator() method, with code examples


This article collects Java code examples of the org.apache.hadoop.hbase.client.Connection.getRegionLocator() method and shows how it is used in practice. The examples are taken from selected projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as a useful reference. The details of Connection.getRegionLocator() are as follows:

Package: org.apache.hadoop.hbase.client
Class: Connection
Method: getRegionLocator

About Connection.getRegionLocator

Retrieves a RegionLocator implementation for inspecting region information about a table. The returned RegionLocator is not thread-safe, so a new instance should be created for each thread that uses it. This is a lightweight operation; pooling or caching of the returned RegionLocator is neither required nor desired.
The caller is responsible for calling RegionLocator#close() on the returned instance. The RegionLocator needs to be unmanaged.
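
Before the project snippets below, here is a minimal, self-contained sketch of the usage pattern the description above prescribes: obtain the RegionLocator from a shared Connection, use it inside try-with-resources so it is always closed, and do not pool or cache it. The table name "mytable" and row key "row-0001" are illustrative assumptions, not taken from any of the quoted projects.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLocatorExample {
 public static void main(String[] args) throws IOException {
  Configuration conf = HBaseConfiguration.create();
  // The Connection is heavyweight and meant to be shared; the locator is not.
  try (Connection connection = ConnectionFactory.createConnection(conf);
     RegionLocator locator = connection.getRegionLocator(TableName.valueOf("mytable"))) {
   // Locate the region that holds a given row key.
   HRegionLocation location = locator.getRegionLocation(Bytes.toBytes("row-0001"));
   System.out.println("Region: " + location.getRegionInfo().getRegionNameAsString());
   System.out.println("Server: " + location.getServerName());
   // Enumerate all regions of the table and where they are hosted.
   for (HRegionLocation loc : locator.getAllRegionLocations()) {
    System.out.println(loc.getRegionInfo().getEncodedName() + " -> " + loc.getServerName());
   }
  }
 }
}

Note the asymmetry: the Connection is the expensive, shared resource, while the locator is cheap and created per use. Most of the snippets below follow this same try-with-resources shape.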

Code examples

Example source: apache/hbase

/**
 * Retrieve a regionLocator for the table. The user should close the RegionLocator.
 */
public RegionLocator getRegionLocator(byte[] tableName) throws IOException {
 return getCurrentConnection().connection.getRegionLocator(TableName.valueOf(tableName));
}

Example source: apache/hbase

@Override
public void configure(JobConf job) {
 try {
  this.connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job));
  TableName tableName = TableName.valueOf(job.get(TableOutputFormat.OUTPUT_TABLE));
  this.locator = this.connection.getRegionLocator(tableName);
 } catch (IOException e) {
  LOG.error(e.toString(), e);
 }
 try {
  this.startKeys = this.locator.getStartKeys();
 } catch (IOException e) {
  LOG.error(e.toString(), e);
 }
}

Example source: apache/hbase

/**
  * Sets the configuration. This is used to determine the start keys for the
  * given table.
  *
  * @param configuration  The configuration to set.
  * @see org.apache.hadoop.conf.Configurable#setConf(
  *   org.apache.hadoop.conf.Configuration)
  */
 @Override
 public void setConf(Configuration configuration) {
  this.conf = HBaseConfiguration.create(configuration);
  try {
   this.connection = ConnectionFactory.createConnection(HBaseConfiguration.create(conf));
   TableName tableName = TableName.valueOf(conf.get(TableOutputFormat.OUTPUT_TABLE));
   this.locator = this.connection.getRegionLocator(tableName);
  } catch (IOException e) {
   LOG.error(e.toString(), e);
  }
  try {
   this.startKeys = this.locator.getStartKeys();
  } catch (IOException e) {
   LOG.error(e.toString(), e);
  }
 }

Example source: apache/hbase

@Override
protected Pair<byte[][], byte[][]> getStartEndKeys() throws IOException {
 if (conf.get(SPLIT_TABLE) != null) {
  TableName splitTableName = TableName.valueOf(conf.get(SPLIT_TABLE));
  try (Connection conn = ConnectionFactory.createConnection(getConf())) {
   try (RegionLocator rl = conn.getRegionLocator(splitTableName)) {
    return rl.getStartEndKeys();
   }
  }
 }
 return super.getStartEndKeys();
}

Example source: apache/hbase

@Override
 public void setup(Context context) throws IOException {
  cfRenameMap = createCfRenameMap(context.getConfiguration());
  filter = instantiateFilter(context.getConfiguration());
  int reduceNum = context.getNumReduceTasks();
  Configuration conf = context.getConfiguration();
  TableName tableName = TableName.valueOf(context.getConfiguration().get(TABLE_NAME));
  try (Connection conn = ConnectionFactory.createConnection(conf);
    RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
   byte[][] startKeys = regionLocator.getStartKeys();
   if (startKeys.length != reduceNum) {
    throw new IOException("Region split after job initialization");
   }
   CellWritableComparable[] startKeyWraps =
     new CellWritableComparable[startKeys.length - 1];
   for (int i = 1; i < startKeys.length; ++i) {
    startKeyWraps[i - 1] =
      new CellWritableComparable(KeyValueUtil.createFirstOnRow(startKeys[i]));
   }
   CellWritableComparablePartitioner.START_KEYS = startKeyWraps;
  }
 }

Example source: apache/hbase

if (hfileOutPath != null) {
 LOG.debug("add incremental job :" + hfileOutPath + " from " + inputDirs);
 TableName tableName = TableName.valueOf(tabName);
 job.setMapperClass(HFileCellMapper.class);
 job.setReducerClass(CellSortReducer.class);
 try (Connection conn = ConnectionFactory.createConnection(conf);
   Table table = conn.getTable(tableName);
   RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
  HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
 }
 // ... (the rest of the job setup is truncated in the source snippet)
}

Example source: apache/hbase

private void generatePartitions(Path partitionsPath) throws IOException {
 Connection connection = ConnectionFactory.createConnection(getConf());
 Pair<byte[][], byte[][]> regionKeys
  = connection.getRegionLocator(TableName.valueOf(tableHash.tableName)).getStartEndKeys();
 connection.close();
 tableHash.selectPartitions(regionKeys);
 LOG.info("Writing " + tableHash.partitions.size() + " partition keys to " + partitionsPath);
 tableHash.writePartitionFile(getConf(), partitionsPath);
}
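
Note that the snippet above never closes the RegionLocator it obtains, and the Connection is not closed if getStartEndKeys() throws before connection.close() is reached. A reworked sketch using try-with-resources, keeping the surrounding tableHash, getConf(), and LOG members as assumptions carried over from the original context, might look like:

private void generatePartitions(Path partitionsPath) throws IOException {
 try (Connection connection = ConnectionFactory.createConnection(getConf());
    RegionLocator locator =
      connection.getRegionLocator(TableName.valueOf(tableHash.tableName))) {
  // Both the connection and the locator are closed even if getStartEndKeys() throws.
  tableHash.selectPartitions(locator.getStartEndKeys());
 }
 LOG.info("Writing " + tableHash.partitions.size() + " partition keys to " + partitionsPath);
 tableHash.writePartitionFile(getConf(), partitionsPath);
}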

Example source: apache/hbase

loadHFiles.setConf(newConf);
TableName tableName = TableName.valueOf(tableNameString);
Table table = this.connection.getTable(tableName);
try (RegionLocator locator = connection.getRegionLocator(tableName)) {
 // ... (the actual bulk load is truncated in the source snippet)
}

Example source: apache/hbase

@Override
 protected void handleFailure(Counters counters) throws IOException {
  try (Connection conn = ConnectionFactory.createConnection(job.getConfiguration())) {
   TableName tableName = TableName.valueOf(COMMON_TABLE_NAME);
   CounterGroup g = counters.getGroup("undef");
   Iterator<Counter> it = g.iterator();
   while (it.hasNext()) {
    String keyString = it.next().getName();
    byte[] key = Bytes.toBytes(keyString);
    HRegionLocation loc = conn.getRegionLocator(tableName).getRegionLocation(key, true);
    LOG.error("undefined row " + keyString + ", " + loc);
   }
   g = counters.getGroup("unref");
   it = g.iterator();
   while (it.hasNext()) {
    String keyString = it.next().getName();
    byte[] key = Bytes.toBytes(keyString);
    HRegionLocation loc = conn.getRegionLocator(tableName).getRegionLocation(key, true);
    LOG.error("unreferred row " + keyString + ", " + loc);
   }
  }
 }

Example source: apache/hbase

public void manualTest(String args[]) throws Exception {
 Configuration conf = HBaseConfiguration.create();
 util = new HBaseTestingUtility(conf);
 if ("newtable".equals(args[0])) {
  TableName tname = TableName.valueOf(args[1]);
  byte[][] splitKeys = generateRandomSplitKeys(4);
  Table table = util.createTable(tname, FAMILIES, splitKeys);
 } else if ("incremental".equals(args[0])) {
  TableName tname = TableName.valueOf(args[1]);
  try(Connection c = ConnectionFactory.createConnection(conf);
    Admin admin = c.getAdmin();
    RegionLocator regionLocator = c.getRegionLocator(tname)) {
   Path outDir = new Path("incremental-out");
   runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(admin
       .getTableDescriptor(tname), regionLocator)), outDir, false);
  }
 } else {
  throw new RuntimeException(
    "usage: TestHFileOutputFormat2 newtable | incremental");
 }
}

Example source: apache/hbase

@Test
public void testCreateTableNumberOfRegions() throws IOException, InterruptedException {
 final TableName tableName = TableName.valueOf(name.getMethodName());
 HTableDescriptor desc = new HTableDescriptor(tableName);
 desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
 admin.createTable(desc);
 List<HRegionLocation> regions;
 try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
  regions = l.getAllRegionLocations();
  assertEquals("Table should have only 1 region", 1, regions.size());
 }
 TableName TABLE_2 = TableName.valueOf(tableName.getNameAsString() + "_2");
 desc = new HTableDescriptor(TABLE_2);
 desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
 admin.createTable(desc, new byte[][] { new byte[] { 42 } });
 try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(TABLE_2)) {
  regions = l.getAllRegionLocations();
  assertEquals("Table should have only 2 regions", 2, regions.size());
 }
 TableName TABLE_3 = TableName.valueOf(tableName.getNameAsString() + "_3");
 desc = new HTableDescriptor(TABLE_3);
 desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
 admin.createTable(desc, "a".getBytes(), "z".getBytes(), 3);
 try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(TABLE_3)) {
  regions = l.getAllRegionLocations();
  assertEquals("Table should have only 3 regions", 3, regions.size());
 }
 // ... (further cases, including a TABLE_5 created with explicit start/end keys
 // and 16 regions, are truncated in the source snippet)
}
Example source: apache/hbase

final TableName tableName = TableName.valueOf(name.getMethodName());
byte[][] splitKeys = { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 },
  new byte[] { 3, 3, 3 }, new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 } };
// ... (any further split keys and the table descriptor `desc` are truncated in the source snippet)
admin.createTable(desc, splitKeys);
try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
 List<HRegionLocation> regions = l.getAllRegionLocations();
 // ... (assertions on the region list are truncated in the source snippet)
}

Example source: apache/hbase

@BeforeClass
public static void before() throws Exception {
 HTU.startMiniCluster(NB_SERVERS);
 final TableName tableName = TableName.valueOf(TestRegionServerNoMaster.class.getSimpleName());
 // Create table then get the single region for our new table.
 table = HTU.createTable(tableName, HConstants.CATALOG_FAMILY);
 Put p = new Put(row);
 p.addColumn(HConstants.CATALOG_FAMILY, row, row);
 table.put(p);
 try (RegionLocator locator = HTU.getConnection().getRegionLocator(tableName)) {
  hri = locator.getRegionLocation(row, false).getRegionInfo();
 }
 regionName = hri.getRegionName();
 stopMasterAndAssignMeta(HTU);
}

Example source: apache/hbase

final TableName table = TableName.valueOf(name.getMethodName());
byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("row_00000010"),
  Bytes.toBytes("row_00000020"), Bytes.toBytes("row_00000030"),
  Bytes.toBytes("row_00000040") };
// ... (table setup and the `bulk` HFile directory are truncated in the source snippet;
// the opening of the try-with-resources below is reconstructed, with `Table t` assumed)
try (Table t = connection.getTable(table);
   RegionLocator locator = connection.getRegionLocator(table);
   Admin admin = connection.getAdmin()) {
 lih.doBulkLoad(bulk, admin, t, locator);
}

Example source: apache/hbase

@Test
public void testRegionObserverMultiRegion() throws IOException {
 final TableName tableName = TableName.valueOf(name.getMethodName());
 try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
    Admin admin = connection.getAdmin()) {
  admin.createTable(
    new HTableDescriptor(tableName)
      .addFamily(new HColumnDescriptor(foo))
      // add the coprocessor for the region
      .addCoprocessor(CustomRegionObserver.class.getName())
    , new byte[][]{foo}); // create with 2 regions
  try (Table table = connection.getTable(tableName);
     RegionLocator locator = connection.getRegionLocator(tableName)) {
   table.get(new Get(bar));
   table.get(new Get(foo)); // 2 gets to 2 separate regions
   assertEquals(2, locator.getAllRegionLocations().size());
   assertNotEquals(locator.getRegionLocation(bar).getRegionInfo(),
     locator.getRegionLocation(foo).getRegionInfo());
  }
 }
 assertPreGetRequestsCounter(CustomRegionObserver.class);
}

Example source: apache/hbase

public void testSmallReversedScanUnderMultiRegions() throws Exception {
 final TableName tableName = TableName.valueOf(name.getMethodName());
 byte[][] splitRows = new byte[][] {
   Bytes.toBytes("000"), Bytes.toBytes("002"), Bytes.toBytes("004") };
 // ... (any further split rows and the creation of `table` are truncated in the source snippet)
 TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
 try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
  assertEquals(splitRows.length + 1, l.getAllRegionLocations().size());
  // ... (reversed-scan assertions are truncated in the source snippet)
 }
}

Example source: apache/drill

private void init() {
 logger.debug("Getting region locations");
 TableName tableName = TableName.valueOf(hbaseScanSpec.getTableName());
 Connection conn = storagePlugin.getConnection();
 try (Admin admin = conn.getAdmin();
    RegionLocator locator = conn.getRegionLocator(tableName)) {
  this.hTableDesc = admin.getTableDescriptor(tableName);
  List<HRegionLocation> regionLocations = locator.getAllRegionLocations();
  statsCalculator = new TableStatsCalculator(conn, hbaseScanSpec, storagePlugin.getContext().getConfig(), storagePluginConfig);
  boolean foundStartRegion = false;
  regionsToScan = new TreeMap<>();
  for (HRegionLocation regionLocation : regionLocations) {
   HRegionInfo regionInfo = regionLocation.getRegionInfo();
   if (!foundStartRegion && hbaseScanSpec.getStartRow() != null && hbaseScanSpec.getStartRow().length != 0 && !regionInfo.containsRow(hbaseScanSpec.getStartRow())) {
    continue;
   }
   foundStartRegion = true;
   regionsToScan.put(regionInfo, regionLocation.getServerName());
   scanSizeInBytes += statsCalculator.getRegionSizeInBytes(regionInfo.getRegionName());
   if (hbaseScanSpec.getStopRow() != null && hbaseScanSpec.getStopRow().length != 0 && regionInfo.containsRow(hbaseScanSpec.getStopRow())) {
    break;
   }
  }
 } catch (IOException e) {
  throw new DrillRuntimeException("Error getting region info for table: " + hbaseScanSpec.getTableName(), e);
 }
 HBaseUtils.verifyColumns(columns, hTableDesc);
}

Example source: apache/hbase

final TableName table = TableName.valueOf(name.getMethodName());
final AtomicInteger calls = new AtomicInteger(0);
final Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
setupTable(conn, table, 10);
Path dir = buildBulkFiles(table, 1);
lih.doBulkLoad(dir, conn.getAdmin(), conn.getTable(table), conn.getRegionLocator(table));
assertEquals(calls.get(), 2);
util.getConfiguration().setBoolean(LoadIncrementalHFiles.RETRY_ON_IO_EXCEPTION, false);

Example source: apache/hbase

@Test
public void bulkLoadHFileTest() throws Exception {
 final String testName = TestRegionObserverInterface.class.getName() + "." + name.getMethodName();
 final TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName());
 Configuration conf = util.getConfiguration();
 Table table = util.createTable(tableName, new byte[][] { A, B, C });
 try (RegionLocator locator = util.getConnection().getRegionLocator(tableName)) {
  verifyMethodResult(SimpleRegionObserver.class,
   new String[] { "hadPreBulkLoadHFile", "hadPostBulkLoadHFile" }, tableName,
   new Boolean[] { false, false });
  FileSystem fs = util.getTestFileSystem();
  final Path dir = util.getDataTestDirOnTestFS(testName).makeQualified(fs);
  Path familyDir = new Path(dir, Bytes.toString(A));
  createHFile(util.getConfiguration(), fs, new Path(familyDir, Bytes.toString(A)), A, A);
  // Bulk load
  new LoadIncrementalHFiles(conf).doBulkLoad(dir, util.getAdmin(), table, locator);
  verifyMethodResult(SimpleRegionObserver.class,
   new String[] { "hadPreBulkLoadHFile", "hadPostBulkLoadHFile" }, tableName,
   new Boolean[] { true, true });
 } finally {
  util.deleteTable(tableName);
  table.close();
 }
}

Example source: apache/drill

private void init() {
 logger.debug("Getting region locations");
 TableName tableName = TableName.valueOf(hbaseScanSpec.getTableName());
 try (Admin admin = formatPlugin.getConnection().getAdmin();
    RegionLocator locator = formatPlugin.getConnection().getRegionLocator(tableName)) {
  hTableDesc = admin.getTableDescriptor(tableName);
  // Fetch tableStats only once and cache it.
  if (tableStats == null) {
   tableStats = new MapRDBTableStats(getHBaseConf(), hbaseScanSpec.getTableName());
  }
  boolean foundStartRegion = false;
  final TreeMap<TabletFragmentInfo, String> regionsToScan = new TreeMap<TabletFragmentInfo, String>();
  List<HRegionLocation> regionLocations = locator.getAllRegionLocations();
  for (HRegionLocation regionLocation : regionLocations) {
   HRegionInfo regionInfo = regionLocation.getRegionInfo();
   if (!foundStartRegion && hbaseScanSpec.getStartRow() != null && hbaseScanSpec.getStartRow().length != 0 && !regionInfo.containsRow(hbaseScanSpec.getStartRow())) {
    continue;
   }
   foundStartRegion = true;
   regionsToScan.put(new TabletFragmentInfo(regionInfo), regionLocation.getHostname());
   if (hbaseScanSpec.getStopRow() != null && hbaseScanSpec.getStopRow().length != 0 && regionInfo.containsRow(hbaseScanSpec.getStopRow())) {
    break;
   }
  }
  setRegionsToScan(regionsToScan);
 } catch (Exception e) {
  throw new DrillRuntimeException("Error getting region info for table: " + hbaseScanSpec.getTableName(), e);
 }
 HBaseUtils.verifyColumns(columns, hTableDesc);
}
