This article collects Java code examples for the org.apache.hadoop.hbase.client.Table.getDescriptor() method and shows how Table.getDescriptor() is used in practice. The examples come mainly from platforms such as Github, Stackoverflow, and Maven, extracted from selected open-source projects, so they should serve as useful references. Details of Table.getDescriptor() are as follows:
Package path: org.apache.hadoop.hbase.client.Table
Class name: Table
Method name: getDescriptor
Description: Gets the org.apache.hadoop.hbase.client.TableDescriptor for this table.
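Before the collected examples, here is a minimal, self-contained usage sketch of the method. It is not taken from the projects below; the table name "my_table" and the default client configuration are illustrative assumptions, so adapt them to your cluster.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class GetDescriptorExample {
  public static void main(String[] args) throws IOException {
    // Reads hbase-site.xml from the classpath for cluster settings.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Table table = conn.getTable(TableName.valueOf("my_table"))) { // hypothetical table name
      TableDescriptor desc = table.getDescriptor();
      System.out.println("Table: " + desc.getTableName());
      // Print the column families declared on the table.
      for (ColumnFamilyDescriptor cf : desc.getColumnFamilies()) {
        System.out.println("  family: " + cf.getNameAsString());
      }
    }
  }
}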
Code example source: origin: apache/hbase
/**
* Gets the {@link org.apache.hadoop.hbase.HTableDescriptor table descriptor} for this table.
* @throws java.io.IOException if a remote or network exception occurs.
* @deprecated since 2.0 version and will be removed in 3.0 version.
* use {@link #getDescriptor()}
*/
@Deprecated
default HTableDescriptor getTableDescriptor() throws IOException {
TableDescriptor descriptor = getDescriptor();
if (descriptor instanceof HTableDescriptor) {
return (HTableDescriptor)descriptor;
} else {
return new HTableDescriptor(descriptor);
}
}
Code example source: origin: apache/hbase
/**
* Configure a MapReduce Job to perform an incremental load into the given
* table. This
* <ul>
* <li>Inspects the table to configure a total order partitioner</li>
* <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
* <li>Sets the number of reduce tasks to match the current number of regions</li>
* <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
* <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
* PutSortReducer)</li>
* </ul>
* The user should be sure to set the map output value class to either KeyValue or Put before
* running this function.
*/
public static void configureIncrementalLoad(Job job, Table table, RegionLocator regionLocator)
throws IOException {
configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
}
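The snippet above is the convenience overload itself. Below is a hedged sketch of how a caller might wire it into a bulk-load job setup; the table name "my_table", the output path "/tmp/hfiles", and the job name are illustrative assumptions, not from the source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class BulkLoadJobSetup {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "bulk-load my_table");
    // The mapper (not shown) must emit KeyValue or Put values, as the javadoc above requires.
    TableName tableName = TableName.valueOf("my_table"); // hypothetical table name
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Table table = conn.getTable(tableName);
        RegionLocator locator = conn.getRegionLocator(tableName)) {
      // Partitions the reducers by region boundaries and sets up HFile output.
      HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), locator);
      FileOutputFormat.setOutputPath(job, new Path("/tmp/hfiles")); // hypothetical output dir
    }
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}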
Code example source: origin: apache/hbase
/**
* Checks whether there is any invalid family name in HFiles to be bulk loaded.
*/
private void validateFamiliesInHFiles(Table table, Deque<LoadQueueItem> queue, boolean silence)
throws IOException {
Set<String> familyNames = Arrays.asList(table.getDescriptor().getColumnFamilies()).stream()
.map(f -> f.getNameAsString()).collect(Collectors.toSet());
List<String> unmatchedFamilies = queue.stream().map(item -> Bytes.toString(item.getFamily()))
.filter(fn -> !familyNames.contains(fn)).distinct().collect(Collectors.toList());
if (unmatchedFamilies.size() > 0) {
String msg =
"Unmatched family names found: unmatched family names in HFiles to be bulkloaded: " +
unmatchedFamilies + "; valid family names of table " + table.getName() + " are: " +
familyNames;
LOG.error(msg);
if (!silence) {
throw new IOException(msg);
}
}
}
Code example source: origin: apache/hbase
try (Table table = conn.getTable(tableName);
    RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
  HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
}
Code example source: origin: apache/hbase
@VisibleForTesting void initializeWorkQueues() throws IOException {
if (storesToCompact.isEmpty()) {
connection.getTable(tableName).getDescriptor().getColumnFamilyNames()
.forEach(a -> storesToCompact.add(Bytes.toString(a)));
LOG.info("No family specified, will execute for all families");
}
LOG.info(
"Initializing compaction queues for table: " + tableName + " with cf: " + storesToCompact);
List<HRegionLocation> regionLocations =
connection.getRegionLocator(tableName).getAllRegionLocations();
for (HRegionLocation location : regionLocations) {
Optional<MajorCompactionRequest> request = MajorCompactionRequest
.newRequest(connection.getConfiguration(), location.getRegion(), storesToCompact,
timestamp);
request.ifPresent(majorCompactionRequest -> clusterCompactionQueues
.addToCompactionQueue(location.getServerName(), majorCompactionRequest));
}
}
Code example source: origin: apache/hbase
Table table = conn.getTable(tableName);
RegionLocator regionLocator = conn.getRegionLocator(tableName);
tableInfoList.add(new TableInfo(table.getDescriptor(), regionLocator));
Code example source: origin: apache/hbase
try (Table table = conn.getTable(tableName);
    RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
  job.setMapperClass(CellSortImporter.class);
  job.setReducerClass(CellReducer.class);
  job.setMapOutputKeyClass(ImmutableBytesWritable.class);
  job.setMapOutputValueClass(MapReduceExtendedCell.class);
  HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
  TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(),
      org.apache.hbase.thirdparty.com.google.common.base.Preconditions.class);
}
Code example source: origin: apache/hbase
ColumnFamilyDescriptor familyDesc = table.getDescriptor().getColumnFamily(family);
Code example source: origin: apache/hbase
ArrayList<String> unmatchedFamilies = new ArrayList<>();
Set<String> cfSet = getColumnFamilies(columns);
TableDescriptor tDesc = table.getDescriptor();
for (String cf : cfSet) {
  // collect requested families that are missing from the table descriptor
  if (!tDesc.hasColumnFamily(Bytes.toBytes(cf))) {
    unmatchedFamilies.add(cf);
  }
}
// ...
for (ColumnFamilyDescriptor family : table.getDescriptor().getColumnFamilies()) {
  familyNames.add(family.getNameAsString());
}
// ...
Path outputDir = new Path(hfileOutPath);
FileOutputFormat.setOutputPath(job, outputDir);
HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
Code example source: origin: apache/hbase
private HRegion openSnapshotRegion(RegionInfo firstRegion, Path tableDir) throws IOException {
return HRegion.openReadOnlyFileSystemHRegion(
TEST_UTIL.getConfiguration(),
TEST_UTIL.getTestFileSystem(),
tableDir,
firstRegion,
table.getDescriptor()
);
}
}
Code example source: origin: apache/hbase
@Test
public void testGetTableDescriptor() throws IOException {
HColumnDescriptor fam1 = new HColumnDescriptor("fam1");
HColumnDescriptor fam2 = new HColumnDescriptor("fam2");
HColumnDescriptor fam3 = new HColumnDescriptor("fam3");
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
htd.addFamily(fam1);
htd.addFamily(fam2);
htd.addFamily(fam3);
this.admin.createTable(htd);
Table table = TEST_UTIL.getConnection().getTable(htd.getTableName());
TableDescriptor confirmedHtd = table.getDescriptor();
assertEquals(0, TableDescriptor.COMPARATOR.compare(htd, confirmedHtd));
MetaTableAccessor.fullScanMetaAndPrint(TEST_UTIL.getConnection());
table.close();
}
Code example source: origin: apache/hbase
TableDescriptor htd = null;
try (Table table = connection.getTable(tableName)) {
  htd = table.getDescriptor();
}
Code example source: origin: apache/phoenix
@Override
public TableDescriptor getDescriptor() throws IOException {
return delegate.getDescriptor();
}
Code example source: origin: apache/hbase
try {
  table = connection.getTable(region.getTable());
  tableDesc = table.getDescriptor();
  byte[] rowToCheck = region.getStartKey();
  if (rowToCheck.length == 0) {
    // ...
  }
  // ...
} catch (IOException e) {
  // ...
}
Code example source: origin: apache/hbase
RegionInfo hri = region.getRegionInfo();
NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (byte[] fam : htable1.getDescriptor().getColumnFamilyNames()) {
  scopes.put(fam, 1);
}
Code example source: origin: apache/hbase
assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN1));
assertEquals("compare row values between two tables",
  t1.getDescriptor().getValue("row" + i),
  t2.getDescriptor().getValue("row" + i));
assertEquals(MobTestUtil.countMobRows(t1), MobTestUtil.countMobRows(t2));
assertEquals("compare count of mob row values between two tables",
  t1.getDescriptor().getValues().size(),
  t2.getDescriptor().getValues().size());
assertTrue("The mob row count is 0 but should be > 0",
  MobTestUtil.countMobRows(t2) > 0);
Code example source: origin: apache/hbase
try {
  LOG.debug("Reading table descriptor for table {}", region.getTable());
  table = connection.getTable(region.getTable());
  tableDesc = table.getDescriptor();
} catch (IOException e) {
  LOG.debug("sniffRegion {} of {} failed", region.getEncodedName(), e);
  // ...
}
Code example source: origin: apache/hbase
@Test
public void test() throws Exception {
TableDescriptor tableDescriptor = client.getDescriptor();
ProcedureExecutor<MasterProcedureEnv> executor = UTIL.getMiniHBaseCluster().getMaster()
.getMasterProcedureExecutor();
MasterProcedureEnv env = executor.getEnvironment();
List<RegionInfo> regionInfos = admin.getRegions(TABLE_NAME);
MergeTableRegionsProcedure mergeTableRegionsProcedure = new MergeTableRegionsProcedure(
UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor()
.getEnvironment(), regionInfos.get(0), regionInfos.get(1));
ModifyTableProcedure modifyTableProcedure = new ModifyTableProcedure(env, tableDescriptor);
long procModify = executor.submitProcedure(modifyTableProcedure);
UTIL.waitFor(30000, () -> executor.getProcedures().stream()
.filter(p -> p instanceof ModifyTableProcedure)
.map(p -> (ModifyTableProcedure) p)
.anyMatch(p -> TABLE_NAME.equals(p.getTableName())));
long proc = executor.submitProcedure(mergeTableRegionsProcedure);
UTIL.waitFor(3000000, () -> UTIL.getMiniHBaseCluster().getMaster()
.getMasterProcedureExecutor().isFinished(procModify));
Assert.assertEquals("Modify Table procedure should success!",
ProcedureProtos.ProcedureState.SUCCESS, modifyTableProcedure.getState());
}
Code example source: origin: apache/hbase
admin.addColumnFamily(tableName, getTestRestoreSchemaChangeHCD());
admin.enableTable(tableName);
assertEquals(2, table.getDescriptor().getColumnFamilyCount());
TableDescriptor htd = admin.getDescriptor(tableName);
assertEquals(2, htd.getColumnFamilyCount());
// ... restore the snapshot taken before the second family was added ...
assertEquals(1, table.getDescriptor().getColumnFamilyCount());
try {
  countRows(table, TEST_FAMILY2);
  fail("TEST_FAMILY2 should no longer exist after the restore");
} catch (NoSuchColumnFamilyException e) {
  // expected
}
// ... restore the snapshot that includes the second family ...
htd = admin.getDescriptor(tableName);
assertEquals(2, htd.getColumnFamilyCount());
assertEquals(2, table.getDescriptor().getColumnFamilyCount());
assertEquals(500, countRows(table, TEST_FAMILY2));
assertEquals(snapshot2Rows, countRows(table));
Code example source: origin: apache/hbase
@Override
public boolean evaluate() throws IOException {
boolean tableAvailable = getAdmin().isTableAvailable(tableName);
if (tableAvailable) {
try (Table table = getConnection().getTable(tableName)) {
TableDescriptor htd = table.getDescriptor();
for (HRegionLocation loc : getConnection().getRegionLocator(tableName)
.getAllRegionLocations()) {
Scan scan = new Scan().withStartRow(loc.getRegionInfo().getStartKey())
.withStopRow(loc.getRegionInfo().getEndKey()).setOneRowLimit()
.setMaxResultsPerColumnFamily(1).setCacheBlocks(false);
for (byte[] family : htd.getColumnFamilyNames()) {
scan.addFamily(family);
}
try (ResultScanner scanner = table.getScanner(scan)) {
scanner.next();
}
}
}
}
return tableAvailable;
}
};