This article collects Java code examples for org.apache.hadoop.hbase.client.Table.getTableDescriptor() and shows how Table.getTableDescriptor() is used in practice. The examples are drawn mainly from GitHub, Stack Overflow, Maven, and similar platforms, extracted from selected open-source projects, so they should serve as solid references. Details of Table.getTableDescriptor() are as follows:
Package: org.apache.hadoop.hbase.client
Class: Table
Method: getTableDescriptor
Description: Gets the org.apache.hadoop.hbase.HTableDescriptor for this table.
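Before the project examples below, here is a minimal usage sketch. It assumes an existing HBase cluster configuration and a hypothetical table named "example_table"; note that in HBase 2.x this method is deprecated in favor of Table.getDescriptor().

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class GetTableDescriptorExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("example_table"))) {
            // Fetch the table schema from the cluster
            HTableDescriptor desc = table.getTableDescriptor();
            // List each column family and its max-versions setting
            for (HColumnDescriptor family : desc.getColumnFamilies()) {
                System.out.println(family.getNameAsString() + " maxVersions=" + family.getMaxVersions());
            }
        }
    }
}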
Code example source: apache/kylin
/**
* Configure a MapReduce Job to perform an incremental load into the given
* table. This
* <ul>
* <li>Inspects the table to configure a total order partitioner</li>
* <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
* <li>Sets the number of reduce tasks to match the current number of regions</li>
* <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
* <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
* PutSortReducer)</li>
* </ul>
* The user should be sure to set the map output value class to either KeyValue or Put before
* running this function.
*/
public static void configureIncrementalLoad(Job job, Table table, RegionLocator regionLocator) throws IOException {
configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
}
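As the Javadoc above notes, the map output value class must be set to KeyValue or Put before this method is called. The following is a minimal driver sketch using HBase's own HFileOutputFormat2 rather than Kylin's wrapper; the class name BulkLoadJobSetup, the method prepareJob, and the table/output-path parameters are illustrative assumptions.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class BulkLoadJobSetup {
    public static Job prepareJob(String tableName, Path outputDir) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "hfile-bulk-load");
        // Map output must be ImmutableBytesWritable/KeyValue (or Put) before configuring the load
        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
        job.setMapOutputValueClass(KeyValue.class);
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf(tableName));
             RegionLocator regionLocator = connection.getRegionLocator(TableName.valueOf(tableName))) {
            // Sets the total order partitioner, reducer count, and HFile output format for the job
            HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator);
        }
        FileOutputFormat.setOutputPath(job, outputDir);
        return job;
    }
}

A real job would additionally need a mapper class that emits ImmutableBytesWritable/KeyValue pairs; that part is omitted here.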
Code example source: apache/kylin
public static void configureIncrementalLoadMap(Job job, Table table) throws IOException {
Configuration conf = job.getConfiguration();
job.setOutputKeyClass(ImmutableBytesWritable.class);
job.setOutputValueClass(KeyValue.class);
job.setOutputFormatClass(HFileOutputFormat3.class);
// Set compression algorithms based on column families
configureCompression(conf, table.getTableDescriptor());
configureBloomType(table.getTableDescriptor(), conf);
configureBlockSize(table.getTableDescriptor(), conf);
HTableDescriptor tableDescriptor = table.getTableDescriptor();
configureDataBlockEncoding(tableDescriptor, conf);
TableMapReduceUtil.addDependencyJars(job);
TableMapReduceUtil.initCredentials(job);
LOG.info("Incremental table " + table.getName() + " output configured.");
}
Code example source: apache/hbase
private HTableDescriptor getTableSchema() throws IOException,
TableNotFoundException {
Table table = servlet.getTable(tableResource.getName());
try {
return table.getTableDescriptor();
} finally {
table.close();
}
}
Code example source: apache/hbase
@Override
public Map<ByteBuffer, ColumnDescriptor> getColumnDescriptors(
ByteBuffer tableName) throws IOError, TException {
Table table = null;
try {
TreeMap<ByteBuffer, ColumnDescriptor> columns = new TreeMap<>();
table = getTable(tableName);
HTableDescriptor desc = table.getTableDescriptor();
for (HColumnDescriptor e : desc.getFamilies()) {
ColumnDescriptor col = ThriftUtilities.colDescFromHbase(e);
columns.put(col.name, col);
}
return columns;
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
throw getIOError(e);
} finally {
closeTable(table);
}
}
Code example source: apache/hbase
/**
* Returns a list of all the column families for a given Table.
*
* @param table table
* @throws IOException
*/
byte[][] getAllColumns(Table table) throws IOException {
HColumnDescriptor[] cds = table.getTableDescriptor().getColumnFamilies();
byte[][] columns = new byte[cds.length][];
for (int i = 0; i < cds.length; i++) {
columns[i] = Bytes.add(cds[i].getName(),
KeyValue.COLUMN_FAMILY_DELIM_ARRAY);
}
return columns;
}
Code example source: apache/hbase
private void setupMockColumnFamiliesForDataBlockEncoding(Table table,
Map<String, DataBlockEncoding> familyToDataBlockEncoding) throws IOException {
HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]);
for (Entry<String, DataBlockEncoding> entry : familyToDataBlockEncoding.entrySet()) {
mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
.setMaxVersions(1)
.setDataBlockEncoding(entry.getValue())
.setBlockCacheEnabled(false)
.setTimeToLive(0));
}
Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
}
Code example source: apache/hbase
private void setupMockColumnFamiliesForCompression(Table table,
Map<String, Compression.Algorithm> familyToCompression) throws IOException {
HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]);
for (Entry<String, Compression.Algorithm> entry : familyToCompression.entrySet()) {
mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
.setMaxVersions(1)
.setCompressionType(entry.getValue())
.setBlockCacheEnabled(false)
.setTimeToLive(0));
}
Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
}
Code example source: apache/hbase
private void setupMockColumnFamiliesForBloomType(Table table,
Map<String, BloomType> familyToDataBlockEncoding) throws IOException {
HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]);
for (Entry<String, BloomType> entry : familyToDataBlockEncoding.entrySet()) {
mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
.setMaxVersions(1)
.setBloomFilterType(entry.getValue())
.setBlockCacheEnabled(false)
.setTimeToLive(0));
}
Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
}
Code example source: apache/hbase
private void setupMockColumnFamiliesForBlockSize(Table table,
Map<String, Integer> familyToDataBlockEncoding) throws IOException {
HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]);
for (Entry<String, Integer> entry : familyToDataBlockEncoding.entrySet()) {
mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
.setMaxVersions(1)
.setBlocksize(entry.getValue())
.setBlockCacheEnabled(false)
.setTimeToLive(0));
}
Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
}
Code example source: apache/hbase
RegionSplitter(Table table) throws IOException {
this.table = table;
this.tableName = table.getName();
this.family = table.getTableDescriptor().getFamiliesKeys().iterator().next();
admin = TEST_UTIL.getAdmin();
rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
connection = TEST_UTIL.getConnection();
}
Code example source: apache/hbase
/**
* Test a table creation including a coprocessor path
* which is on the classpath
* @result Table will be created with the coprocessor
*/
@Test
public void testCreationClasspathCoprocessor() throws Exception {
Configuration conf = UTIL.getConfiguration();
// load coprocessor under test
conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
CoprocessorWhitelistMasterObserver.class.getName());
conf.setStrings(CoprocessorWhitelistMasterObserver.CP_COPROCESSOR_WHITELIST_PATHS_KEY,
new String[]{});
// set retries low to raise exception quickly
conf.setInt("hbase.client.retries.number", 5);
UTIL.startMiniCluster();
HTableDescriptor htd = new HTableDescriptor(TEST_TABLE);
HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
htd.addFamily(hcd);
htd.addCoprocessor(TestRegionObserver.class.getName());
Connection connection = ConnectionFactory.createConnection(conf);
Admin admin = connection.getAdmin();
LOG.info("Creating Table");
admin.createTable(htd);
// ensure table was created and coprocessor is added to table
LOG.info("Done Creating Table");
Table t = connection.getTable(TEST_TABLE);
assertEquals(1, t.getTableDescriptor().getCoprocessors().size());
}
}
Code example source: apache/hbase
conf.set(HFileOutputFormat2.COMPRESSION_FAMILIES_CONF_KEY,
    HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.compressionDetails,
        Arrays.asList(table.getTableDescriptor())));
Code example source: apache/hbase
setupMockColumnFamiliesForDataBlockEncoding(table, familyToDataBlockEncoding);
HTableDescriptor tableDescriptor = table.getTableDescriptor();
conf.set(HFileOutputFormat2.DATABLOCK_ENCODING_FAMILIES_CONF_KEY,
    HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.dataBlockEncodingDetails,
        Arrays.asList(tableDescriptor)));
Code example source: apache/hbase
conf.set(HFileOutputFormat2.BLOCK_SIZE_FAMILIES_CONF_KEY,
    HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.blockSizeDetails,
        Arrays.asList(table.getTableDescriptor())));
Code example source: apache/hbase
conf.set(HFileOutputFormat2.BLOOM_TYPE_FAMILIES_CONF_KEY,
HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.bloomTypeDetails,
Arrays.asList(table.getTableDescriptor())));
Code example source: apache/hbase
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test
public void testJobConfiguration() throws Exception {
Configuration conf = new Configuration(this.util.getConfiguration());
conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, util.getDataTestDir("testJobConfiguration")
.toString());
Job job = new Job(conf);
job.setWorkingDirectory(util.getDataTestDir("testJobConfiguration"));
Table table = Mockito.mock(Table.class);
RegionLocator regionLocator = Mockito.mock(RegionLocator.class);
setupMockStartKeys(regionLocator);
setupMockTableName(regionLocator);
HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
assertEquals(job.getNumReduceTasks(), 4);
}
Code example source: apache/hbase
protected RegionInfo createRegion(Configuration conf, final Table htbl,
byte[] startKey, byte[] endKey) throws IOException {
Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
HTableDescriptor htd = htbl.getTableDescriptor();
RegionInfo hri = RegionInfoBuilder.newBuilder(htbl.getName())
.setStartKey(startKey)
.setEndKey(endKey)
.build();
LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
Path rootDir = FSUtils.getRootDir(conf);
FileSystem fs = rootDir.getFileSystem(conf);
Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()),
hri.getEncodedName());
fs.mkdirs(p);
Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
FSDataOutputStream out = fs.create(riPath);
out.write(RegionInfo.toDelimitedByteArray(hri));
out.close();
// add to meta.
MetaTableAccessor.addRegionToMeta(TEST_UTIL.getConnection(), hri);
meta.close();
return hri;
}
Code example source: apache/hbase
@Test
public void testGetTableDescriptor() throws IOException {
Table table = null;
try {
table = TEST_UTIL.getConnection().getTable(TABLE);
HTableDescriptor local = table.getTableDescriptor();
assertEquals(remoteTable.getTableDescriptor(), local);
} finally {
if (null != table) table.close();
}
}
Code example source: apache/hbase
/**
* Add metadata, and verify that this only affects one table
*/
private void runTestSnapshotMetadataChangesIndependent() throws Exception {
// Add a new column family to the original table
byte[] TEST_FAM_2 = Bytes.toBytes("fam2");
HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM_2);
admin.disableTable(originalTableName);
admin.addColumnFamily(originalTableName, hcd);
// Verify that it is not in the snapshot
admin.enableTable(originalTableName);
UTIL.waitTableAvailable(originalTableName);
// get a description of the cloned table
// get a list of its families
// assert that the family is there
HTableDescriptor originalTableDescriptor = originalTable.getTableDescriptor();
HTableDescriptor clonedTableDescriptor = admin.getTableDescriptor(cloneTableName);
Assert.assertTrue("The original family was not found. There is something wrong. ",
originalTableDescriptor.hasFamily(TEST_FAM));
Assert.assertTrue("The original family was not found in the clone. There is something wrong. ",
clonedTableDescriptor.hasFamily(TEST_FAM));
Assert.assertTrue("The new family was not found. ",
originalTableDescriptor.hasFamily(TEST_FAM_2));
Assert.assertTrue("The new family was not found. ",
!clonedTableDescriptor.hasFamily(TEST_FAM_2));
}
Code example source: apache/kylin
public static void prepareTestData() throws Exception {
try {
util.getHBaseAdmin().disableTable(TABLE);
util.getHBaseAdmin().deleteTable(TABLE);
} catch (Exception e) {
// ignore table not found
}
Table table = util.createTable(TABLE, FAM);
HRegionInfo hRegionInfo = new HRegionInfo(table.getName());
region = util.createLocalHRegion(hRegionInfo, table.getTableDescriptor());
gtInfo = newInfo();
GridTable gridTable = newTable(gtInfo);
IGTScanner scanner = gridTable.scan(new GTScanRequestBuilder().setInfo(gtInfo).setRanges(null)
.setDimensions(null).setFilterPushDown(null).createGTScanRequest());
for (GTRecord record : scanner) {
byte[] value = record.exportColumns(gtInfo.getPrimaryKey()).toBytes();
byte[] key = new byte[RowConstants.ROWKEY_SHARD_AND_CUBOID_LEN + value.length];
System.arraycopy(Bytes.toBytes(baseCuboid), 0, key, RowConstants.ROWKEY_SHARDID_LEN,
RowConstants.ROWKEY_CUBOIDID_LEN);
System.arraycopy(value, 0, key, RowConstants.ROWKEY_SHARD_AND_CUBOID_LEN, value.length);
Put put = new Put(key);
put.addColumn(FAM, COL_M, record.exportColumns(gtInfo.getColumnBlock(1)).toBytes());
region.put(put);
}
}