This article collects Java code examples of the org.apache.hadoop.hbase.client.Table.getConfiguration() method and shows how Table.getConfiguration() is used in practice. The examples are drawn mainly from GitHub, Stack Overflow, Maven, and similar platforms, extracted from selected projects, so they should serve as useful references. Details of the Table.getConfiguration() method are as follows:
Package: org.apache.hadoop.hbase.client.Table
Class: Table
Method: getConfiguration
Returns the org.apache.hadoop.conf.Configuration object used by this instance.
The reference returned is not a copy, so any change made to it will affect this instance.
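Before the collected examples, here is a minimal sketch of calling the method directly. The table name "demo" and the local connection setup are assumptions for illustration; because getConfiguration() returns a live reference rather than a copy, a value set through it is visible on later reads from the same Table instance.

// Minimal sketch (not taken from the sources below): obtain a Table and read or
// adjust its live Configuration. Table name and connection setup are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class GetConfigurationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("demo"))) {
      // The returned reference is the Configuration backing this Table, not a copy.
      Configuration tableConf = table.getConfiguration();
      int scannerCaching = tableConf.getInt("hbase.client.scanner.caching", 100);
      System.out.println("scanner caching = " + scannerCaching);
      // Per the javadoc, changes made here affect this Table instance directly.
      tableConf.setInt("hbase.client.scanner.caching", 500);
    }
  }
}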
Code example source: origin: apache/hbase
/**
 * @param htable the {@link org.apache.hadoop.hbase.HTableDescriptor} to scan.
 */
public void setHTable(Table htable) {
  Configuration conf = htable.getConfiguration();
  logScannerActivity = conf.getBoolean(
      ScannerCallable.LOG_SCANNER_ACTIVITY, false);
  logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100);
  this.htable = htable;
}
Code example source: origin: apache/hbase
/**
 * Sets the HBase table.
 *
 * @param htable The {@link org.apache.hadoop.hbase.HTableDescriptor} to scan.
 */
public void setHTable(Table htable) {
  Configuration conf = htable.getConfiguration();
  logScannerActivity = conf.getBoolean(
      ScannerCallable.LOG_SCANNER_ACTIVITY, false);
  logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100);
  this.htable = htable;
}
Code example source: origin: apache/hbase
  secureClient = new SecureBulkLoadClient(table.getConfiguration(), table);
  return performBulkLoad(admin, table, regionLocator, queue, pool, secureClient, copyFile);
} finally {
Code example source: origin: apache/hbase
static Table createTestTable(TableName name, byte[][] rows, byte[][] families,
    byte[][] qualifiers, byte[] cellValue) throws IOException {
  Table ht = TEST_UTIL.createTable(name, families);
  List<Put> puts = createPuts(rows, families, qualifiers, cellValue);
  ht.put(puts);
  ht.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, CLIENT_TIMEOUT);
  return ht;
}
Code example source: origin: apache/hbase
/**
 * Verifies that getConfiguration returns the same Configuration object used
 * to create the HTable instance.
 */
@Test
public void testGetConfiguration() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  byte[][] FAMILIES = new byte[][] { Bytes.toBytes("foo") };
  Configuration conf = TEST_UTIL.getConfiguration();
  Table table = TEST_UTIL.createTable(tableName, FAMILIES);
  assertSame(conf, table.getConfiguration());
}
Code example source: origin: apache/kylin
admin = hbaseConnection.getAdmin();
if (!enabled(table.getConfiguration())) {
  logger.info("Region size calculation disabled.");
  return;
Code example source: origin: apache/hbase
try {
  LOG.info("Before map/reduce startup");
  job = new Job(table.getConfiguration(), "process column contents");
  job.setNumReduceTasks(1);
  Scan scan = new Scan();
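The excerpt above seeds a new MapReduce Job with the table's own Configuration, so the client settings travel with the job. As a rough sketch of how such a setup typically continues, the code below wires a table-scanning mapper with the TableMapReduceUtil helper from hbase-mapreduce; the IdentityTableMapper choice, scan tuning, and job wiring are illustrative assumptions, not taken from the excerpt.

// Hedged sketch of the same pattern: create a Job from table.getConfiguration()
// and attach a table-scanning mapper. Mapper and scan settings are assumptions.
import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.IdentityTableMapper;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class ScanJobSketch {
  static Job createScanJob(Table table) throws IOException {
    // Seed the job with the table's live Configuration, as the excerpt above does.
    Job job = Job.getInstance(table.getConfiguration(), "process column contents");
    job.setNumReduceTasks(1);
    Scan scan = new Scan();
    scan.setCaching(500);        // fetch larger batches for a full-table scan
    scan.setCacheBlocks(false);  // avoid polluting the region server block cache
    // Hook a table-scanning mapper to the job; IdentityTableMapper re-emits each row.
    TableMapReduceUtil.initTableMapperJob(
        table.getName().getNameAsString(), scan,
        IdentityTableMapper.class, ImmutableBytesWritable.class, Result.class, job);
    return job;
  }
}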
Code example source: origin: apache/phoenix
@Override
public Configuration getConfiguration() {
  return delegate.getConfiguration();
}
Code example source: origin: com.aliyun.hbase/alihbase-mapreduce
/**
 * @param htable the {@link org.apache.hadoop.hbase.HTableDescriptor} to scan.
 */
public void setHTable(Table htable) {
  Configuration conf = htable.getConfiguration();
  logScannerActivity = conf.getBoolean(
      ScannerCallable.LOG_SCANNER_ACTIVITY, false);
  logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100);
  this.htable = htable;
}
Code example source: origin: com.aliyun.hbase/alihbase-mapreduce
/**
 * Sets the HBase table.
 *
 * @param htable The {@link org.apache.hadoop.hbase.HTableDescriptor} to scan.
 */
public void setHTable(Table htable) {
  Configuration conf = htable.getConfiguration();
  logScannerActivity = conf.getBoolean(
      ScannerCallable.LOG_SCANNER_ACTIVITY, false);
  logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100);
  this.htable = htable;
}
Code example source: origin: org.locationtech.geomesa/geomesa-bigtable-spark
/**
 * Sets the HBase table.
 *
 * @param htable The {@link org.apache.hadoop.hbase.HTableDescriptor} to scan.
 */
public void setHTable(Table htable) {
  Configuration conf = htable.getConfiguration();
  logScannerActivity = conf.getBoolean(
      ScannerCallable.LOG_SCANNER_ACTIVITY, false);
  logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100);
  this.htable = htable;
}
Code example source: origin: org.apache.hbase/hbase-mapreduce
/**
 * @param htable the {@link org.apache.hadoop.hbase.HTableDescriptor} to scan.
 */
public void setHTable(Table htable) {
  Configuration conf = htable.getConfiguration();
  logScannerActivity = conf.getBoolean(
      ScannerCallable.LOG_SCANNER_ACTIVITY, false);
  logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100);
  this.htable = htable;
}
Code example source: origin: org.apache.hbase/hbase-mapreduce
/**
 * Sets the HBase table.
 *
 * @param htable The {@link org.apache.hadoop.hbase.HTableDescriptor} to scan.
 */
public void setHTable(Table htable) {
  Configuration conf = htable.getConfiguration();
  logScannerActivity = conf.getBoolean(
      ScannerCallable.LOG_SCANNER_ACTIVITY, false);
  logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100);
  this.htable = htable;
}
Code example source: origin: org.cloudgraph/cloudgraph-hbase
/**
 * Sets the HBase table.
 *
 * @param table
 *          The root {@link Table} to scan.
 */
public void setTable(Table table) {
  Configuration conf = table.getConfiguration();
  logScannerActivity = conf.getBoolean(ScannerCallable.LOG_SCANNER_ACTIVITY, false);
  logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100);
  this.table = table;
}
Code example source: origin: harbby/presto-connectors
/**
 * @param htable the {@link org.apache.hadoop.hbase.HTableDescriptor} to scan.
 */
public void setHTable(Table htable) {
  Configuration conf = htable.getConfiguration();
  logScannerActivity = conf.getBoolean(
      ScannerCallable.LOG_SCANNER_ACTIVITY, false);
  logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100);
  this.htable = htable;
}
Code example source: origin: harbby/presto-connectors
/**
 * Sets the HBase table.
 *
 * @param htable The {@link org.apache.hadoop.hbase.HTableDescriptor} to scan.
 */
public void setHTable(Table htable) {
  Configuration conf = htable.getConfiguration();
  logScannerActivity = conf.getBoolean(
      ScannerCallable.LOG_SCANNER_ACTIVITY, false);
  logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100);
  this.htable = htable;
}
Code example source: origin: org.apache.hbase/hbase-server
static Table createTestTable(TableName name, byte[][] rows, byte[][] families,
    byte[][] qualifiers, byte[] cellValue) throws IOException {
  Table ht = TEST_UTIL.createTable(name, families);
  List<Put> puts = createPuts(rows, families, qualifiers, cellValue);
  ht.put(puts);
  ht.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, CLIENT_TIMEOUT);
  return ht;
}
Code example source: origin: org.apache.hbase/hbase-server
/**
 * Verifies that getConfiguration returns the same Configuration object used
 * to create the HTable instance.
 */
@Test
public void testGetConfiguration() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  byte[][] FAMILIES = new byte[][] { Bytes.toBytes("foo") };
  Configuration conf = TEST_UTIL.getConfiguration();
  Table table = TEST_UTIL.createTable(tableName, FAMILIES);
  assertSame(conf, table.getConfiguration());
}
This content was collected from the internet. If it infringes on your rights, please contact the author to have it removed.