本文整理了 Java 中 org.apache.hadoop.hive.ql.metadata.Table.getProperty() 方法的一些代码示例,展示了 Table.getProperty() 的具体用法。这些代码示例主要来源于 Github/Stackoverflow/Maven 等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Table.getProperty() 方法的具体详情如下:
包路径:org.apache.hadoop.hive.ql.metadata.Table
类名称:Table
方法名:getProperty
暂无
代码示例来源:origin: apache/hive
public boolean isNonNative() {
  // A table is non-native exactly when a storage handler class is declared
  // among its properties.
  String storageHandlerClass = getProperty(
      org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE);
  return storageHandlerClass != null;
}
代码示例来源:origin: apache/drill
public boolean isNonNative() {
  // Non-native tables carry the storage-handler key in their properties;
  // a missing value means the table is handled natively.
  return null != getProperty(
      org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE);
}
代码示例来源:origin: apache/drill
/**
 * Decides whether the given table is a valid ACID table.
 * Note, users are responsible for using the correct TxnManager. We do not look at
 * SessionState.get().getTxnMgr().supportsAcid() here.
 * @param table the table to inspect; may be null
 * @return true if table is a legit ACID table, false otherwise
 */
public static boolean isAcidTable(Table table) {
  if (table == null) {
    return false;
  }
  // The flag may have been stored under either casing of the property key.
  String transactional = table.getProperty(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
  if (transactional == null) {
    transactional = table.getProperty(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL.toUpperCase());
  }
  // "true".equalsIgnoreCase(null) is false, so no explicit null check is needed.
  return "true".equalsIgnoreCase(transactional);
}
代码示例来源:origin: apache/hive
public int getBucketingVersion() {
  // Translate the stored property value (possibly null) into a numeric version.
  final String storedVersion = getProperty(hive_metastoreConstants.TABLE_BUCKETING_VERSION);
  return Utilities.getBucketingVersion(storedVersion);
}
代码示例来源:origin: apache/hive
protected long getSize(HiveConf conf, Table table) {
  // Delegate to the (conf, size, path) overload, feeding it the "totalSize"
  // stat recorded on the table and the table's on-disk location.
  final Path tableLocation = table.getPath();
  final String recordedTotalSize = table.getProperty("totalSize");
  return getSize(conf, recordedTotalSize, tableLocation);
}
代码示例来源:origin: apache/hive
/**
 * Resolves the ACID operational properties configured on a table.
 * @param table A table object
 * @return the parsed properties, or the default set when the table defines none
 */
public static AcidOperationalProperties getAcidOperationalProperties(Table table) {
  String configured = table.getProperty(
      hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
  // A table without explicit transactional properties gets the default type.
  return configured == null
      ? AcidOperationalProperties.getDefault()
      : AcidOperationalProperties.parseString(configured);
}
代码示例来源:origin: apache/drill
protected long getSize(HiveConf conf, Table table) {
  // Look up the table's location first, then its recorded "totalSize" stat,
  // and resolve the final size via the string/path overload.
  Path location = table.getPath();
  String recordedSize = table.getProperty("totalSize");
  return getSize(conf, recordedSize, location);
}
代码示例来源:origin: apache/drill
/**
 * Looks up the transactional properties configured for a table.
 * @param table A table object
 * @return the parsed properties, or the legacy defaults when the table defines none
 */
public static AcidOperationalProperties getAcidOperationalProperties(Table table) {
  String configured =
      table.getProperty(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
  if (configured != null) {
    return AcidOperationalProperties.parseString(configured);
  }
  // No explicit setting on the table: fall back to the legacy type.
  return AcidOperationalProperties.getLegacy();
}
代码示例来源:origin: apache/hive
public HiveStorageHandler getStorageHandler() {
  // Lazily instantiate the handler; native tables never get one, so the
  // cached field stays null for them.
  if (storageHandler == null && isNonNative()) {
    try {
      storageHandler = HiveUtils.getStorageHandler(
          SessionState.getSessionConf(),
          getProperty(
              org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE));
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
  return storageHandler;
}
代码示例来源:origin: apache/drill
public HiveStorageHandler getStorageHandler() {
  if (storageHandler != null) {
    return storageHandler;
  }
  if (!isNonNative()) {
    // Native table: nothing to construct, the cached field is still null here.
    return storageHandler;
  }
  try {
    // Instantiate and cache the handler named by the storage property.
    storageHandler = HiveUtils.getStorageHandler(
        SessionState.getSessionConf(),
        getProperty(
            org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE));
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
  return storageHandler;
}
代码示例来源:origin: apache/hive
tableMeta.setComments(table.getProperty("comment"));
tableMetas.add(tableMeta);
代码示例来源:origin: apache/drill
tableMeta.setComments(table.getProperty("comment"));
tableMetas.add(tableMeta);
代码示例来源:origin: apache/hive
for (Partition partition : partitions) {
final FileSystem newPathFileSystem = partition.getPartitionPath().getFileSystem(this.getConf());
boolean isAutoPurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
final FileStatus status = newPathFileSystem.getFileStatus(partition.getPartitionPath());
Hive.trashFiles(newPathFileSystem, new FileStatus[]{status}, this.getConf(), isAutoPurge);
代码示例来源:origin: apache/hive
/**
 * Builds a committer container that wraps {@code baseCommitter}, reading the
 * HCatalog job info out of the job configuration and pre-resolving the flags
 * that drive partition handling during commit.
 *
 * @param context current JobContext
 * @param baseCommitter OutputCommitter to contain
 * @throws IOException propagated from {@code HCatOutputFormat.getJobInfo}
 */
public FileOutputCommitterContainer(JobContext context,
org.apache.hadoop.mapred.OutputCommitter baseCommitter) throws IOException {
super(context, baseCommitter);
jobInfo = HCatOutputFormat.getJobInfo(context.getConfiguration());
dynamicPartitioningUsed = jobInfo.isDynamicPartitioningUsed();
// Without dynamic partitioning, all partitions are known up front.
this.partitionsDiscovered = !dynamicPartitioningUsed;
cachedStorageHandler = HCatUtil.getStorageHandler(context.getConfiguration(), jobInfo.getTableInfo().getStorerInfo());
Table table = new Table(jobInfo.getTableInfo().getTable());
// A custom dynamic-partition output location is honored only when dynamic
// partitioning is on, the table's "EXTERNAL" property parses to true, and a
// non-empty custom path was supplied in the job info.
if (dynamicPartitioningUsed && Boolean.parseBoolean((String)table.getProperty("EXTERNAL"))
&& jobInfo.getCustomDynamicPath() != null
&& jobInfo.getCustomDynamicPath().length() > 0) {
customDynamicLocationUsed = true;
} else {
customDynamicLocationUsed = false;
}
// Cap on append attempts, configurable via HCAT_APPEND_LIMIT.
this.maxAppendAttempts = context.getConfiguration().getInt(HCatConstants.HCAT_APPEND_LIMIT, APPEND_COUNTER_WARN_THRESHOLD);
}
代码示例来源:origin: apache/hive
String propertyName = showTblPrpt.getPropertyName();
if (propertyName != null) {
String propertyValue = tbl.getProperty(propertyName);
if (propertyValue == null) {
String errMsg = "Table " + tableName + " does not have property: " + propertyName;
代码示例来源:origin: apache/hive
try {
final FileSystem newPathFileSystem = newTPart.getPartitionPath().getFileSystem(this.getConf());
boolean isAutoPurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
final FileStatus status = newPathFileSystem.getFileStatus(newTPart.getPartitionPath());
Hive.trashFiles(newPathFileSystem, new FileStatus[]{status}, this.getConf(), isAutoPurge);
代码示例来源:origin: apache/hive
final String timeWindowString = mv.getProperty(MATERIALIZED_VIEW_REWRITING_TIME_WINDOW);
final String mode;
if (!org.apache.commons.lang.StringUtils.isEmpty(timeWindowString)) {
代码示例来源:origin: apache/hive
return null;
rowCnt = Long.parseLong(tbl.getProperty(StatsSetupConst.ROW_COUNT));
if (rowCnt == null) {
代码示例来源:origin: apache/drill
return null;
rowCnt = Long.parseLong(tbl.getProperty(StatsSetupConst.ROW_COUNT));
if (rowCnt == null) {
代码示例来源:origin: apache/hive
final String timeWindowString = mv.getProperty(MATERIALIZED_VIEW_REWRITING_TIME_WINDOW);
final String mode;
if (!org.apache.commons.lang.StringUtils.isEmpty(timeWindowString)) {
内容来源于网络,如有侵权,请联系作者删除!