本文整理了 Java 中 org.apache.hadoop.hive.ql.metadata.Table.validatePartColumnNames() 方法的一些代码示例,展示了 Table.validatePartColumnNames() 的具体用法。这些代码示例主要来源于 GitHub/Stackoverflow/Maven 等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Table.validatePartColumnNames() 方法的具体详情如下:
包路径:org.apache.hadoop.hive.ql.metadata.Table
类名称:Table
方法名:validatePartColumnNames
暂无
代码示例来源:origin: apache/hive
/**
 * Validates a user-supplied partition spec against a table's partition schema.
 * Column names are checked first, then the supplied values are checked against
 * the partition column types.
 *
 * @param tbl          the destination table whose partition columns are authoritative
 * @param partSpec     map of partition column name to value, as written in the query
 * @param astNode      the parse-tree node for the partition clause (used for type checks)
 * @param conf         Hive configuration consulted during type validation
 * @param shouldBeFull when true, partSpec must name every partition column of tbl
 * @throws SemanticException if a column name is unknown, the spec is incomplete
 *         when a full spec is required, or a value is incompatible with its column type
 */
public static void validatePartSpec(Table tbl, Map<String, String> partSpec,
ASTNode astNode, HiveConf conf, boolean shouldBeFull) throws SemanticException {
// Name check runs before type check: a bad column name fails fast,
// before any value-vs-type comparison is attempted.
tbl.validatePartColumnNames(partSpec, shouldBeFull);
validatePartColumnType(tbl, partSpec, astNode, conf);
}
代码示例来源:origin: apache/drill
/**
 * Validates a partition spec against a table: checks the partition column
 * names first, then the compatibility of the supplied values with the
 * partition column types.
 *
 * @param tbl          the destination table
 * @param partSpec     partition column name to value map from the query
 * @param astNode      parse-tree node for the partition clause, used in type validation
 * @param conf         Hive configuration used by the type check
 * @param shouldBeFull when true, every partition column of tbl must appear in partSpec
 * @throws SemanticException on an unknown column, an incomplete spec when a full
 *         one is required, or a type-incompatible value
 */
public static void validatePartSpec(Table tbl, Map<String, String> partSpec,
ASTNode astNode, HiveConf conf, boolean shouldBeFull) throws SemanticException {
// Order matters: name validation precedes type validation so an unknown
// column is reported before any value conversion is attempted.
tbl.validatePartColumnNames(partSpec, shouldBeFull);
validatePartColumnType(tbl, partSpec, astNode, conf);
}
代码示例来源:origin: apache/hive
/**
 * Builds (or reuses) the dynamic-partition context for an INSERT into a
 * partitioned destination table.
 *
 * @param qb       the query block being analyzed (used for error positioning)
 * @param qbm      per-query-block metadata; caches one DynamicPartitionCtx per destination
 * @param dest_tab the destination table
 * @param partSpec partition column name to value map from the query; dynamic
 *                 columns appear with no/empty value
 * @param dest     destination identifier used as the cache key in qbm
 * @return null when dest_tab is not partitioned; otherwise the (possibly cached)
 *         DynamicPartitionCtx for this destination
 * @throws SemanticException when the table is partitioned but no partition spec
 *         was supplied, or when dynamic partitioning is disabled in conf
 */
private DynamicPartitionCtx checkDynPart(QB qb, QBMetaData qbm, Table dest_tab,
Map<String, String> partSpec, String dest) throws SemanticException {
List<FieldSchema> parts = dest_tab.getPartitionKeys();
if (parts == null || parts.isEmpty()) {
return null; // table is not partitioned
}
// Partitioned table but the user gave no partition clause at all — reject.
if (partSpec == null || partSpec.size() == 0) { // user did NOT specify partition
throw new SemanticException(generateErrorMessage(qb.getParseInfo().getDestForClause(dest),
ErrorMsg.NEED_PARTITION_ERROR.getMsg()));
}
// Reuse a context already built for this destination; build and cache otherwise.
DynamicPartitionCtx dpCtx = qbm.getDPCtx(dest);
if (dpCtx == null) {
// Column names are only validated on first construction; "false" because a
// dynamic-partition spec is allowed to be partial.
dest_tab.validatePartColumnNames(partSpec, false);
dpCtx = new DynamicPartitionCtx(partSpec,
conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME),
conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE));
qbm.setDPCtx(dest, dpCtx);
}
// Reaching here means at least one partition value is dynamic, so the
// feature flag must be on; otherwise fail with a positioned error.
if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING)) { // allow DP
throw new SemanticException(generateErrorMessage(qb.getParseInfo().getDestForClause(dest),
ErrorMsg.DYNAMIC_PARTITION_DISABLED.getMsg()));
}
// Propagate the table's bucketing into the context so writers bucket output.
if ((dest_tab.getNumBuckets() > 0)) {
dpCtx.setNumBuckets(dest_tab.getNumBuckets());
}
return dpCtx;
}
代码示例来源:origin: apache/hive
tbl.validatePartColumnNames(partSpec, true);
List<String> pvals = new ArrayList<String>();
for (FieldSchema field : tbl.getPartCols()) {
代码示例来源:origin: apache/drill
boolean forceCreate, String partPath, boolean inheritTableSpecs, List<Path> newFiles)
throws HiveException {
tbl.validatePartColumnNames(partSpec, true);
List<String> pvals = new ArrayList<String>();
for (FieldSchema field : tbl.getPartCols()) {
代码示例来源:origin: apache/drill
dest_tab.validatePartColumnNames(partSpec, false);
dpCtx = new DynamicPartitionCtx(dest_tab, partSpec,
conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME),
代码示例来源:origin: apache/hive
destinationTable.validatePartColumnNames(partSpec, false);
dpCtx = new DynamicPartitionCtx(partSpec,
conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME),
代码示例来源:origin: com.facebook.presto.hive/hive-apache
/**
 * Validates a partition spec against a table's partition schema: partition
 * column names first, then value/type compatibility.
 *
 * @param tbl          the destination table
 * @param partSpec     partition column name to value map from the query
 * @param astNode      parse-tree node for the partition clause, consulted by the type check
 * @param conf         Hive configuration used during type validation
 * @param shouldBeFull when true, partSpec must cover every partition column of tbl
 * @throws SemanticException on an unknown column name, a missing column when a
 *         full spec is required, or a value incompatible with its column type
 */
public static void validatePartSpec(Table tbl, Map<String, String> partSpec,
ASTNode astNode, HiveConf conf, boolean shouldBeFull) throws SemanticException {
// Validate names before types so unknown columns are reported first.
tbl.validatePartColumnNames(partSpec, shouldBeFull);
validatePartColumnType(tbl, partSpec, astNode, conf);
}
代码示例来源:origin: com.facebook.presto.hive/hive-apache
dest_tab.validatePartColumnNames(partSpec, false);
dpCtx = new DynamicPartitionCtx(dest_tab, partSpec,
conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME),
代码示例来源:origin: com.facebook.presto.hive/hive-apache
boolean forceCreate, String partPath, boolean inheritTableSpecs, List<Path> newFiles)
throws HiveException {
tbl.validatePartColumnNames(partSpec, true);
List<String> pvals = new ArrayList<String>();
for (FieldSchema field : tbl.getPartCols()) {
内容来源于网络,如有侵权,请联系作者删除!