This article collects Java code examples for the org.apache.hadoop.hive.ql.metadata.Table.getPath() method and shows how Table.getPath() is used in practice. The examples were extracted from selected projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they are reasonably representative references. The details of Table.getPath() are as follows:
Package path: org.apache.hadoop.hive.ql.metadata.Table
Class name: Table
Method name: getPath
Method description: none available.
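Before the extracted examples, here is a minimal sketch of how the method is typically reached. This sketch is not from the projects above: the database name "default", the table name "my_table", and the class name TableGetPathDemo are placeholders, and it assumes a Hive metastore reachable through the supplied HiveConf.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Table;

public class TableGetPathDemo {
  public static void main(String[] args) throws HiveException {
    HiveConf conf = new HiveConf();
    // Look up the table through the Hive metadata layer
    // ("default" and "my_table" are placeholder names).
    Table table = Hive.get(conf).getTable("default", "my_table");
    // getPath() returns the root directory that holds the table's data files.
    Path location = table.getPath();
    System.out.println("Table data location: " + location);
  }
}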
Code example from: apache/hive
@Override
public Path getPath() {
  return table.getPath();
}
Code example from: apache/hive (an identical snippet appears in apache/drill)
final public Path getDataLocation() {
  if (path == null) {
    path = getPath();
  }
  return path;
}
Code example from: apache/hive
public boolean isEmpty() throws HiveException {
  Preconditions.checkNotNull(getPath());
  try {
    FileSystem fs = FileSystem.get(getPath().toUri(), SessionState.getSessionConf());
    return !fs.exists(getPath()) || fs.listStatus(getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER).length == 0;
  } catch (IOException e) {
    throw new HiveException(e);
  }
}
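Here the FileSystem is resolved from the table path's own URI rather than from the session's default filesystem, so a table whose data lives elsewhere (for example on a different HDFS cluster) is still checked correctly. The HIDDEN_FILES_PATH_FILTER from Hive's FileUtils skips names starting with "." or "_" when deciding whether the directory is empty.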
Code example from: apache/hive (an identical snippet appears in apache/drill)
@SuppressWarnings("nls")
public FileStatus[] getSortedPaths() {
  try {
    // Previously, this got the filesystem of the Table, which could be
    // different from the filesystem of the partition.
    FileSystem fs = FileSystem.get(getPath().toUri(), SessionState.getSessionConf());
    String pathPattern = getPath().toString();
    if (getNumBuckets() > 0) {
      pathPattern = pathPattern + "/*";
    }
    LOG.info("Path pattern = " + pathPattern);
    FileStatus srcs[] = fs.globStatus(new Path(pathPattern), FileUtils.HIDDEN_FILES_PATH_FILTER);
    Arrays.sort(srcs);
    for (FileStatus src : srcs) {
      LOG.info("Got file: " + src.getPath());
    }
    if (srcs.length == 0) {
      return null;
    }
    return srcs;
  } catch (Exception e) {
    throw new RuntimeException("Cannot get path ", e);
  }
}
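The trailing "/*" appended for bucketed tables makes the glob descend one level so each bucket file is matched individually, and Arrays.sort gives the buckets a deterministic order. Also note that the method returns null, not an empty array, when nothing matches.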
Code example from: apache/hive (an identical snippet appears in apache/drill)
protected long getSize(HiveConf conf, Table table) {
  Path path = table.getPath();
  String size = table.getProperty("totalSize");
  return getSize(conf, size, path);
}
Code example from: apache/hive (an identical snippet appears in apache/drill)
/**
 * Find the bytes on disk occupied by a table
 * @param conf
 *          - hive conf
 * @param table
 *          - table
 * @return size on disk
 */
public static long getFileSizeForTable(HiveConf conf, Table table) {
  Path path = table.getPath();
  long size = 0;
  try {
    FileSystem fs = path.getFileSystem(conf);
    size = fs.getContentSummary(path).getLength();
  } catch (Exception e) {
    size = 0;
  }
  return size;
}
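As a usage illustration (again not from the extracted examples; the names are placeholders and a reachable metastore is assumed), the helper above could be called like this:

// Hypothetical usage of getFileSizeForTable shown above. The helper
// deliberately swallows filesystem errors and reports 0 in that case.
HiveConf conf = new HiveConf();
Table table = Hive.get(conf).getTable("default", "my_table");
long bytes = getFileSizeForTable(conf, table);
System.out.println("my_table occupies " + bytes + " bytes on disk");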
Code example from: apache/hive (an identical snippet appears in apache/drill)
private List<Path> makeTableStatusLocations(Table tbl, Hive db, Partition par)
    throws HiveException {
  // output file system information
  Path tblPath = tbl.getPath();
  List<Path> locations = new ArrayList<Path>();
  if (tbl.isPartitioned()) {
    if (par == null) {
      for (Partition curPart : db.getPartitions(tbl)) {
        if (curPart.getLocation() != null) {
          locations.add(new Path(curPart.getLocation()));
        }
      }
    } else {
      if (par.getLocation() != null) {
        locations.add(new Path(par.getLocation()));
      }
    }
  } else {
    if (tblPath != null) {
      locations.add(tblPath);
    }
  }
  return locations;
}
Code example from: apache/hive (an identical snippet appears in apache/drill)
/**
 * Returns the table location path from a TableDesc object.
 *
 * @param hconf Configuration object.
 * @param tableDesc Table description from where to get the table name.
 * @return The path where the table is located.
 */
private static Path getTableLocationPath(final HiveConf hconf, final TableDesc tableDesc) {
  Table table = null;
  try {
    Hive hive = Hive.get(hconf);
    table = hive.getTable(tableDesc.getTableName());
  } catch (HiveException e) {
    LOG.warn("Unable to get the table location path for: " + tableDesc.getTableName(), e);
  }
  return (table != null) ? table.getPath() : null;
}
Code example from: apache/hive (an identical snippet appears in apache/drill)
private FetchWork convertToWork() throws HiveException {
  inputs.clear();
  Utilities.addSchemaEvolutionToTableScanOperator(table, scanOp);
  TableDesc tableDesc = Utilities.getTableDesc(table);
  if (!table.isPartitioned()) {
    inputs.add(new ReadEntity(table, parent, !table.isView() && parent == null));
    FetchWork work = new FetchWork(table.getPath(), tableDesc);
    PlanUtils.configureInputJobPropertiesForStorageHandler(work.getTblDesc());
    work.setSplitSample(splitSample);
    return work;
  }
  List<Path> listP = new ArrayList<Path>();
  List<PartitionDesc> partP = new ArrayList<PartitionDesc>();
  for (Partition partition : partsList.getNotDeniedPartns()) {
    inputs.add(new ReadEntity(partition, parent, parent == null));
    listP.add(partition.getDataLocation());
    partP.add(Utilities.getPartitionDescFromTableDesc(tableDesc, partition, true));
  }
  Table sourceTable = partsList.getSourceTable();
  inputs.add(new ReadEntity(sourceTable, parent, parent == null));
  TableDesc table = Utilities.getTableDesc(sourceTable);
  FetchWork work = new FetchWork(listP, partP, table);
  if (!work.getPartDesc().isEmpty()) {
    PartitionDesc part0 = work.getPartDesc().get(0);
    PlanUtils.configureInputJobPropertiesForStorageHandler(part0.getTableDesc());
    work.setSplitSample(splitSample);
  }
  return work;
}
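The two branches show the typical pattern around getPath(): for an unpartitioned table the FetchWork reads directly from table.getPath(), while for a partitioned table the per-partition data locations (partition.getDataLocation()) are collected instead and the table root is never scanned directly.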
Code example from: apache/drill
public static List<Path> getInputPathsForPartialScan(TableScanOperator tableScanOp,
    Appendable aggregationKey) throws SemanticException {
  List<Path> inputPaths = new ArrayList<Path>();
  switch (tableScanOp.getConf().getTableMetadata().getTableSpec().specType) {
  case TABLE_ONLY:
    inputPaths.add(tableScanOp.getConf().getTableMetadata()
        .getTableSpec().tableHandle.getPath());
    break;
  case STATIC_PARTITION:
    Partition part = tableScanOp.getConf().getTableMetadata()
        .getTableSpec().partHandle;
    try {
      aggregationKey.append(Warehouse.makePartPath(part.getSpec()));
    } catch (MetaException e) {
      throw new SemanticException(ErrorMsg.ANALYZE_TABLE_PARTIALSCAN_AGGKEY.getMsg(
          part.getDataLocation().toString() + e.getMessage()));
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
    inputPaths.add(part.getDataLocation());
    break;
  default:
    assert false;
  }
  return inputPaths;
}
Code example from: apache/hive (fragment; the rest of this snippet was lost in extraction)
if (tbl.getPath() != null) {
  tblLoc = tbl.getDataLocation().toString();
}
// ... conf, tbl.getPath());
Code example from: apache/hive
@Test
public void testDataDeletion() throws HiveException, IOException, TException {
  Database db = new Database();
  db.setName(dbName);
  hive.createDatabase(db);
  Table table = new Table(dbName, tableName);
  table.setDbName(dbName);
  table.setInputFormatClass(TextInputFormat.class);
  table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
  table.setPartCols(partCols);
  hive.createTable(table);
  table = hive.getTable(dbName, tableName);
  Path fakeTable = table.getPath().getParent().suffix(
      Path.SEPARATOR + "faketable");
  fs = fakeTable.getFileSystem(hive.getConf());
  fs.mkdirs(fakeTable);
  fs.deleteOnExit(fakeTable);
  Path fakePart = new Path(table.getDataLocation().toString(),
      "fakepartition=fakevalue");
  fs.mkdirs(fakePart);
  fs.deleteOnExit(fakePart);
  hive.dropTable(dbName, tableName, true, true);
  assertFalse(fs.exists(fakePart));
  hive.dropDatabase(dbName);
  assertFalse(fs.exists(fakeTable));
}
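This test leans on getPath() twice: getPath().getParent() resolves the database directory (used to plant the sibling "faketable" directory), while getDataLocation() resolves the table directory (used to plant the fake partition). The assertions then confirm that dropTable removes everything under the table path and dropDatabase removes the rest of the database directory.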
Code example from: apache/hive
public static org.apache.hadoop.hive.metastore.api.Partition convertAddSpecToMetaPartition(
    Table tbl, AddPartitionDesc.OnePartitionDesc addSpec, final HiveConf conf) throws HiveException {
  Path location = addSpec.getLocation() != null
      ? new Path(tbl.getPath(), addSpec.getLocation()) : null;
  if (location != null) {
    // ... (snippet truncated in the source)