This article collects code examples of the Java method org.apache.hadoop.hdfs.protocol.Block.getGenerationStamp() and shows how it is used in practice. The examples are drawn from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, so they make useful references. Details of the Block.getGenerationStamp() method follow:
Package path: org.apache.hadoop.hdfs.protocol.Block
Class name: Block
Method name: getGenerationStamp
Javadoc: Get the generation stamp from the name of the meta file. (This description belongs to the static overload Block.getGenerationStamp(String metaFile); the no-argument instance method simply returns the generation stamp stored on the block.)
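Before the project excerpts below, here is a minimal stand-alone sketch of both forms of the method: the no-argument instance getter, and the static overload that parses the generation stamp out of a meta file name. The block ID, length, and generation stamp values are made up for illustration; blk_<blockId>_<genStamp>.meta is the naming convention HDFS uses for replica meta files.

import org.apache.hadoop.hdfs.protocol.Block;

public class GenStampDemo {
  public static void main(String[] args) {
    // Hypothetical values: block id, length in bytes, generation stamp.
    Block block = new Block(1073741825L, 134217728L, 1001L);

    // Instance form: returns the generation stamp stored on the Block.
    System.out.println(block.getGenerationStamp()); // prints 1001

    // Static form: recovers the generation stamp from a meta file name
    // of the form blk_<blockId>_<genStamp>.meta.
    System.out.println(Block.getGenerationStamp("blk_1073741825_1001.meta")); // prints 1001
  }
}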
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Create RecoveringBlock with copy-on-truncate option.
 */
public RecoveringBlock(ExtendedBlock b, DatanodeInfo[] locs,
    Block recoveryBlock) {
  super(b, locs); // startOffset is unknown
  this.newGenerationStamp = recoveryBlock.getGenerationStamp();
  this.recoveryBlock = recoveryBlock;
}
Code example source: org.apache.hadoop/hadoop-hdfs
public long getGenStamp() {
  return metaSuffix != null ? Block.getGenerationStamp(
      getMetaFile().getName()) :
      HdfsConstants.GRANDFATHER_GENERATION_STAMP;
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Determine whether the block ID was randomly generated (legacy) or
 * sequentially generated. The generation stamp value is used to
 * make the distinction.
 *
 * @return true if the block ID was randomly generated, false otherwise.
 */
boolean isLegacyBlock(Block block) {
  return block.getGenerationStamp() < getLegacyGenerationStampLimit();
}
Code example source: org.apache.hadoop/hadoop-hdfs
public boolean isCorruptedDuringWrite() {
  return stored.getGenerationStamp() > corrupted.getGenerationStamp();
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * @return true if the given storage has the given block listed for
 * invalidation. Blocks are compared including their generation stamps:
 * if a block is pending invalidation but with a different generation stamp,
 * returns false.
 */
synchronized boolean contains(final DatanodeInfo dn, final Block block) {
  final LightWeightHashSet<Block> s = getBlocksSet(dn, block);
  if (s == null) {
    return false; // no invalidate blocks for this storage ID
  }
  Block blockInSet = s.getElement(block);
  return blockInSet != null &&
      block.getGenerationStamp() == blockInSet.getGenerationStamp();
}
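The explicit generation-stamp comparison in contains() matters because, as far as I can tell, Block equality and hashing are based on the block ID alone, so the set lookup by itself would also match a stale copy of the same block. A small sketch under that assumption:

// Two Block objects with the same ID but different generation stamps.
Block current = new Block(42L, 1024L, 1002L);
Block stale = new Block(42L, 1024L, 1001L);

// Assumed semantics: equals()/hashCode() consider only the block ID,
// so a set lookup alone cannot tell these two apart.
System.out.println(current.equals(stale)); // true
System.out.println(current.getGenerationStamp()
    == stale.getGenerationStamp()); // false -- the extra check contains() adds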
Code example source: org.apache.hadoop/hadoop-hdfs
boolean isGenStampInFuture(Block block) {
  if (isLegacyBlock(block)) {
    return block.getGenerationStamp() > getLegacyGenerationStamp();
  } else {
    return block.getGenerationStamp() > getGenerationStamp();
  }
}
Code example source: org.apache.hadoop/hadoop-hdfs
public static void blockToXml(ContentHandler contentHandler, Block block)
    throws SAXException {
  contentHandler.startElement("", "", "BLOCK", new AttributesImpl());
  XMLUtils.addSaxString(contentHandler, "BLOCK_ID",
      Long.toString(block.getBlockId()));
  XMLUtils.addSaxString(contentHandler, "NUM_BYTES",
      Long.toString(block.getNumBytes()));
  XMLUtils.addSaxString(contentHandler, "GENSTAMP",
      Long.toString(block.getGenerationStamp()));
  contentHandler.endElement("", "", "BLOCK");
}
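For reference, running the hypothetical block from the first sketch through blockToXml would emit an element of the following shape (indentation added here for readability; the exact text depends on the ContentHandler driving the serialization):

<BLOCK>
  <BLOCK_ID>1073741825</BLOCK_ID>
  <NUM_BYTES>134217728</NUM_BYTES>
  <GENSTAMP>1001</GENSTAMP>
</BLOCK>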
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Constructor
 * @param block a block
 * @param vol volume where replica is located
 * @param dir directory path where block and meta files are located
 */
LocalReplica(Block block, FsVolumeSpi vol, File dir) {
  this(block.getBlockId(), block.getNumBytes(),
      block.getGenerationStamp(), vol, dir);
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Constructor
 * @param block a block
 * @param vol volume where replica is located
 * @param dir directory path where block and meta files are located
 * @param writer a thread that is writing to this replica
 */
LocalReplicaInPipeline(Block block,
    FsVolumeSpi vol, File dir, Thread writer) {
  this(block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
      vol, dir, writer, 0L);
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Constructor.
 * @param block a block
 * @param vol volume where replica is located
 */
ReplicaInfo(Block block, FsVolumeSpi vol) {
  this(vol, block.getBlockId(), block.getNumBytes(),
      block.getGenerationStamp());
}
Code example source: org.apache.hadoop/hadoop-hdfs
@Override
public void store(FileRegion token) throws IOException {
  final Block block = token.getBlock();
  final ProvidedStorageLocation psl = token.getProvidedStorageLocation();
  out.append(String.valueOf(block.getBlockId())).append(delim);
  out.append(psl.getPath().toString()).append(delim);
  out.append(Long.toString(psl.getOffset())).append(delim);
  out.append(Long.toString(psl.getLength())).append(delim);
  out.append(Long.toString(block.getGenerationStamp()));
  if (psl.getNonce().length > 0) {
    out.append(delim)
        .append(Base64.getEncoder().encodeToString(psl.getNonce()));
  }
  out.append("\n");
}
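For illustration, assuming delim is a comma and the nonce is empty, one store() call appends a single text line of this shape (all field values hypothetical):

1073741825,/remote/data/file1,0,134217728,1001

with the generation stamp as the last mandatory field, followed by a Base64-encoded nonce only when one is present.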
Code example source: org.apache.hadoop/hadoop-hdfs
static File moveBlockFiles(Block b, ReplicaInfo replicaInfo, File destdir)
    throws IOException {
  final File dstfile = new File(destdir, b.getBlockName());
  final File dstmeta = FsDatasetUtil.getMetaFile(dstfile, b.getGenerationStamp());
  try {
    replicaInfo.renameMeta(dstmeta.toURI());
  } catch (IOException e) {
    throw new IOException("Failed to move meta file for " + b
        + " from " + replicaInfo.getMetadataURI() + " to " + dstmeta, e);
  }
  try {
    replicaInfo.renameData(dstfile.toURI());
  } catch (IOException e) {
    throw new IOException("Failed to move block file for " + b
        + " from " + replicaInfo.getBlockURI() + " to "
        + dstfile.getAbsolutePath(), e);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("addFinalizedBlock: Moved " + replicaInfo.getMetadataURI()
        + " to " + dstmeta + " and " + replicaInfo.getBlockURI()
        + " to " + dstfile);
  }
  return dstfile;
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Write an array of blocks as compactly as possible. This uses
 * delta-encoding for the generation stamp and size, following
 * the principle that genstamp increases relatively slowly,
 * and size is equal for all but the last block of a file.
 */
public static void writeCompactBlockArray(
    Block[] blocks, DataOutputStream out) throws IOException {
  WritableUtils.writeVInt(out, blocks.length);
  Block prev = null;
  for (Block b : blocks) {
    long szDelta = b.getNumBytes() -
        (prev != null ? prev.getNumBytes() : 0);
    long gsDelta = b.getGenerationStamp() -
        (prev != null ? prev.getGenerationStamp() : 0);
    out.writeLong(b.getBlockId()); // blockid is random
    WritableUtils.writeVLong(out, szDelta);
    WritableUtils.writeVLong(out, gsDelta);
    prev = b;
  }
}
Code example source: org.apache.hadoop/hadoop-hdfs
public static Block[] readCompactBlockArray(
    DataInput in, int logVersion) throws IOException {
  int num = WritableUtils.readVInt(in);
  if (num < 0) {
    throw new IOException("Invalid block array length: " + num);
  }
  Block prev = null;
  Block[] ret = new Block[num];
  for (int i = 0; i < num; i++) {
    long id = in.readLong();
    long sz = WritableUtils.readVLong(in) +
        ((prev != null) ? prev.getNumBytes() : 0);
    long gs = WritableUtils.readVLong(in) +
        ((prev != null) ? prev.getGenerationStamp() : 0);
    ret[i] = new Block(id, sz, gs);
    prev = ret[i];
  }
  return ret;
}
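Since the two helpers above are inverses, a round-trip is a quick way to see the delta encoding of sizes and generation stamps at work. A minimal sketch, assuming both methods are the public statics of FSImageSerialization shown above (note that the body of readCompactBlockArray never uses its logVersion argument, so any value works here):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;

public class CompactBlockArrayRoundTrip {
  public static void main(String[] args) throws IOException {
    // Hypothetical file: two full 128 MB blocks plus a short tail block,
    // so szDelta and gsDelta are zero for the middle block.
    Block[] blocks = {
        new Block(1L, 134217728L, 1001L),
        new Block(2L, 134217728L, 1001L),
        new Block(3L, 4096L, 1002L)
    };

    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    FSImageSerialization.writeCompactBlockArray(blocks, new DataOutputStream(buf));

    Block[] decoded = FSImageSerialization.readCompactBlockArray(
        new DataInputStream(new ByteArrayInputStream(buf.toByteArray())), -64);

    for (int i = 0; i < blocks.length; i++) {
      assert decoded[i].getGenerationStamp() == blocks[i].getGenerationStamp();
      assert decoded[i].getNumBytes() == blocks[i].getNumBytes();
    }
  }
}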
Code example source: org.apache.hadoop/hadoop-hdfs
File addFinalizedBlock(Block b, ReplicaInfo replicaInfo) throws IOException {
  File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
  fileIoProvider.mkdirsWithExistsCheck(volume, blockDir);
  File blockFile = FsDatasetImpl.moveBlockFiles(b, replicaInfo, blockDir);
  File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
  if (dfsUsage instanceof CachingGetSpaceUsed) {
    ((CachingGetSpaceUsed) dfsUsage).incDfsUsed(
        b.getNumBytes() + metaFile.length());
  }
  return blockFile;
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Get the meta information of the replica that matches both block id
 * and generation stamp.
 * @param bpid block pool id
 * @param block block with its id as the key
 * @return the replica's meta information
 * @throws IllegalArgumentException if the input block or block pool is null
 */
ReplicaInfo get(String bpid, Block block) {
  checkBlockPool(bpid);
  checkBlock(block);
  ReplicaInfo replicaInfo = get(bpid, block.getBlockId());
  if (replicaInfo != null &&
      block.getGenerationStamp() == replicaInfo.getGenerationStamp()) {
    return replicaInfo;
  }
  return null;
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Commit block's length and generation stamp as reported by the client.
 * Set block state to {@link BlockUCState#COMMITTED}.
 * @param block - contains client reported block length and generation stamp
 * @return the list of stale replicas.
 * @throws IOException if block ids are inconsistent.
 */
List<ReplicaUnderConstruction> commitBlock(Block block) throws IOException {
  if (getBlockId() != block.getBlockId()) {
    throw new IOException("Trying to commit inconsistent block: id = "
        + block.getBlockId() + ", expected id = " + getBlockId());
  }
  Preconditions.checkState(!isComplete());
  uc.commit();
  this.setNumBytes(block.getNumBytes());
  // Sort out invalid replicas.
  return setGenerationStampAndVerifyReplicas(block.getGenerationStamp());
}
Code example source: org.apache.hadoop/hadoop-hdfs
public FinalizedProvidedReplica(FileRegion fileRegion, FsVolumeSpi volume,
    Configuration conf, FileSystem remoteFS) {
  super(fileRegion.getBlock().getBlockId(),
      fileRegion.getProvidedStorageLocation().getPath().toUri(),
      fileRegion.getProvidedStorageLocation().getOffset(),
      fileRegion.getBlock().getNumBytes(),
      fileRegion.getBlock().getGenerationStamp(),
      new RawPathHandle(ByteBuffer
          .wrap(fileRegion.getProvidedStorageLocation().getNonce())),
      volume, conf, remoteFS);
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Remove the replica's meta information from the map that matches
 * the input block's id and generation stamp.
 * @param bpid block pool id
 * @param block block with its id as the key
 * @return the removed replica's meta information
 * @throws IllegalArgumentException if the input block is null
 */
ReplicaInfo remove(String bpid, Block block) {
  checkBlockPool(bpid);
  checkBlock(block);
  try (AutoCloseableLock l = lock.acquire()) {
    FoldedTreeSet<ReplicaInfo> set = map.get(bpid);
    if (set != null) {
      ReplicaInfo replicaInfo =
          set.get(block.getBlockId(), LONG_AND_BLOCK_COMPARATOR);
      if (replicaInfo != null &&
          block.getGenerationStamp() == replicaInfo.getGenerationStamp()) {
        return set.removeAndGet(replicaInfo);
      }
    }
  }
  return null;
}
Code example source: org.apache.hadoop/hadoop-hdfs
/**
 * Commit a block of a file.
 *
 * @param block block to be committed
 * @param commitBlock - contains client reported block length and generation
 * @return true if the block is changed to committed state.
 * @throws IOException if the block does not have at least a minimal number
 * of replicas reported from data-nodes.
 */
private boolean commitBlock(final BlockInfo block,
    final Block commitBlock) throws IOException {
  if (block.getBlockUCState() == BlockUCState.COMMITTED)
    return false;
  assert block.getNumBytes() <= commitBlock.getNumBytes() :
      "commitBlock length is less than the stored one "
      + commitBlock.getNumBytes() + " vs. " + block.getNumBytes();
  if (block.getGenerationStamp() != commitBlock.getGenerationStamp()) {
    throw new IOException("Commit block with mismatching GS. NN has " +
        block + ", client submits " + commitBlock);
  }
  List<ReplicaUnderConstruction> staleReplicas =
      block.commitBlock(commitBlock);
  removeStaleReplicas(staleReplicas, block);
  return true;
}