Usage and Code Examples of org.apache.hadoop.hdfs.protocol.Block


This article collects Java code examples for the org.apache.hadoop.hdfs.protocol.Block class and shows how the class is used in practice. The examples are drawn from curated open-source projects on GitHub, Stack Overflow, Maven, and similar platforms, so they should serve as a useful reference. Details of the Block class:
Package: org.apache.hadoop.hdfs.protocol
Class name: Block

Introduction to Block

A Block is a Hadoop FS primitive, identified by a long.
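As a quick illustration, here is a minimal usage sketch; the numeric values are arbitrary:

// Construct a block with id, length, and generation stamp, then inspect it.
Block b = new Block(1073741825L, 134217728L, 1001L);
long id = b.getBlockId();        // 1073741825
String name = b.getBlockName();  // "blk_1073741825"
b.setGenerationStamp(1002L);     // bump the generation stamp
b.setNumBytes(0L);               // reset the length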

Code Examples

Code example source: org.apache.hadoop/hadoop-hdfs

public DBlock getInternalBlock(StorageGroup storage) {
 int idxInLocs = locations.indexOf(storage);
 if (idxInLocs == -1) {
  return null;
 }
 byte idxInGroup = indices[idxInLocs];
 long blkId = getBlock().getBlockId() + idxInGroup;
 long numBytes = getInternalBlockLength(getNumBytes(), cellSize,
   dataBlockNum, idxInGroup);
 Block blk = new Block(getBlock());
 blk.setBlockId(blkId);
 blk.setNumBytes(numBytes);
 DBlock dblk = new DBlock(blk);
 dblk.addLocation(storage);
 return dblk;
}
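Note the ID arithmetic above: a striped block group reserves a contiguous range of IDs, and each internal block is addressed as the group's block ID plus its index within the group. A toy illustration with made-up values:

// Made-up values; striped group IDs are negative (see nextValue below).
long groupId = 0x8000000000000020L;     // hypothetical block group ID
byte idxInGroup = 2;                    // third internal block of the group
long internalId = groupId + idxInGroup; // = 0x8000000000000022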

Code example source: org.apache.hadoop/hadoop-hdfs

/**
 * Constructor
 * @param block a block
 * @param vol volume where replica is located
 * @param dir directory path where block and meta files are located
 * @param writer a thread that is writing to this replica
 */
LocalReplicaInPipeline(Block block,
  FsVolumeSpi vol, File dir, Thread writer) {
 this(block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
   vol, dir, writer, 0L);
}

Code example source: org.apache.hadoop/hadoop-hdfs

@Override
public String toString() {
 return block.getBlockName() + "\t" + path;
}

Code example source: org.apache.hadoop/hadoop-hdfs

private static Block[] readBlocks(
  DataInputStream in,
  int logVersion) throws IOException {
 int numBlocks = in.readInt();
 if (numBlocks < 0) {
  throw new IOException("invalid negative number of blocks");
 } else if (numBlocks > MAX_BLOCKS) {
  throw new IOException("invalid number of blocks: " + numBlocks +
    ".  The maximum number of blocks per file is " + MAX_BLOCKS);
 }
 Block[] blocks = new Block[numBlocks];
 for (int i = 0; i < numBlocks; i++) {
  Block blk = new Block();
  blk.readFields(in);
  blocks[i] = blk;
 }
 return blocks;
}
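Block implements Writable (hence blk.readFields(in) above), so serialization is symmetric. A minimal sketch of the writing side, where writeBlocks is a hypothetical helper using java.io.DataOutputStream:

// Inverse of readBlocks: an int count followed by each block's fields.
static void writeBlocks(Block[] blocks, DataOutputStream out) throws IOException {
 out.writeInt(blocks.length);
 for (Block blk : blocks) {
  blk.write(out); // Writable counterpart of readFields(in)
 }
}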

Code example source: org.apache.hadoop/hadoop-hdfs

@Override // NumberGenerator
public long nextValue() {
 Block b = new Block(super.nextValue());
 // There may be an occasional conflict with randomly generated
 // block IDs. Skip over the conflicts.
 while(isValidBlock(b)) {
  b.setBlockId(super.nextValue());
 }
 if (b.getBlockId() < 0) {
  throw new IllegalStateException("All positive block IDs are used, " +
    "wrapping to negative IDs, " +
    "which might conflict with erasure coded block groups.");
 }
 return b.getBlockId();
}

Code example source: org.apache.hadoop/hadoop-hdfs

// Fragment: the enclosing method signature is elided in the original.
  throws IOException {
 ReplicaInfo newReplica = null;
 long blockId = block.getBlockId();
 long genStamp = block.getGenerationStamp();
 if (isFinalized) {
  newReplica = new ReplicaBuilder(ReplicaState.FINALIZED)
    .setBlockId(blockId)
    .setLength(block.getNumBytes())
    .setGenerationStamp(genStamp)
    .setFsVolume(volume)
    .build();
 } else {
  File file = new File(rbwDir, block.getBlockName());
  boolean loadRwr = true;
  File restartMeta = new File(file.getParent() +
  // ... (truncated in the original)

Code example source: org.apache.hadoop/hadoop-hdfs

static File moveBlockFiles(Block b, ReplicaInfo replicaInfo, File destdir)
  throws IOException {
 final File dstfile = new File(destdir, b.getBlockName());
 final File dstmeta = FsDatasetUtil.getMetaFile(dstfile, b.getGenerationStamp());
 try {
  replicaInfo.renameMeta(dstmeta.toURI());
 } catch (IOException e) {
  throw new IOException("Failed to move meta file for " + b
    + " from " + replicaInfo.getMetadataURI() + " to " + dstmeta, e);
 }
 try {
  replicaInfo.renameData(dstfile.toURI());
 } catch (IOException e) {
  throw new IOException("Failed to move block file for " + b
    + " from " + replicaInfo.getBlockURI() + " to "
    + dstfile.getAbsolutePath(), e);
 }
 if (LOG.isDebugEnabled()) {
  LOG.debug("addFinalizedBlock: Moved " + replicaInfo.getMetadataURI()
    + " to " + dstmeta + " and " + replicaInfo.getBlockURI()
    + " to " + dstfile);
 }
 return dstfile;
}

Code example source: org.apache.hadoop/hadoop-hdfs

public static Block[] readCompactBlockArray(
  DataInput in, int logVersion) throws IOException {
 int num = WritableUtils.readVInt(in);
 if (num < 0) {
  throw new IOException("Invalid block array length: " + num);
 }
 Block prev = null;
 Block[] ret = new Block[num];
 for (int i = 0; i < num; i++) {
  long id = in.readLong();
  long sz = WritableUtils.readVLong(in) +
    ((prev != null) ? prev.getNumBytes() : 0);
  long gs = WritableUtils.readVLong(in) +
    ((prev != null) ? prev.getGenerationStamp() : 0);
  ret[i] = new Block(id, sz, gs);
  prev = ret[i];
 }
 return ret;
}
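The reader above decodes a delta-encoded format: the block ID is stored raw, while the size and generation stamp are vlong deltas against the previous block, which keeps lists of similar blocks compact. A sketch of a matching writer under those assumptions, using org.apache.hadoop.io.WritableUtils (writeCompactBlockArray here is a hypothetical mirror of the reader, not necessarily the exact HDFS implementation):

static void writeCompactBlockArray(Block[] blocks, DataOutput out)
  throws IOException {
 WritableUtils.writeVInt(out, blocks.length);
 Block prev = null;
 for (Block b : blocks) {
  out.writeLong(b.getBlockId());
  WritableUtils.writeVLong(out, b.getNumBytes() -
    ((prev != null) ? prev.getNumBytes() : 0));
  WritableUtils.writeVLong(out, b.getGenerationStamp() -
    ((prev != null) ? prev.getGenerationStamp() : 0));
  prev = b;
 }
}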

Code example source: linkedin/dynamometer

@Override
public synchronized void setNumBytes(long length) {
 if (!finalized) {
  bytesRcvd = length;
 } else {
  theBlock.setNumBytes(length);
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

/**
 * Create new block with a unique block id and a new generation stamp.
 * @param blockType whether the file uses a striped or contiguous layout
 */
Block createNewBlock(BlockType blockType) throws IOException {
 assert hasWriteLock();
 Block b = new Block(nextBlockId(blockType), 0, 0);
 // Increment the generation stamp for every new block.
 b.setGenerationStamp(nextGenerationStamp(false));
 return b;
}

Code example source: org.apache.hadoop/hadoop-hdfs-test

@Override
public Block getStoredBlock(long blkid) throws IOException {
 Block b = new Block(blkid);
 BInfo binfo = blockMap.get(b);
 if (binfo == null) {
  return null;
 }
 b.setGenerationStamp(binfo.getGenerationStamp());
 b.setNumBytes(binfo.getNumBytes());
 return b;
}

Code example source: org.apache.hadoop/hadoop-hdfs

@Override
public void store(FileRegion token) throws IOException {
 final Block block = token.getBlock();
 final ProvidedStorageLocation psl = token.getProvidedStorageLocation();
 out.append(String.valueOf(block.getBlockId())).append(delim);
 out.append(psl.getPath().toString()).append(delim);
 out.append(Long.toString(psl.getOffset())).append(delim);
 out.append(Long.toString(psl.getLength())).append(delim);
 out.append(Long.toString(block.getGenerationStamp()));
 if (psl.getNonce().length > 0) {
  out.append(delim)
    .append(Base64.getEncoder().encodeToString(psl.getNonce()));
 }
 out.append("\n");
}
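Assuming delim is a comma, each stored FileRegion becomes one delimited line in the order appended above: blockId, path, offset, length, genStamp, plus a Base64 nonce when present. A made-up example line:

1073741825,/data/part-00000,0,134217728,1001,3q2+7w==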

Code example source: org.apache.hadoop/hadoop-hdfs-test

private void corruptBlockGS(final Block block)
 throws IOException {
 if (block == null) {
  throw new IOException("Block isn't supposed to be null");
 }
 long oldGS = block.getGenerationStamp();
 long newGS = oldGS - rand.nextLong();
 assertTrue("Old and new GS shouldn't be the same",
  block.getGenerationStamp() != newGS);
 block.setGenerationStamp(newGS);
 if(LOG.isDebugEnabled()) {
  LOG.debug("Generation stamp of " + block.getBlockName() +
    " is changed to " + block.getGenerationStamp() + " from " + oldGS);
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

// Fragment of a directory-scan loop, reassembled; elided parts marked "...".
if (!Block.isBlockFilename(file)) {
 if (isBlockMetaFile(Block.BLOCK_FILE_PREFIX, file.getName())) {
  long blockId = Block.getBlockId(file.getName());
  verifyFileLocation(file.getParentFile(), bpFinalizedDir,
    blockId);
  // ...
 }
 continue;
}
long blockId = Block.filename2id(file.getName());
File metaFile = null;
// ...

Code example source: org.apache.hadoop/hadoop-hdfs

// Fragment of a replica-loading loop; braces reassembled from the original.
if (!Block.isBlockFilename(file)) {
 continue;
}
long blockId = Block.filename2id(file.getName());
Block block = new Block(blockId, file.length(), genStamp);
addReplicaToReplicasMap(block, volumeMap, lazyWriteReplicaMap,
  isFinalized);

Code example source: com.facebook.hadoop/hadoop-core

public static void getBlockInfo(Block b, long[] blockArray, int index){
 b.setBlockId(blockArray[index2BlockId(index)]);
 b.setNumBytes(blockArray[index2BlockLen(index)]);
 b.setGenerationStamp(blockArray[index2BlockGenStamp(index)]);
}
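getBlockInfo unpacks one block from a flat long[] via the index2* helpers, which evidently map each block to three slots (ID, length, generation stamp). A hypothetical inverse that packs a block back into the same layout:

// Hypothetical counterpart of getBlockInfo, reusing the same index2* helpers.
public static void setBlockInfo(Block b, long[] blockArray, int index) {
 blockArray[index2BlockId(index)] = b.getBlockId();
 blockArray[index2BlockLen(index)] = b.getNumBytes();
 blockArray[index2BlockGenStamp(index)] = b.getGenerationStamp();
}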

Code example source: com.facebook.hadoop/hadoop-core

/**
 * Allocate a block at the given pending filename
 *
 * @param src    path to the file
 * @param inodes INode representing each of the components of src.
 *               <code>inodes[inodes.length-1]</code> is the INode for the file.
 */
private Block allocateBlock(String src, INode[] inodes) throws IOException {
 Block b = new Block(FSNamesystem.randBlockId.nextLong(), 0, 0);
 while (isValidBlock(b)) {
  b.setBlockId(FSNamesystem.randBlockId.nextLong());
 }
 b.setGenerationStamp(getGenerationStamp());
 b = dir.addBlock(src, inodes, b);
 return b;
}

Code example source: org.apache.hadoop/hadoop-hdfs

public BlockInfo getStoredBlock(Block block) {
 if (!BlockIdManager.isStripedBlockID(block.getBlockId())) {
  return blocksMap.getStoredBlock(block);
 }
 if (!hasNonEcBlockUsingStripedID) {
  return blocksMap.getStoredBlock(
    new Block(BlockIdManager.convertToStripedID(block.getBlockId())));
 }
 BlockInfo info = blocksMap.getStoredBlock(block);
 if (info != null) {
  return info;
 }
 return blocksMap.getStoredBlock(
   new Block(BlockIdManager.convertToStripedID(block.getBlockId())));
}
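This handles the erasure-coding case: an internal block's ID is the group ID plus its index (see getInternalBlock above), so convertToStripedID presumably masks those index bits back off to recover the group key stored in the blocksMap. A rough sketch of the idea, assuming a hypothetical 4-bit index field:

long groupId = 0x8000000000000020L;  // hypothetical striped group ID
long internalId = groupId + 2;       // internal block #2 of the group
long recovered = internalId & ~0xFL; // strips the assumed index bits; == groupId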

Code example source: org.apache.hadoop/hadoop-hdfs

@Override
public boolean equals(Object o) {
 return super.equals(o);
}

Code example source: org.apache.hadoop/hadoop-hdfs

public FileRegion(long blockId, Path path, long offset,
         long length, long genStamp, byte[] nonce) {
 this(new Block(blockId, length, genStamp),
     new ProvidedStorageLocation(path, offset, length, nonce));
}
