Usage and code examples of the org.apache.hadoop.hdfs.protocol.Block.<init>() method


This article collects a number of Java code examples for the org.apache.hadoop.hdfs.protocol.Block.<init>() method and shows how Block.<init>() is used in practice. The examples come from selected open-source projects on platforms such as GitHub, Stack Overflow and Maven, so they should make a useful reference. Details of the Block.<init>() method:
Package path: org.apache.hadoop.hdfs.protocol.Block
Class name: Block
Method name: <init>

About Block.<init>

A Block identifies an HDFS block by its block ID, its length in bytes and its generation stamp. The constructor has several overloads, all of which appear in the examples below: a no-argument form (the fields are filled in later, e.g. via readFields()), a form taking only the block ID, a three-argument form taking the block ID, the length in bytes and the generation stamp, and a copy constructor that duplicates an existing Block.
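
The snippets below use all four overloads. As a quick orientation, here is a minimal, self-contained sketch of each one; the block ID, length and generation stamp values are made up for illustration:

import org.apache.hadoop.hdfs.protocol.Block;

public class BlockInitExample {
 public static void main(String[] args) {
  // No-arg constructor: an empty Block, typically populated later via
  // readFields() when deserializing (see readBlocks below).
  Block empty = new Block();

  // Block-ID-only constructor: length and generation stamp keep their defaults.
  Block byId = new Block(1073741825L);                    // hypothetical ID

  // Full constructor: block ID, length in bytes, generation stamp.
  Block full = new Block(1073741825L, 134217728L, 1001L); // hypothetical values

  // Copy constructor: an independent copy that can be mutated safely,
  // the pattern several snippets below rely on.
  Block copy = new Block(full);
  copy.setBlockId(copy.getBlockId() + 1);

  System.out.println(empty + " | " + byId + " | " + full + " | " + copy);
 }
}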

Code examples

Code example source: org.apache.hadoop/hadoop-hdfs

public FileRegion(long blockId, Path path, long offset,
         long length, long genStamp, byte[] nonce) {
 this(new Block(blockId, length, genStamp),
     new ProvidedStorageLocation(path, offset, length, nonce));
}

Code example source: org.apache.hadoop/hadoop-hdfs

private static Block[] readBlocks(
  DataInputStream in,
  int logVersion) throws IOException {
 int numBlocks = in.readInt();
 if (numBlocks < 0) {
  throw new IOException("invalid negative number of blocks");
 } else if (numBlocks > MAX_BLOCKS) {
  throw new IOException("invalid number of blocks: " + numBlocks +
    ".  The maximum number of blocks per file is " + MAX_BLOCKS);
 }
 Block[] blocks = new Block[numBlocks];
 for (int i = 0; i < numBlocks; i++) {
  Block blk = new Block();
  blk.readFields(in);
  blocks[i] = blk;
 }
 return blocks;
}
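
Here the no-argument constructor pairs with readFields(): an empty Block is allocated for each entry and then populated from the serialized stream (an edit-log op, judging by the logVersion parameter).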

Code example source: org.apache.hadoop/hadoop-hdfs

void enqueueReportedBlock(DatanodeStorageInfo storageInfo, Block block,
  ReplicaState reportedState) {
 block = new Block(block);
 getBlockQueue(block).add(
   new ReportedBlockInfo(storageInfo, block, reportedState));
 count++;
}

Code example source: org.apache.hadoop/hadoop-hdfs

public static Block blockFromXml(Stanza st)
  throws InvalidXmlException {
 long blockId = Long.parseLong(st.getValue("BLOCK_ID"));
 long numBytes = Long.parseLong(st.getValue("NUM_BYTES"));
 long generationStamp = Long.parseLong(st.getValue("GENSTAMP"));
 return new Block(blockId, numBytes, generationStamp);
}

Code example source: org.apache.hadoop/hadoop-hdfs

/**
 * Identify the block stored in the given datanode storage. Note that
 * the returned block has the same block Id with the one seen/reported by the
 * DataNode.
 */
Block getBlockOnStorage(DatanodeStorageInfo storage) {
 int index = getStorageBlockIndex(storage);
 if (index < 0) {
  return null;
 } else {
  Block block = new Block(this);
  block.setBlockId(this.getBlockId() + index);
  return block;
 }
}
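
The copy constructor produces a detached Block, and setBlockId() then offsets its ID by the storage's block index; for a striped group this yields the internal block ID that the DataNode actually reports, without mutating the stored BlockInfo.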

Code example source: org.apache.hadoop/hadoop-hdfs

// Truncated excerpt: both branches copy the reported replica with the
// Block(Block) copy constructor before queueing it.
  toUC.add(new StatefulBlockInfo(storedBlock, new Block(replica),
      reportedState));
} else if (reportedState == ReplicaState.FINALIZED
    /* rest of the condition is elided in the source */) {
  toAdd.add(new BlockInfoToAdd(storedBlock, new Block(replica)));
}

Code example source: org.apache.hadoop/hadoop-hdfs

public static Block[] readCompactBlockArray(
  DataInput in, int logVersion) throws IOException {
 int num = WritableUtils.readVInt(in);
 if (num < 0) {
  throw new IOException("Invalid block array length: " + num);
 }
 Block prev = null;
 Block[] ret = new Block[num];
 for (int i = 0; i < num; i++) {
  long id = in.readLong();
  long sz = WritableUtils.readVLong(in) +
    ((prev != null) ? prev.getNumBytes() : 0);
  long gs = WritableUtils.readVLong(in) +
    ((prev != null) ? prev.getGenerationStamp() : 0);
  ret[i] = new Block(id, sz, gs);
  prev = ret[i];
 }
 return ret;
}
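
Note the compact encoding this loop undoes: each block's size and generation stamp are stored as variable-length deltas against the previous entry, so arrays of similarly sized blocks that share a generation stamp serialize to only a few bytes.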

Code example source: org.apache.hadoop/hadoop-hdfs

@Override // FsDatasetSpi
public Block getStoredBlock(String bpid, long blkid)
  throws IOException {
 try (AutoCloseableLock lock = datasetLock.acquire()) {
  ReplicaInfo r = volumeMap.get(bpid, blkid);
  if (r == null) {
   return null;
  }
  return new Block(blkid, r.getBytesOnDisk(), r.getGenerationStamp());
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

/**
 * Create new block with a unique block id and a new generation stamp.
 * @param blockType is the file under striping or contiguous layout?
 */
Block createNewBlock(BlockType blockType) throws IOException {
 assert hasWriteLock();
 Block b = new Block(nextBlockId(blockType), 0, 0);
 // Increment the generation stamp for every new block.
 b.setGenerationStamp(nextGenerationStamp(false));
 return b;
}

Code example source: org.apache.hadoop/hadoop-hdfs

static LocatedBlock makeLocatedBlock(FSNamesystem fsn, BlockInfo blk,
  DatanodeStorageInfo[] locs, long offset) throws IOException {
 LocatedBlock lBlk = BlockManager.newLocatedBlock(
   fsn.getExtendedBlock(new Block(blk)), blk, locs, offset);
 fsn.getBlockManager().setBlockToken(lBlk,
   BlockTokenIdentifier.AccessMode.WRITE);
 return lBlk;
}

Code example source: org.apache.hadoop/hadoop-hdfs

/** Set expected locations */
public void setExpectedLocations(Block block, DatanodeStorageInfo[] targets,
  BlockType blockType) {
 if (targets == null) {
  return;
 }
 int numLocations = 0;
 for (DatanodeStorageInfo target : targets) {
  if (target != null) {
   numLocations++;
  }
 }
 this.replicas = new ReplicaUnderConstruction[numLocations];
 int offset = 0;
 for(int i = 0; i < targets.length; i++) {
  if (targets[i] != null) {
   // when creating a new striped block we simply sequentially assign block
   // index to each storage
   Block replicaBlock = blockType == BlockType.STRIPED ?
     new Block(block.getBlockId() + i, 0, block.getGenerationStamp()) :
     block;
   replicas[offset++] = new ReplicaUnderConstruction(replicaBlock,
     targets[i], ReplicaState.RBW);
  }
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

@Override // NumberGenerator
public long nextValue() {
 Block b = new Block(super.nextValue());
 // There may be an occasional conflict with randomly generated
 // block IDs. Skip over the conflicts.
 while(isValidBlock(b)) {
  b.setBlockId(super.nextValue());
 }
 if (b.getBlockId() < 0) {
  throw new IllegalStateException("All positive block IDs are used, " +
    "wrapping to negative IDs, " +
    "which might conflict with erasure coded block groups.");
 }
 return b.getBlockId();
}
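
Here the Block built from the candidate ID is only a probe: isValidBlock() is consulted to skip sequential IDs that happen to collide with legacy, randomly generated block IDs, as the inline comment explains.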

Code example source: org.apache.hadoop/hadoop-hdfs

public BlockInfo getStoredBlock(Block block) {
 if (!BlockIdManager.isStripedBlockID(block.getBlockId())) {
  return blocksMap.getStoredBlock(block);
 }
 if (!hasNonEcBlockUsingStripedID) {
  return blocksMap.getStoredBlock(
    new Block(BlockIdManager.convertToStripedID(block.getBlockId())));
 }
 BlockInfo info = blocksMap.getStoredBlock(block);
 if (info != null) {
  return info;
 }
 return blocksMap.getStoredBlock(
   new Block(BlockIdManager.convertToStripedID(block.getBlockId())));
}

Code example source: org.apache.hadoop/hadoop-hdfs

// Truncated excerpt (DataNode replica-map loading): the statement that
// computes genStamp from the directory listing is cut off in the source.
long blockId = Block.filename2id(file.getName());
Block block = new Block(blockId, file.length(), genStamp);
addReplicaToReplicasMap(block, volumeMap, lazyWriteReplicaMap,
    isFinalized);

Code example source: org.apache.hadoop/hadoop-hdfs

private void createReplicationWork(int sourceIndex,
  DatanodeStorageInfo target) {
 BlockInfoStriped stripedBlk = (BlockInfoStriped) getBlock();
 final byte blockIndex = liveBlockIndicies[sourceIndex];
 final DatanodeDescriptor source = getSrcNodes()[sourceIndex];
 final long internBlkLen = StripedBlockUtil.getInternalBlockLength(
   stripedBlk.getNumBytes(), stripedBlk.getCellSize(),
   stripedBlk.getDataBlockNum(), blockIndex);
 final Block targetBlk = new Block(stripedBlk.getBlockId() + blockIndex,
   internBlkLen, stripedBlk.getGenerationStamp());
 source.addBlockToBeReplicated(targetBlk,
   new DatanodeStorageInfo[] {target});
 if (BlockManager.LOG.isDebugEnabled()) {
  BlockManager.LOG.debug("Add replication task from source {} to "
    + "target {} for EC block {}", source, target, targetBlk);
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

// Truncated excerpt: a reusable empty Block is created, and all but the last
// of numBlocks entries are read in a loop whose body is cut off in the source.
Block blk = new Block();
int i = 0;
for (; i < numBlocks - 1; i++) {
  // ... (loop body elided)
}

Code example source: org.apache.hadoop/hadoop-hdfs

public DBlock getInternalBlock(StorageGroup storage) {
 int idxInLocs = locations.indexOf(storage);
 if (idxInLocs == -1) {
  return null;
 }
 byte idxInGroup = indices[idxInLocs];
 long blkId = getBlock().getBlockId() + idxInGroup;
 long numBytes = getInternalBlockLength(getNumBytes(), cellSize,
   dataBlockNum, idxInGroup);
 Block blk = new Block(getBlock());
 blk.setBlockId(blkId);
 blk.setNumBytes(numBytes);
 DBlock dblk = new DBlock(blk);
 dblk.addLocation(storage);
 return dblk;
}
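
This uses the same ID arithmetic as getBlockOnStorage() above: the internal block of an erasure-coded group is addressed as the group's block ID plus the block index, its length is recomputed with getInternalBlockLength(), and the copy constructor carries over the generation stamp while the ID and size are overwritten.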

Code example source: org.apache.hadoop/hadoop-hdfs

@Override // NumberGenerator
public long nextValue() {
 skipTo((getCurrentValue() & ~BLOCK_GROUP_INDEX_MASK) + MAX_BLOCKS_IN_GROUP);
 // Make sure there's no conflict with existing random block IDs
 final Block b = new Block(getCurrentValue());
 while (hasValidBlockInRange(b)) {
  skipTo(getCurrentValue() + MAX_BLOCKS_IN_GROUP);
  b.setBlockId(getCurrentValue());
 }
 if (b.getBlockId() >= 0) {
  throw new IllegalStateException("All negative block group IDs are used, "
    + "growing into positive IDs, "
    + "which might conflict with non-erasure coded blocks.");
 }
 return getCurrentValue();
}
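
The block-group variant works in negative ID space and advances by MAX_BLOCKS_IN_GROUP at a time, so each erasure-coded group reserves a contiguous range for its internal block IDs; once again a Block constructed from the candidate value serves only to probe for collisions with existing blocks.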

Code example source: org.apache.hadoop/hadoop-hdfs

// Excerpt: derive the internal block of an erasure-coded block group from the
// group's ExtendedBlock by offsetting the block ID with the internal index.
long numBytes = StripedBlockUtil.getInternalBlockLength(
    extBlock.getNumBytes(), ecPolicy, blkIndex);
Block blk = new Block(ExtendedBlock.getLocalBlock(extBlock));
long blkId = blk.getBlockId() + blkIndex;
blk.setBlockId(blkId);

Code example source: org.apache.hadoop/hadoop-hdfs

// Excerpt: copy the corrupted block; for striped (EC) blocks the reported ID
// is replaced by the stored block group's ID.
Block corrupted = new Block(b.getCorrupted());
if (b.getStored().isStriped()) {
  corrupted.setBlockId(b.getStored().getBlockId());
}
