Usage of the org.apache.hadoop.hbase.regionserver.HStore.createWriterInTmp() method, with code examples

This article collects Java code examples showing how the org.apache.hadoop.hbase.regionserver.HStore.createWriterInTmp() method is used. The snippets are extracted from selected open-source projects hosted on platforms such as GitHub and Maven, and are intended as practical references. Method details:

Package path: org.apache.hadoop.hbase.regionserver
Class name: HStore
Method name: createWriterInTmp

About HStore.createWriterInTmp

The original listing carried no description. Judging from the examples below, the method creates a StoreFileWriter for a new StoreFile in the store's temporary (.tmp) directory; it is called both when flushing a memstore and when writing the output of a compaction.
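
As a reading aid, here is the shape of the six-argument variant that most examples below call. The parameter names are taken from the five-argument overload shown near the end of this article, plus the shouldDropBehind flag from the first example; treat this as a reconstruction, not the authoritative declaration:

public StoreFileWriter createWriterInTmp(
    long maxKeyCount,                  // estimated number of cells the file will hold
    Compression.Algorithm compression, // compression algorithm for the new HFile
    boolean isCompaction,              // true when writing compaction output
    boolean includeMVCCReadpoint,      // whether to persist MVCC readpoints (see HBASE-8166)
    boolean includesTag,               // whether cells may carry tags
    boolean shouldDropBehind)          // hint to drop page cache behind the writes
  throws IOException;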

Code examples

Code example source: origin: apache/hbase

/**
 * Creates a writer for a new file in a temporary directory.
 * @param fd The file details.
 * @return Writer for a new StoreFile in the tmp dir.
 * @throws IOException if creation failed
 */
protected final StoreFileWriter createTmpWriter(FileDetails fd, boolean shouldDropBehind)
    throws IOException {
  // When all MVCC readpoints are 0, don't write them.
  // See HBASE-8166, HBASE-12600, and HBASE-13389.
  return store.createWriterInTmp(fd.maxKeyCount, this.compactionCompression, true,
      fd.maxMVCCReadpoint > 0, fd.maxTagsLength > 0, shouldDropBehind);
}
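
Here, fd is the compactor's FileDetails summary of the files being compacted. A minimal stand-in with just the fields this snippet reads (the real class in HBase carries more aggregate statistics):

// Minimal stand-in for Compactor.FileDetails, showing only the fields used above.
static class FileDetails {
  long maxKeyCount;      // upper bound on the number of cells across the input files
  long maxMVCCReadpoint; // largest MVCC readpoint seen; 0 means none need persisting
  int maxTagsLength;     // largest serialized tag length; 0 means no cell has tags
}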

Code example source: origin: apache/hbase

@Override
 public StoreFileWriter createWriter(InternalScanner scanner,
   org.apache.hadoop.hbase.regionserver.compactions.Compactor.FileDetails fd,
   boolean shouldDropBehind) throws IOException {
  // make this writer with tags always because of possible new cells with tags.
  return store.createWriterInTmp(fd.maxKeyCount, compactionCompression, true, true, true,
   shouldDropBehind);
 }
};

Code example source: origin: apache/hbase

@Override
 public StoreFileWriter createWriter() throws IOException {
  StoreFileWriter writer = store.createWriterInTmp(kvCount,
    store.getColumnFamilyDescriptor().getCompressionType(), false, true, true, false);
  return writer;
 }
};
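
Placing this flush-path call next to the compaction-path call from the first example makes the flag conventions visible (values copied from the snippets in this article):

// Flush path: the family's configured compression, not a compaction.
store.createWriterInTmp(kvCount,
    store.getColumnFamilyDescriptor().getCompressionType(),
    false /* isCompaction */, true /* includeMVCCReadpoint */,
    true /* includesTag */, false /* shouldDropBehind */);

// Compaction path: compaction-specific compression; MVCC readpoints and
// tags are written only when the input files actually contain them.
store.createWriterInTmp(fd.maxKeyCount, compactionCompression,
    true /* isCompaction */, fd.maxMVCCReadpoint > 0,
    fd.maxTagsLength > 0, shouldDropBehind);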

Code example source: origin: apache/hbase

private StripeCompactor createCompactor() throws Exception {
 HColumnDescriptor col = new HColumnDescriptor(Bytes.toBytes("foo"));
 StoreFileWritersCapture writers = new StoreFileWritersCapture();
 HStore store = mock(HStore.class);
 HRegionInfo info = mock(HRegionInfo.class);
 when(info.getRegionNameAsString()).thenReturn("testRegion");
 when(store.getColumnFamilyDescriptor()).thenReturn(col);
 when(store.getRegionInfo()).thenReturn(info);
 when(
  store.createWriterInTmp(anyLong(), any(), anyBoolean(),
   anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
 Configuration conf = HBaseConfiguration.create();
 conf.setBoolean("hbase.regionserver.compaction.private.readers", usePrivateReaders);
 final Scanner scanner = new Scanner();
 return new StripeCompactor(conf, store) {
  @Override
  protected InternalScanner createScanner(HStore store, ScanInfo scanInfo,
    List<StoreFileScanner> scanners, long smallestReadPoint, long earliestPutTs,
    byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
   return scanner;
  }
  @Override
  protected InternalScanner createScanner(HStore store, ScanInfo scanInfo,
    List<StoreFileScanner> scanners, ScanType scanType, long smallestReadPoint,
    long earliestPutTs) throws IOException {
   return scanner;
  }
 };
}
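
StoreFileWritersCapture here is an HBase test utility handed to thenAnswer(). The underlying Mockito pattern (org.mockito.stubbing.Answer) can be sketched independently; the names below are illustrative, not the real utility:

// Illustrative Answer: return a mock writer and record each call's arguments.
List<Object[]> capturedArgs = new ArrayList<>();
Answer<StoreFileWriter> capture = invocation -> {
  capturedArgs.add(invocation.getArguments());
  return mock(StoreFileWriter.class);
};
when(store.createWriterInTmp(anyLong(), any(), anyBoolean(),
  anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(capture);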

Code example source: origin: apache/hbase

when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
when(store.createWriterInTmp(anyLong(), any(), anyBoolean(),
 anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
when(store.getComparator()).thenReturn(CellComparatorImpl.COMPARATOR);

Code example source: origin: apache/hbase

@Test
public void testCacheOnWriteInSchema() throws IOException {
 // Write some random data into the store
 StoreFileWriter writer = store.createWriterInTmp(Integer.MAX_VALUE,
   HFile.DEFAULT_COMPRESSION_ALGORITHM, false, true, false, false);
 writeStoreFile(writer);
 writer.close();
 // Verify the block types of interest were cached on write
 readStoreFile(writer.getPath());
}
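
writeStoreFile() and readStoreFile() are helpers defined elsewhere in this test class and are not part of the snippet. Purely as an assumption of what the writer half does, a minimal stand-in could look like this:

// Hypothetical stand-in for the test's writeStoreFile(...) helper: append
// enough ordered cells that data, index, and bloom blocks get written.
private static void writeStoreFile(StoreFileWriter writer) throws IOException {
  byte[] family = Bytes.toBytes("f");
  byte[] qualifier = Bytes.toBytes("q");
  for (int i = 0; i < 1000; i++) {
    byte[] row = Bytes.toBytes(String.format("row%05d", i)); // ascending row order
    writer.append(new KeyValue(row, family, qualifier, Bytes.toBytes(i)));
  }
}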

Code example source: origin: apache/hbase

status.setStatus("Flushing " + store + ": creating writer");
writer = store.createWriterInTmp(cellsCount, store.getColumnFamilyDescriptor().getCompressionType(),
  false, true, true, false);
IOException e = null;
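
The dangling IOException e = null; shows this fragment was cut just before a try/finally. A sketch of the surrounding pattern, with the two helper methods being assumptions rather than the exact HBase source:

IOException e = null;
try {
  performFlush(scanner, writer, smallestReadPoint); // hypothetical helper: drains the snapshot scanner into the writer
} catch (IOException ioe) {
  e = ioe;
  throw ioe;
} finally {
  if (e != null) {
    writer.close(); // abandon the half-written tmp file on failure
  } else {
    finalizeWriter(writer, cacheFlushId, status); // hypothetical helper: append metadata and close
  }
}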

Code example source: origin: apache/hbase

status.setStatus("Flushing " + store + ": creating writer");
writer = store.createWriterInTmp(cellsCount,
  store.getColumnFamilyDescriptor().getCompressionType(), false, true,
  snapshot.isTagsPresent(), false);

Code example source: origin: apache/hbase

/**
 * Verify that compression and data block encoding are respected by the
 * Store.createWriterInTmp() method, used on store flush.
 */
@Test
public void testCreateWriter() throws Exception {
 Configuration conf = HBaseConfiguration.create();
 FileSystem fs = FileSystem.get(conf);
 ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(family)
   .setCompressionType(Compression.Algorithm.GZ).setDataBlockEncoding(DataBlockEncoding.DIFF)
   .build();
 init(name.getMethodName(), conf, hcd);
 // Test createWriterInTmp()
 StoreFileWriter writer =
   store.createWriterInTmp(4, hcd.getCompressionType(), false, true, false, false);
 Path path = writer.getPath();
 writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
 writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
 writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
 writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
 writer.close();
 // Verify that compression and encoding settings are respected
 HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), true, conf);
 assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
 assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
 reader.close();
}

Code example source: origin: harbby/presto-connectors

@Override
public StoreFile.Writer createWriterInTmp(long maxKeyCount, Compression.Algorithm compression,
                     boolean isCompaction, boolean includeMVCCReadpoint,
                     boolean includesTag)
  throws IOException {
 return createWriterInTmp(maxKeyCount, compression, isCompaction, includeMVCCReadpoint,
   includesTag, false);
}
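
This delegating overload comes from an HBase 1.x-era fork, where the writer type was still StoreFile.Writer; it simply defaults shouldDropBehind to false, so the two calls below are equivalent:

StoreFile.Writer w1 = store.createWriterInTmp(1000, Compression.Algorithm.NONE,
  false, true, false);
StoreFile.Writer w2 = store.createWriterInTmp(1000, Compression.Algorithm.NONE,
  false, true, false, false /* shouldDropBehind */);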
