Usage of the org.apache.hadoop.hbase.client.Admin.majorCompact() method, with code examples


This article collects a number of Java code examples for the org.apache.hadoop.hbase.client.Admin.majorCompact() method and shows how it is used in practice. The examples are drawn from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the Admin.majorCompact() method are as follows:
Package: org.apache.hadoop.hbase.client
Class: Admin
Method: majorCompact

Overview of Admin.majorCompact

Major compact a table. This is an asynchronous operation: the call returns as soon as the compaction request has been submitted, while the compaction itself runs in the background on the region servers.
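
Because majorCompact() only submits the request, callers that need to know when the compaction is done usually poll Admin.getCompactionState(), as some of the excerpts below do. The following is a minimal, self-contained sketch of that pattern; the table name "my_table", the connection setup, and the polling interval are assumptions for illustration and are not taken from the excerpts.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MajorCompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
    TableName table = TableName.valueOf("my_table");   // placeholder table name
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submit the major compaction request; the call returns immediately.
      admin.majorCompact(table);
      // Poll until the table no longer reports a compaction in progress.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);
      }
    }
  }
}

Note that getCompactionState() may still report NONE for a short time before the region servers pick up the request, which is one reason several of the tests below wait on store file counts or the compaction queue size instead.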

Code examples

Code example source: apache/hbase

@Override
 public Void call() throws Exception {
  try (Connection conn = getConnection()) {
   conn.getAdmin().majorCompact(tn);
   return null;
  }
 }
});

Code example source: apache/hbase

@Override
 public void perform() throws Exception {
  HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
  Admin admin = util.getAdmin();
  boolean major = RandomUtils.nextInt(0, 100) < majorRatio;

  LOG.info("Performing action: Compact table " + tableName + ", major=" + major);
  try {
   if (major) {
    admin.majorCompact(tableName);
   } else {
    admin.compact(tableName);
   }
  } catch (Exception ex) {
   LOG.warn("Compaction failed, might be caused by other chaos: " + ex.getMessage());
  }
  if (sleepTime > 0) {
   Thread.sleep(sleepTime);
  }
 }
}

Code example source: apache/hbase

private long testCompactionWithoutThroughputLimit() throws Exception {
 Configuration conf = TEST_UTIL.getConfiguration();
 conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
 conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
 conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200);
 conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
 conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
  NoLimitThroughputController.class.getName());
 TEST_UTIL.startMiniCluster(1);
 try {
  HStore store = prepareData();
  assertEquals(10, store.getStorefilesCount());
  long startTime = System.currentTimeMillis();
  TEST_UTIL.getAdmin().majorCompact(tableName);
  while (store.getStorefilesCount() != 1) {
   Thread.sleep(20);
  }
  return System.currentTimeMillis() - startTime;
 } finally {
  TEST_UTIL.shutdownMiniCluster();
 }
}

Code example source: apache/hbase

@Override
 public void perform() throws Exception {
  HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
  Admin admin = util.getAdmin();
  boolean major = RandomUtils.nextInt(0, 100) < majorRatio;

  // Don't try the modify if we're stopping
  if (context.isStopping()) {
   return;
  }

  LOG.info("Performing action: Compact mob of table " + tableName + ", major=" + major);
  try {
   if (major) {
    admin.majorCompact(tableName, CompactType.MOB);
   } else {
    admin.compact(tableName, CompactType.MOB);
   }
  } catch (Exception ex) {
   LOG.warn("Mob Compaction failed, might be caused by other chaos: " + ex.getMessage());
  }
  if (sleepTime > 0) {
   Thread.sleep(sleepTime);
  }
 }
}

Code example source: apache/hbase

private void compactAndWait() throws IOException, InterruptedException {
 LOG.debug("Compacting table " + tableName);
 HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
 Admin admin = TEST_UTIL.getAdmin();
 admin.majorCompact(tableName);
 // Waiting for the compaction to start, at least .5s.
 final long maxWaitime = System.currentTimeMillis() + 500;
 boolean cont;
 do {
  cont = rs.compactSplitThread.getCompactionQueueSize() == 0;
  Threads.sleep(1);
 } while (cont && System.currentTimeMillis() < maxWaitime);
 while (rs.compactSplitThread.getCompactionQueueSize() > 0) {
  Threads.sleep(1);
 }
 LOG.debug("Compaction queue size reached 0, continuing");
}

Code example source: apache/hbase

assertEquals(10, store.getStorefilesCount());
long startTime = System.currentTimeMillis();
TEST_UTIL.getAdmin().majorCompact(tableName);
while (store.getStorefilesCount() != 1) {
 Thread.sleep(20);
}

Code example source: apache/hbase

@Test
public void testNoCompactions() throws Exception {
 Put p = new Put(Bytes.toBytes("to_reject"));
 p.addColumn(
   Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"), Bytes.toBytes("reject"));
 final TableName tn = writeUntilViolationAndVerifyViolation(
   SpaceViolationPolicy.NO_WRITES_COMPACTIONS, p);
 // We know the policy is active at this point
 // Major compactions should be rejected
 try {
  TEST_UTIL.getAdmin().majorCompact(tn);
  fail("Expected that invoking the compaction should throw an Exception");
 } catch (DoNotRetryIOException e) {
  // Expected!
 }
 // Minor compactions should also be rejected.
 try {
  TEST_UTIL.getAdmin().compact(tn);
  fail("Expected that invoking the compaction should throw an Exception");
 } catch (DoNotRetryIOException e) {
  // Expected!
 }
}

Code example source: apache/hbase

assertEquals("Before compaction: mob value of k0", newValue0,
 Bytes.toString(CellUtil.cloneValue(cell)));
admin.majorCompact(tableName, hcd1.getName(), CompactType.MOB);
waitUntilMobCompactionFinished(tableName);

Code example source: apache/hbase

TEST_UTIL.getAdmin().majorCompact(tableName);

Code example source: apache/hbase

/**
 * Verify that deleting the snapshot does not affect either table.
 */
private void runTestSnapshotDeleteIndependent() throws Exception {
 // Ensure the original table does not reference the HFiles anymore
 admin.majorCompact(originalTableName);
 // Deleting the snapshot used to break the cloned table by deleting in-use HFiles
 admin.deleteSnapshot(snapshotName);
 // Wait for cleaner run and DFS heartbeats so that anything that is deletable is fully deleted
 Pattern pattern = Pattern.compile(snapshotNameAsString);
 do {
  Thread.sleep(5000);
 } while (!admin.listSnapshots(pattern).isEmpty());
 try (Table original = UTIL.getConnection().getTable(originalTableName)) {
  try (Table clonedTable = UTIL.getConnection().getTable(cloneTableName)) {
   // Verify that all regions of both tables are readable
   final int origTableRowCount = countRows(original);
   final int clonedTableRowCount = countRows(clonedTable);
   Assert.assertEquals(origTableRowCount, clonedTableRowCount);
  }
 }
}

Code example source: apache/hbase

@Test
public void testInvalidColumnFamily() throws IOException, InterruptedException {
 final TableName tableName = TableName.valueOf(name.getMethodName());
 byte [] family = Bytes.toBytes("family");
 byte [] fakecf = Bytes.toBytes("fakecf");
 boolean caughtMinorCompact = false;
 boolean caughtMajorCompact = false;
 Table ht = null;
 try {
  ht = TEST_UTIL.createTable(tableName, family);
  Admin admin = TEST_UTIL.getAdmin();
  try {
   admin.compact(tableName, fakecf);
  } catch (IOException ioe) {
   caughtMinorCompact = true;
  }
  try {
   admin.majorCompact(tableName, fakecf);
  } catch (IOException ioe) {
   caughtMajorCompact = true;
  }
 } finally {
  if (ht != null) {
   TEST_UTIL.deleteTable(tableName);
  }
  assertTrue(caughtMinorCompact);
  assertTrue(caughtMajorCompact);
 }
}

Code example source: apache/hbase

@Test
public void testPurgeExpiredFiles() throws Exception {
 HStore store = prepareData();
 assertEquals(10, store.getStorefilesCount());
 TEST_UTIL.getAdmin().majorCompact(tableName);
 TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {
  @Override
  public boolean evaluate() throws Exception {
   return store.getStorefilesCount() == 1;
  }
  @Override
  public String explainFailure() throws Exception {
   return "The store file count " + store.getStorefilesCount() + " is still greater than 1";
  }
 });
}

Code example source: apache/hbase

/**
 * @throws java.lang.Exception
 */
@Before
public void setUp() throws Exception {
 table = TEST_UTIL.createTable(TABLENAME, FAMILY);
 // future timestamp
 for (int i = 0; i < numRows; i++) {
  long ts = System.currentTimeMillis() * 2;
  Put put = new Put(ROW, ts);
  put.addColumn(FAMILY, COLUMN, VALUE);
  table.put(put);
 }
 // major compaction, purged future deletes
 TEST_UTIL.getAdmin().flush(TABLENAME);
 TEST_UTIL.getAdmin().majorCompact(TABLENAME);
 // waiting for the major compaction to complete
 TEST_UTIL.waitFor(6000, new Waiter.Predicate<IOException>() {
  @Override
  public boolean evaluate() throws IOException {
   return TEST_UTIL.getAdmin().getCompactionState(TABLENAME) ==
     CompactionState.NONE;
  }
 });
 table.close();
}

Code example source: apache/hbase

admin.majorCompact(tableName, hcd1.getName(), CompactType.MOB);
} else {
 admin.compact(tableName, hcd1.getName(), CompactType.MOB);

Code example source: apache/hbase

@Override
@Test
public void loadTest() throws Exception {
 Admin admin = TEST_UTIL.getAdmin();
 compression = Compression.Algorithm.GZ; // used for table setup
 super.loadTest();
 HColumnDescriptor hcd = getColumnDesc(admin);
 System.err.println("\nDisabling encode-on-disk. Old column descriptor: " + hcd + "\n");
 Table t = TEST_UTIL.getConnection().getTable(TABLE);
 assertAllOnLine(t);
 admin.disableTable(TABLE);
 admin.modifyColumnFamily(TABLE, hcd);
 System.err.println("\nRe-enabling table\n");
 admin.enableTable(TABLE);
 System.err.println("\nNew column descriptor: " +
   getColumnDesc(admin) + "\n");
 // The table may not have all regions on line yet.  Assert online before
 // moving to major compact.
 assertAllOnLine(t);
 System.err.println("\nCompacting the table\n");
 admin.majorCompact(TABLE);
 // Wait until compaction completes
 Threads.sleepWithoutInterrupt(5000);
 HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
 while (rs.compactSplitThread.getCompactionQueueSize() > 0) {
  Threads.sleep(50);
 }
 System.err.println("\nDone with the test, shutting down the cluster\n");
}

Code example source: apache/hbase

@Test
 public void test() throws IOException, KeeperException, InterruptedException {
  long now = System.currentTimeMillis();
  put(0, 100, now - 10000);
  assertValueEquals(0, 100);

  setExpireBefore(now - 5000);
  Thread.sleep(5000);
  UTIL.getAdmin().flush(NAME);
  assertNotExists(0, 100);

  put(0, 50, now - 1000);
  UTIL.getAdmin().flush(NAME);
  put(50, 100, now - 100);
  UTIL.getAdmin().flush(NAME);
  assertValueEquals(0, 100);

  setExpireBefore(now - 500);
  Thread.sleep(5000);
  UTIL.getAdmin().majorCompact(NAME);
  UTIL.waitFor(30000, () -> UTIL.getHBaseCluster().getRegions(NAME).iterator().next()
    .getStore(FAMILY).getStorefilesCount() == 1);
  assertNotExists(0, 50);
  assertValueEquals(50, 100);
 }
}

Code example source: apache/hbase

@Test
public void testRawScanAndMajorCompaction() throws IOException {
 try (Table t = createTable()) {
  t.put(new Put(ROW).addColumn(FAMILY, col1, 1000001, value));
  t.put(new Put(ROW).addColumn(FAMILY, col1, 1000002, value));
  t.put(new Put(ROW).addColumn(FAMILY, col1, 1000003, value));
  t.put(new Put(ROW).addColumn(FAMILY, col1, 1000004, value));
  t.delete(new Delete(ROW).addColumn(FAMILY, col1, 1000004));
  t.delete(new Delete(ROW).addColumn(FAMILY, col1, 1000003));
  try (ResultScanner scannner = t.getScanner(new Scan().setRaw(true).setMaxVersions())) {
   Result r = scannner.next();
   assertNull(scannner.next());
   assertEquals(6, r.size());
  }
  TEST_UTIL.getAdmin().flush(t.getName());
  try (ResultScanner scannner = t.getScanner(new Scan().setRaw(true).setMaxVersions())) {
   Result r = scannner.next();
   assertNull(scannner.next());
   assertEquals(6, r.size());
  }
  TEST_UTIL.getAdmin().majorCompact(t.getName());
  Threads.sleep(5000);
  try (ResultScanner scannner = t.getScanner(new Scan().setRaw(true).setMaxVersions())) {
   Result r = scannner.next();
   assertNull(scannner.next());
   assertEquals(1, r.size());
   assertEquals(1000002, r.rawCells()[0].getTimestamp());
  }
 }
}

Code example source: apache/hbase

ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
UTIL.getAdmin().majorCompact(tableName);

Code example source: apache/hbase

@Test
public void testMajorCompaction() throws Exception {
 TableName tn = helper.createTableWithRegions(1);
 // Set a quota
 QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(
   tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS);
 admin.setQuota(settings);
 // Write some data and flush it to disk.
 final long sizePerBatch = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE;
 helper.writeData(tn, sizePerBatch);
 admin.flush(tn);
 // Write the same data again, flushing it to a second file
 helper.writeData(tn, sizePerBatch);
 admin.flush(tn);
 // After two flushes, both hfiles would contain similar data. We should see 2x the data.
 TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
  @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
   return snapshot.getUsage() >= 2L * sizePerBatch;
  }
 });
 // Rewrite the two files into one.
 admin.majorCompact(tn);
 // After we major compact the table, we should notice quickly that the amount of data in the
 // table is much closer to reality (the duplicate entries across the two files are removed).
 TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
  @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
   return snapshot.getUsage() >= sizePerBatch && snapshot.getUsage() <= 2L * sizePerBatch;
  }
 });
}
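
Several of the excerpts above also use the overloads that restrict a major compaction to a single column family or to a table's MOB data. Below is a minimal sketch of those calls; the column family name "cf" is a placeholder, and the Admin instance is assumed to come from Connection.getAdmin() as in the examples above.

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactType;
import org.apache.hadoop.hbase.util.Bytes;

public class MajorCompactOverloadsSketch {
  // Sketch only: 'admin' and 'table' are assumed to be set up elsewhere.
  static void majorCompactVariants(Admin admin, TableName table)
      throws IOException, InterruptedException {
    byte[] family = Bytes.toBytes("cf");                 // placeholder column family name
    admin.majorCompact(table, family);                   // major compact only this column family
    admin.majorCompact(table, CompactType.MOB);          // major compact the table's MOB files
    admin.majorCompact(table, family, CompactType.MOB);  // major compact MOB files of one family
  }
}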
