Usage and code examples of org.apache.hadoop.hbase.HBaseTestingUtility.deleteTable()


This article collects code examples of the Java method org.apache.hadoop.hbase.HBaseTestingUtility.deleteTable() and shows how it is used in practice. The examples come from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they are useful as references. The details of HBaseTestingUtility.deleteTable() are as follows:
Package: org.apache.hadoop.hbase
Class: HBaseTestingUtility
Method: deleteTable

HBaseTestingUtility.deleteTable overview

Drops an existing table.
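The pattern that recurs in the examples below is: create a table through HBaseTestingUtility, exercise it, then call deleteTable() during cleanup. The following is a minimal sketch of that lifecycle, not taken from any of the quoted tests; the mini-cluster setup, table name and column family are illustrative placeholders.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteTableSketch {
 // Hypothetical shared test utility; real tests usually hold a static instance.
 private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

 public static void main(String[] args) throws Exception {
  TEST_UTIL.startMiniCluster();
  TableName tn = TableName.valueOf("example_table"); // placeholder table name
  Table table = TEST_UTIL.createTable(tn, Bytes.toBytes("cf")); // placeholder family
  try {
   // Exercise the table: a single put as a stand-in for real test logic.
   Put p = new Put(Bytes.toBytes("r1"));
   p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
   table.put(p);
  } finally {
   // deleteTable() disables the table if necessary and then drops it.
   TEST_UTIL.deleteTable(tn);
   TEST_UTIL.shutdownMiniCluster();
  }
 }
}

As several of the quoted tests show, the deleteTable() call typically sits in a finally block or in an @Before/@After method, so the table is cleaned up even when the test body fails.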

Code examples

Code example source: apache/hbase

@Test
public void testCreateAndDrop() throws Exception {
 TEST_UTIL.createTable(tableName, Bytes.toBytes("cf"));
 // wait for created table to be assigned
 TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
  @Override
  public boolean evaluate() throws Exception {
   return getTableRegionMap().get(tableName) != null;
  }
 });
 TEST_UTIL.deleteTable(tableName);
}

Code example source: apache/hbase

@Test
public void testDryModeWithoutBulkOutputAndTableExists() throws Exception {
 util.createTable(tn, FAMILY);
 args.put(ImportTsv.DRY_RUN_CONF_KEY, "true");
 doMROnTableTest(null, 1);
 // Dry mode should not delete an existing table. If it's not present,
 // this will throw TableNotFoundException.
 util.deleteTable(tn);
}

Code example source: apache/hbase

@Test
public void testSkipEmptyColumns() throws Exception {
 Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles");
 args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString());
 args.put(ImportTsv.COLUMNS_CONF_KEY, "HBASE_ROW_KEY,HBASE_TS_KEY,FAM:A,FAM:B");
 args.put(ImportTsv.SEPARATOR_CONF_KEY, ",");
 args.put(ImportTsv.SKIP_EMPTY_COLUMNS, "true");
 // 2 rows of data as input. Both rows are valid, and only 3 of the 4 columns are non-empty.
 String data = "KEY,1234,VALUE1,VALUE2\nKEY,1235,,VALUE2\n";
 doMROnTableTest(util, tn, FAMILY, data, args, 1, 3);
 util.deleteTable(tn);
}

Code example source: apache/hbase

@Test
public void testRestoreSnapshotOfCloned() throws IOException, InterruptedException {
 TableName clonedTableName = TableName.valueOf("clonedtb-" + System.currentTimeMillis());
 admin.cloneSnapshot(snapshotName0, clonedTableName);
 verifyRowCount(UTIL, clonedTableName, snapshot0Rows);
 admin.snapshot(Bytes.toString(snapshotName2), clonedTableName, SnapshotType.FLUSH);
 UTIL.deleteTable(clonedTableName);
 admin.cloneSnapshot(snapshotName2, clonedTableName);
 verifyRowCount(UTIL, clonedTableName, snapshot0Rows);
 UTIL.deleteTable(clonedTableName);
}

Code example source: apache/hbase

@Test
public void testCreateDeleteTable() throws IOException {
 // Create table then get the single region for our new table.
 HTableDescriptor hdt = HTU.createTableDescriptor("testCreateDeleteTable");
 hdt.setRegionReplication(NB_SERVERS);
 hdt.addCoprocessor(SlowMeCopro.class.getName());
 Table table = HTU.createTable(hdt, new byte[][]{f}, null);
 Put p = new Put(row);
 p.addColumn(f, row, row);
 table.put(p);
 Get g = new Get(row);
 Result r = table.get(g);
 Assert.assertFalse(r.isStale());
 try {
  // But if we ask for stale we will get it
  SlowMeCopro.cdl.set(new CountDownLatch(1));
  g = new Get(row);
  g.setConsistency(Consistency.TIMELINE);
  r = table.get(g);
  Assert.assertTrue(r.isStale());
  SlowMeCopro.cdl.get().countDown();
 } finally {
  SlowMeCopro.cdl.get().countDown();
  SlowMeCopro.sleepTime.set(0);
 }
 HTU.getAdmin().disableTable(hdt.getTableName());
 HTU.deleteTable(hdt.getTableName());
}

Code example source: apache/hbase

private void setQuotaAndThenDropTable(SpaceViolationPolicy policy) throws Exception {
 Put put = new Put(Bytes.toBytes("to_reject"));
 put.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"),
  Bytes.toBytes("reject"));
 // Do puts until we violate space policy
 final TableName tn = writeUntilViolationAndVerifyViolation(policy, put);
 // Now, drop the table
 TEST_UTIL.deleteTable(tn);
 LOG.debug("Successfully deleted table ", tn);
 // Now re-create the table
 TEST_UTIL.createTable(tn, Bytes.toBytes(SpaceQuotaHelperForTests.F1));
 LOG.debug("Successfully re-created table ", tn);
 // Put some rows now: should not violate as table/quota was dropped
 verifyNoViolation(policy, tn, put);
}

Code example source: apache/hbase

@Before
public void beforeMethod() throws Exception {
 Admin admin = TEST_UTIL.getAdmin();
 if (admin.tableExists(TABLE_NAME)) {
  TEST_UTIL.deleteTable(TABLE_NAME);
 }
 HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
 htd.addFamily(new HColumnDescriptor(CFA));
 htd.addFamily(new HColumnDescriptor(CFB));
 admin.createTable(htd);
}

Code example source: apache/hbase

private long testFlushWithThroughputLimit() throws Exception {
 final long throughputLimit = 1024 * 1024;
 setMaxMinThroughputs(throughputLimit, throughputLimit);
 Configuration conf = hbtu.getConfiguration();
 conf.setLong(
  PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL,
  throughputLimit);
 hbtu.startMiniCluster(1);
 Table table = hbtu.createTable(tableName, family);
 Pair<Double, Long> result = generateAndFlushData(table);
 hbtu.deleteTable(tableName);
 LOG.debug("Throughput is: " + (result.getFirst() / 1024 / 1024) + " MB/s");
 // confirm that the speed limit works properly (not too fast, and also not too slow)
 // 20% is the max acceptable error rate.
 assertTrue(result.getFirst() < throughputLimit * 1.2);
 assertTrue(result.getFirst() > throughputLimit * 0.8);
 return result.getSecond();
}

Code example source: apache/hbase

@Test
public void testExecProcedure() throws Exception {
 String snapshotString = "offlineTableSnapshot";
 try {
  Table table = TEST_UTIL.createTable(tableName, Bytes.toBytes("cf"));
  for (int i = 0; i < 100; i++) {
   Put put = new Put(Bytes.toBytes(i)).addColumn(Bytes.toBytes("cf"), null, Bytes.toBytes(i));
   table.put(put);
  }
  // take a snapshot of the enabled table
  Map<String, String> props = new HashMap<>();
  props.put("table", tableName.getNameAsString());
  admin.execProcedure(SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION, snapshotString,
   props).get();
  LOG.debug("Snapshot completed.");
 } finally {
  admin.deleteSnapshot(snapshotString).join();
  TEST_UTIL.deleteTable(tableName);
 }
}

Code example source: apache/hbase

@Test
public void testBulkOutputWithTsvImporterTextMapper() throws Exception {
 final TableName table = TableName.valueOf(name.getMethodName() + util.getRandomUUID());
 String FAMILY = "FAM";
 Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(table.getNameAsString()),"hfiles");
 // Prepare the arguments required for the test.
 String[] args =
   new String[] {
     "-D" + ImportTsv.MAPPER_CONF_KEY
       + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper",
     "-D" + ImportTsv.COLUMNS_CONF_KEY
       + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY",
     "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b",
     "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(),
     table.getNameAsString()
     };
 String data = "KEY\u001bVALUE4\u001bVALUE8\u001bsecret&private\n";
 doMROnTableTest(util, FAMILY, data, args, 4);
 util.deleteTable(table);
}

Code example source: apache/hbase

@Test
public void testMROnTableWithCustomMapper()
throws Exception {
 util.createTable(tn, FAMILY);
 args.put(ImportTsv.MAPPER_CONF_KEY,
   "org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapper");
 doMROnTableTest(null, 3);
 util.deleteTable(tn);
}

Code example source: apache/hbase

@Test
public void testBulkOutputWithTsvImporterTextMapper() throws Exception {
 Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()),"hfiles");
 args.put(ImportTsv.MAPPER_CONF_KEY, "org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper");
 args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString());
 String data = "KEY\u001bVALUE4\u001bVALUE8\n";
 doMROnTableTest(data, 4);
 util.deleteTable(tn);
}

Code example source: apache/hbase

@Test
public void testReplicaGetWithPrimaryDown() throws IOException {
 // Create table then get the single region for our new table.
 HTableDescriptor hdt = HTU.createTableDescriptor("testCreateDeleteTable");
 hdt.setRegionReplication(NB_SERVERS);
 hdt.addCoprocessor(RegionServerStoppedCopro.class.getName());
 try {
  Table table = HTU.createTable(hdt, new byte[][] { f }, null);
  Put p = new Put(row);
  p.addColumn(f, row, row);
  table.put(p);
  // Flush so it can be picked by the replica refresher thread
  HTU.flush(table.getName());
  // Sleep for some time until data is picked up by replicas
  try {
   Thread.sleep(2 * REFRESH_PERIOD);
  } catch (InterruptedException e1) {
   LOG.error(e1.toString(), e1);
  }
  // But if we ask for stale we will get it
  Get g = new Get(row);
  g.setConsistency(Consistency.TIMELINE);
  Result r = table.get(g);
  Assert.assertTrue(r.isStale());
 } finally {
  HTU.getAdmin().disableTable(hdt.getTableName());
  HTU.deleteTable(hdt.getTableName());
 }
}

Code example source: apache/hbase

try {
 LOG.debug("Ensuring table doesn't exist.");
 util.deleteTable(tableName);
} catch (Exception ex) {
 // ignore: the table did not exist yet
}
if (numRegions > 1) {
 util.createTable(tableName, FAMILIES, 1, startRow, endRow, numRegions);
} else {
 util.createTable(tableName, FAMILIES);
}
Table table = util.getConnection().getTable(tableName);
byte[] value = Bytes.toBytes("after_snapshot_value");
util.loadTable(table, FAMILIES, value);

Code example source: apache/hbase

@Before
public void setUp() throws Exception {
 // Create a pre-split table just to populate some regions
 TableName tableName = TableName.valueOf("testRegionMover");
 Admin admin = TEST_UTIL.getAdmin();
 if (admin.tableExists(tableName)) {
  TEST_UTIL.deleteTable(tableName);
 }
 TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName)
  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build();
 String startKey = "a";
 String endKey = "z";
 admin.createTable(tableDesc, startKey.getBytes(), endKey.getBytes(), 9);
}

Code example source: apache/hbase

@Test
public void testCreateTable() throws Exception {
 final TableName tableName = TableName.valueOf(name.getMethodName());
 TEST_UTIL.createTable(tableName, Bytes.toBytes("f"), splitKeys);
 TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
 // All regions should have favored nodes
 checkIfFavoredNodeInformationIsCorrect(tableName);
 List<HRegionInfo> regions = admin.getTableRegions(tableName);
 TEST_UTIL.deleteTable(tableName);
 checkNoFNForDeletedTable(regions);
}

Code example source: apache/hbase

@Test
public void testCloneSnapshotOfCloned() throws IOException, InterruptedException {
 TableName clonedTableName =
  TableName.valueOf(getValidMethodName() + "-" + System.currentTimeMillis());
 admin.cloneSnapshot(snapshotName0, clonedTableName);
 verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
 SnapshotTestingUtils.verifyReplicasCameOnline(clonedTableName, admin, getNumReplicas());
 admin.disableTable(clonedTableName);
 admin.snapshot(snapshotName2, clonedTableName);
 TEST_UTIL.deleteTable(clonedTableName);
 waitCleanerRun();
 admin.cloneSnapshot(snapshotName2, clonedTableName);
 verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
 SnapshotTestingUtils.verifyReplicasCameOnline(clonedTableName, admin, getNumReplicas());
 TEST_UTIL.deleteTable(clonedTableName);
}

Code example source: apache/hbase

@Test
public void testMROnTable() throws Exception {
 util.createTable(tn, FAMILY);
 doMROnTableTest(null, 1);
 util.deleteTable(tn);
}

Code example source: apache/hbase

@Test
public void testBulkOutputWithoutAnExistingTable() throws Exception {
 // Prepare the arguments required for the test.
 Path hfiles = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles");
 args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles.toString());
 doMROnTableTest(null, 3);
 util.deleteTable(tn);
}

Code example source: apache/hbase

@Test
public void testGetRegionLocationFromPrimaryMetaRegion() throws IOException, InterruptedException {
 HTU.getAdmin().setBalancerRunning(false, true);
 ((ConnectionImplementation) HTU.getAdmin().getConnection()).setUseMetaReplicas(true);
 // Create table then get the single region for our new table.
 HTableDescriptor hdt = HTU.createTableDescriptor("testGetRegionLocationFromPrimaryMetaRegion");
 hdt.setRegionReplication(2);
 try {
  HTU.createTable(hdt, new byte[][] { f }, null);
  RegionServerHostingPrimayMetaRegionSlowOrStopCopro.slowDownPrimaryMetaScan = true;
  // Get user table location, always get it from the primary meta replica
  RegionLocations url = ((ClusterConnection) HTU.getConnection())
    .locateRegion(hdt.getTableName(), row, false, false);
 } finally {
  RegionServerHostingPrimayMetaRegionSlowOrStopCopro.slowDownPrimaryMetaScan = false;
  ((ConnectionImplementation) HTU.getAdmin().getConnection()).setUseMetaReplicas(false);
  HTU.getAdmin().setBalancerRunning(true, true);
  HTU.getAdmin().disableTable(hdt.getTableName());
  HTU.deleteTable(hdt.getTableName());
 }
}
