This article collects a number of code examples for the Java method org.apache.hadoop.hbase.client.Admin.deleteSnapshot() and shows how Admin.deleteSnapshot() is used in practice. The examples were extracted from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of the Admin.deleteSnapshot() method:
Package: org.apache.hadoop.hbase.client
Class: Admin
Method: deleteSnapshot
Description: Delete an existing snapshot.
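Before the examples, here is a minimal, self-contained sketch of the calling pattern they all share: obtain an Admin from a Connection, call deleteSnapshot() with the snapshot name, and let try-with-resources close everything. It is only an illustration; the default configuration and the snapshot name "my_snapshot" are assumed placeholders, not values taken from the projects below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteSnapshotExample {
  public static void main(String[] args) throws Exception {
    // Assumes an HBase cluster reachable via the default configuration
    // and an existing snapshot named "my_snapshot" (placeholder).
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Removes the snapshot's metadata and backing files from the cluster.
      admin.deleteSnapshot("my_snapshot");
    }
  }
}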
Code example source: apache/hbase
/**
 * Delete HBase snapshot for backup.
 * @param backupInfo backup info
 * @throws IOException exception
 */
protected static void deleteSnapshots(final Connection conn, BackupInfo backupInfo,
    Configuration conf) throws IOException {
  LOG.debug("Trying to delete snapshot for full backup.");
  for (String snapshotName : backupInfo.getSnapshotNames()) {
    if (snapshotName == null) {
      continue;
    }
    LOG.debug("Trying to delete snapshot: " + snapshotName);
    try (Admin admin = conn.getAdmin()) {
      admin.deleteSnapshot(snapshotName);
    }
    LOG.debug("Deleting the snapshot " + snapshotName + " for backup " + backupInfo.getBackupId()
        + " succeeded.");
  }
}
Code example source: apache/hbase
assertEquals(snapshot3, snapshots.get(0).getName());
admin.deleteSnapshot(snapshot3);
admin.close();
Code example source: apache/hbase
public static void deleteSnapshot(Connection conn) throws IOException {
  Configuration conf = conn.getConfiguration();
  LOG.debug("Deleting " + BackupSystemTable.getSnapshotName(conf) + " from the system");
  try (Admin admin = conn.getAdmin()) {
    String snapshotName = BackupSystemTable.getSnapshotName(conf);
    if (snapshotExists(admin, snapshotName)) {
      admin.deleteSnapshot(snapshotName);
      LOG.debug("Done deleting backup system table snapshot");
    } else {
      LOG.error("Snapshot " + snapshotName + " does not exists");
    }
  }
}
Code example source: apache/hbase
public static void cleanupSnapshot(Admin admin, String snapshotName)
    throws IOException {
  // delete the taken snapshot
  admin.deleteSnapshot(snapshotName);
  assertNoSnapshots(admin);
}
Code example source: apache/hbase
public static void deleteAllSnapshots(final Admin admin)
    throws IOException {
  // Delete all the snapshots
  for (SnapshotDescription snapshot: admin.listSnapshots()) {
    admin.deleteSnapshot(snapshot.getName());
  }
  SnapshotTestingUtils.assertNoSnapshots(admin);
}
Code example source: apache/hbase
@Test(expected = QuotaExceededException.class)
public void testCloneSnapshotQuotaExceed() throws Exception {
  String nsp = prefix + "_testTableQuotaExceedWithCloneSnapshot";
  NamespaceDescriptor nspDesc =
      NamespaceDescriptor.create(nsp).addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "1")
          .build();
  ADMIN.createNamespace(nspDesc);
  assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(nsp));
  TableName tableName = TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1");
  TableName cloneTableName = TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2");
  HColumnDescriptor fam1 = new HColumnDescriptor("fam1");
  HTableDescriptor tableDescOne = new HTableDescriptor(tableName);
  tableDescOne.addFamily(fam1);
  ADMIN.createTable(tableDescOne);
  String snapshot = "snapshot_testTableQuotaExceedWithCloneSnapshot";
  ADMIN.snapshot(snapshot, tableName);
  ADMIN.cloneSnapshot(snapshot, cloneTableName);
  ADMIN.deleteSnapshot(snapshot);
}
Code example source: apache/hbase
/**
 * Verify that deleting the snapshot does not affect either table.
 */
private void runTestSnapshotDeleteIndependent() throws Exception {
  // Ensure the original table does not reference the HFiles anymore
  admin.majorCompact(originalTableName);
  // Deleting the snapshot used to break the cloned table by deleting in-use HFiles
  admin.deleteSnapshot(snapshotName);
  // Wait for cleaner run and DFS heartbeats so that anything that is deletable is fully deleted
  Pattern pattern = Pattern.compile(snapshotNameAsString);
  do {
    Thread.sleep(5000);
  } while (!admin.listSnapshots(pattern).isEmpty());
  try (Table original = UTIL.getConnection().getTable(originalTableName)) {
    try (Table clonedTable = UTIL.getConnection().getTable(cloneTableName)) {
      // Verify that all regions of both tables are readable
      final int origTableRowCount = countRows(original);
      final int clonedTableRowCount = countRows(clonedTable);
      Assert.assertEquals(origTableRowCount, clonedTableRowCount);
    }
  }
}
Code example source: apache/hbase
UTIL.getAdmin().deleteSnapshot(snapshotName);
UTIL.deleteTable(tableName);
tearDownCluster();
Code example source: apache/hbase
@Test
@Override
public void testCloneLinksAfterDelete() throws IOException, InterruptedException {
  // delay the flush to make sure
  delayFlush = true;
  SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 20, FAMILY);
  long tid = System.currentTimeMillis();
  byte[] snapshotName3 = Bytes.toBytes("snaptb3-" + tid);
  TableName clonedTableName3 =
      TableName.valueOf(name.getMethodName() + System.currentTimeMillis());
  admin.snapshot(snapshotName3, tableName);
  delayFlush = false;
  int snapshot3Rows = -1;
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
    snapshot3Rows = TEST_UTIL.countRows(table);
  }
  admin.cloneSnapshot(snapshotName3, clonedTableName3);
  admin.deleteSnapshot(snapshotName3);
  super.testCloneLinksAfterDelete();
  verifyRowCount(TEST_UTIL, clonedTableName3, snapshot3Rows);
  admin.disableTable(clonedTableName3);
  admin.deleteTable(clonedTableName3);
}
Code example source: apache/hbase
@Test
public void testRestoreSnapshot() throws Exception {
  String nsp = prefix + "_testRestoreSnapshot";
  NamespaceDescriptor nspDesc =
      NamespaceDescriptor.create(nsp)
          .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "10").build();
  ADMIN.createNamespace(nspDesc);
  assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(nsp));
  TableName tableName1 = TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1");
  HTableDescriptor tableDescOne = new HTableDescriptor(tableName1);
  HColumnDescriptor fam1 = new HColumnDescriptor("fam1");
  tableDescOne.addFamily(fam1);
  ADMIN.createTable(tableDescOne, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4);
  NamespaceTableAndRegionInfo nstate = getNamespaceState(nsp);
  assertEquals("Initial region count should be 4.", 4, nstate.getRegionCount());
  String snapshot = "snapshot_testRestoreSnapshot";
  ADMIN.snapshot(snapshot, tableName1);
  List<HRegionInfo> regions = ADMIN.getTableRegions(tableName1);
  Collections.sort(regions);
  ADMIN.split(tableName1, Bytes.toBytes("JJJ"));
  Thread.sleep(2000);
  assertEquals("Total regions count should be 5.", 5, nstate.getRegionCount());
  ADMIN.disableTable(tableName1);
  ADMIN.restoreSnapshot(snapshot);
  assertEquals("Total regions count should be 4 after restore.", 4, nstate.getRegionCount());
  ADMIN.enableTable(tableName1);
  ADMIN.deleteSnapshot(snapshot);
}
Code example source: apache/hbase
@Test
public void testWithMockedMapReduceWithNoStartRowStopRow() throws Exception {
  setupCluster();
  String snapshotName = "testWithMockedMapReduceMultiRegion";
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try {
    createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 10);
    Configuration conf = UTIL.getConfiguration();
    conf.setBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, false);
    Job job = new Job(conf);
    Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName);
    // test scan without startRow and stopRow
    Scan scan2 = new Scan();
    TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan2,
        TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false,
        tmpTableDir, new RegionSplitter.UniformSplit(), 5);
    verifyWithMockedMapReduce(job, 10, 50, HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_START_ROW);
  } finally {
    UTIL.getAdmin().deleteSnapshot(snapshotName);
    UTIL.deleteTable(tableName);
    tearDownCluster();
  }
}
Code example source: apache/hbase
@Test
public void testWithMockedMapReduceWithSplitsPerRegion() throws Exception {
  setupCluster();
  String snapshotName = "testWithMockedMapReduceMultiRegion";
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try {
    createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 10);
    Configuration conf = UTIL.getConfiguration();
    conf.setBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, false);
    Job job = new Job(conf);
    Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName);
    // test scan with startRow and stopRow
    Scan scan = new Scan(bbc, yya);
    TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan,
        TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false,
        tmpTableDir, new RegionSplitter.UniformSplit(), 5);
    verifyWithMockedMapReduce(job, 10, 40, bbc, yya);
  } finally {
    UTIL.getAdmin().deleteSnapshot(snapshotName);
    UTIL.deleteTable(tableName);
    tearDownCluster();
  }
}
Code example source: apache/hbase
@Test
public void testInitTableSnapshotMapperJobConfig() throws Exception {
  setupCluster();
  final TableName tableName = TableName.valueOf(name.getMethodName());
  String snapshotName = "foo";
  try {
    createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 1);
    Job job = new Job(UTIL.getConfiguration());
    Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName);
    TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
        new Scan(), TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
        NullWritable.class, job, false, tmpTableDir);
    // TODO: would be better to examine directly the cache instance that results from this
    // config. Currently this is not possible because BlockCache initialization is static.
    Assert.assertEquals(
        "Snapshot job should be configured for default LruBlockCache.",
        HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT,
        job.getConfiguration().getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, -1), 0.01);
    Assert.assertEquals(
        "Snapshot job should not use BucketCache.",
        0, job.getConfiguration().getFloat("hbase.bucketcache.size", -1), 0.01);
  } finally {
    UTIL.getAdmin().deleteSnapshot(snapshotName);
    UTIL.deleteTable(tableName);
    tearDownCluster();
  }
}
Code example source: apache/hbase
private void testScanner(HBaseTestingUtility util, String snapshotName, int numRegions,
    boolean shutdownCluster) throws Exception {
  setupCluster();
  TableName tableName = TableName.valueOf("testScanner");
  try {
    createTableAndSnapshot(util, tableName, snapshotName, numRegions);
    if (shutdownCluster) {
      util.shutdownMiniHBaseCluster();
    }
    Path restoreDir = util.getDataTestDirOnTestFS(snapshotName);
    Scan scan = new Scan(bbb, yyy); // limit the scan
    TableSnapshotScanner scanner = new TableSnapshotScanner(UTIL.getConfiguration(), restoreDir,
        snapshotName, scan);
    verifyScanner(scanner, bbb, yyy);
    scanner.close();
  } finally {
    if (!shutdownCluster) {
      util.getAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
      tearDownCluster();
    }
  }
}
Code example source: apache/hbase
assertEquals("Total regions count should be.", 8, nstate.getRegionCount());
ADMIN.deleteSnapshot(snapshot);
Code example source: apache/hbase
ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM);
admin.deleteSnapshot(snapshot);
snapshots = admin.listSnapshots();
SnapshotTestingUtils.assertNoSnapshots(admin);
Code example source: apache/hbase
util.getAdmin().deleteSnapshot(snapshotName);
util.deleteTable(tableName);
tearDownCluster();
Code example source: apache/hbase
@Test
public void testInitTableSnapshotMapperJobConfig() throws Exception {
  setupCluster();
  final TableName tableName = TableName.valueOf(name.getMethodName());
  String snapshotName = "foo";
  try {
    createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 1);
    JobConf job = new JobConf(UTIL.getConfiguration());
    Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName);
    TableMapReduceUtil.initTableSnapshotMapJob(snapshotName,
        COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
        NullWritable.class, job, false, tmpTableDir);
    // TODO: would be better to examine directly the cache instance that results from this
    // config. Currently this is not possible because BlockCache initialization is static.
    Assert.assertEquals(
        "Snapshot job should be configured for default LruBlockCache.",
        HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT,
        job.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, -1), 0.01);
    Assert.assertEquals(
        "Snapshot job should not use BucketCache.",
        0, job.getFloat("hbase.bucketcache.size", -1), 0.01);
  } finally {
    UTIL.getAdmin().deleteSnapshot(snapshotName);
    UTIL.deleteTable(tableName);
    tearDownCluster();
  }
}
Code example source: apache/hbase
util.getAdmin().deleteSnapshot(snapshotName);
util.deleteTable(tableName);
tearDownCluster();
Code example source: apache/hbase
emptyCfs, rootDir, admin, fs);
admin.deleteSnapshot(snapshot);
snapshots = admin.listSnapshots();
SnapshotTestingUtils.assertNoSnapshots(admin);