Using the org.apache.hadoop.hbase.backup.example.ZKTableArchiveClient class: code examples

This article collects Java code examples for the org.apache.hadoop.hbase.backup.example.ZKTableArchiveClient class and shows how it is used in practice. The snippets are taken from selected projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as a useful reference. Details of the ZKTableArchiveClient class:
Package: org.apache.hadoop.hbase.backup.example
Class name: ZKTableArchiveClient

About ZKTableArchiveClient

Example class for how to use the table archiving coordinated via zookeeper.
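
Before looking at the individual snippets, here is a minimal end-to-end usage sketch assembled from the examples below: build a client from a Configuration and a ClusterConnection, enable archiving for a table, check the flag, and turn it off again. The table name "myTable" is a placeholder, and the constructor and method signatures follow the snippets in this article; they may differ between HBase versions, so treat this as a sketch rather than a definitive reference.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.example.ZKTableArchiveClient;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class ArchivingUsageSketch {
 public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  // The test code below casts the connection to ClusterConnection before handing it
  // to the client, so the same pattern is used here.
  ClusterConnection connection = (ClusterConnection) ConnectionFactory.createConnection(conf);
  ZKTableArchiveClient archivingClient = new ZKTableArchiveClient(conf, connection);

  byte[] table = Bytes.toBytes("myTable"); // hypothetical table name
  // Enable archiving for the table; the call is asynchronous, so propagation to the
  // region servers may lag slightly behind it.
  archivingClient.enableHFileBackupAsync(table);
  System.out.println("Archiving enabled: " + archivingClient.getArchivingEnabled(table));

  // Turn archiving off again, either for this table or for all tables.
  archivingClient.disableHFileBackup(table);
  connection.close();
 }
}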

Code Examples

Code example source: apache/hbase

/**
 * Test turning on/off archiving
 */
@Test
public void testArchivingEnableDisable() throws Exception {
 // 1. turn on hfile backups
 LOG.debug("----Starting archiving");
 archivingClient.enableHFileBackupAsync(TABLE_NAME);
 assertTrue("Archving didn't get turned on", archivingClient
   .getArchivingEnabled(TABLE_NAME));
 // 2. Turn off archiving and make sure its off
 archivingClient.disableHFileBackup();
 assertFalse("Archving didn't get turned off.", archivingClient.getArchivingEnabled(TABLE_NAME));
 // 3. Check enable/disable on a single table
 archivingClient.enableHFileBackupAsync(TABLE_NAME);
 assertTrue("Archving didn't get turned on", archivingClient
   .getArchivingEnabled(TABLE_NAME));
 // 4. Turn off archiving and make sure its off
 archivingClient.disableHFileBackup(TABLE_NAME);
 assertFalse("Archving didn't get turned off for " + STRING_TABLE_NAME,
  archivingClient.getArchivingEnabled(TABLE_NAME));
}

Code example source: apache/hbase

public HFileArchiveManager(Connection connection, Configuration conf)
  throws ZooKeeperConnectionException, IOException {
 this.zooKeeper = new ZKWatcher(conf, "hfileArchiveManager-on-" + connection.toString(),
   connection);
 this.archiveZnode = ZKTableArchiveClient.getArchiveZNode(this.zooKeeper.getConfiguration(),
  this.zooKeeper);
}

Code example source: apache/hbase

/**
 * Disable hfile backups for all tables.
 * <p>
 * Previously backed up files are still retained (if present).
 * <p>
 * Asynchronous operation - some extra HFiles may be retained in the archive directory after
 * disable is called, depending on the propagation latency from zookeeper to the servers.
 * @throws IOException if an unexpected exception occurs
 * @throws KeeperException if zookeeper can't be reached
 */
public void disableHFileBackup() throws IOException, KeeperException {
 createHFileArchiveManager().disableHFileBackup().stop();
}

Code example source: apache/hbase

/**
 * Disable hfile backups for the given table.
 * <p>
 * Previously backed up files are still retained (if present).
 * <p>
 * Asynchronous operation - some extra HFiles may be retained in the archive directory after
 * disable is called, depending on the propagation latency from zookeeper to the servers.
 * @param table name of the table to stop backing up
 * @throws IOException if an unexpected exception occurs
 * @throws KeeperException if zookeeper can't be reached
 */
public void disableHFileBackup(String table) throws IOException, KeeperException {
 disableHFileBackup(Bytes.toBytes(table));
}

Code example source: apache/hbase

/**
 * Setup the config for the cluster
 */
@BeforeClass
public static void setupCluster() throws Exception {
 setupConf(UTIL.getConfiguration());
 UTIL.startMiniZKCluster();
 CONNECTION = (ClusterConnection)ConnectionFactory.createConnection(UTIL.getConfiguration());
 archivingClient = new ZKTableArchiveClient(UTIL.getConfiguration(), CONNECTION);
 // make hfile archiving node so we can archive files
 ZKWatcher watcher = UTIL.getZooKeeperWatcher();
 String archivingZNode = ZKTableArchiveClient.getArchiveZNode(UTIL.getConfiguration(), watcher);
 ZKUtil.createWithParents(watcher, archivingZNode);
 rss = mock(RegionServerServices.class);
}

Code example source: apache/hbase

/**
 * Start archiving table for given hfile cleaner
 * @param tableName table to archive
 * @param cleaner cleaner to check to make sure change propagated
 * @return underlying {@link LongTermArchivingHFileCleaner} that is managing archiving
 * @throws IOException on failure
 * @throws KeeperException on failure
 */
private List<BaseHFileCleanerDelegate> turnOnArchiving(String tableName, HFileCleaner cleaner)
  throws IOException, KeeperException {
 // turn on hfile retention
 LOG.debug("----Starting archiving for table:" + tableName);
 archivingClient.enableHFileBackupAsync(Bytes.toBytes(tableName));
 assertTrue("Archving didn't get turned on", archivingClient.getArchivingEnabled(tableName));
 // wait for the archiver to get the notification
 List<BaseHFileCleanerDelegate> cleaners = cleaner.getDelegatesForTesting();
 LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
 while (!delegate.archiveTracker.keepHFiles(STRING_TABLE_NAME)) {
  // spin until propagation - should be fast
 }
 return cleaners;
}

Code example source: apache/hbase

/**
 * Determine if archiving is enabled (but not necessarily fully propagated) for a table
 * @param table name of the table to check
 * @return <tt>true</tt> if it is, <tt>false</tt> otherwise
 * @throws IOException if an unexpected network issue occurs
 * @throws KeeperException if zookeeper can't be reached
 */
public boolean getArchivingEnabled(String table) throws IOException, KeeperException {
 return getArchivingEnabled(Bytes.toBytes(table));
}

Code example source: apache/hbase

/**
 * @return A new {@link HFileArchiveManager} to manage which tables' hfiles should be archived
 *         rather than deleted.
 * @throws KeeperException if we can't reach zookeeper
 * @throws IOException if an unexpected network issue occurs
 */
private synchronized HFileArchiveManager createHFileArchiveManager() throws KeeperException,
  IOException {
 return new HFileArchiveManager(this.connection, this.getConf());
}

Code example source: apache/hbase

@After
public void tearDown() throws Exception {
 try {
  FileSystem fs = UTIL.getTestFileSystem();
  // cleanup each of the files/directories registered
  for (Path file : toCleanup) {
   // remove the table and archive directories
   FSUtils.delete(fs, file, true);
  }
 } catch (IOException e) {
  LOG.warn("Failure to delete archive directory", e);
 } finally {
  toCleanup.clear();
 }
 // make sure that backups are off for all tables
 archivingClient.disableHFileBackup();
}

Code example source: apache/hbase

private TableHFileArchiveTracker(ZKWatcher watcher, HFileArchiveTableMonitor monitor) {
 super(watcher);
 watcher.registerListener(this);
 this.monitor = monitor;
 this.archiveHFileZNode = ZKTableArchiveClient.getArchiveZNode(watcher.getConfiguration(),
  watcher);
}

Code example source: apache/hbase

/**
 * Disable hfile backups for the given table.
 * <p>
 * Previously backed up files are still retained (if present).
 * <p>
 * Asynchronous operation - some extra HFiles may be retained in the archive directory after
 * disable is called, depending on the propagation latency from zookeeper to the servers.
 * @param table name of the table to stop backing up
 * @throws IOException if an unexpected exception occurs
 * @throws KeeperException if zookeeper can't be reached
 */
public void disableHFileBackup(final byte[] table) throws IOException, KeeperException {
 createHFileArchiveManager().disableHFileBackup(table).stop();
}

Code example source: harbby/presto-connectors

public HFileArchiveManager(HConnection connection, Configuration conf)
  throws ZooKeeperConnectionException, IOException {
 this.zooKeeper = new ZooKeeperWatcher(conf, "hfileArchiveManager-on-" + connection.toString(),
   connection);
 this.archiveZnode = ZKTableArchiveClient.getArchiveZNode(this.zooKeeper.getConfiguration(),
  this.zooKeeper);
}

Code example source: apache/hbase

/**
 * Determine if archiving is enabled (but not necessarily fully propagated) for a table
 * @param table name of the table to check
 * @return <tt>true</tt> if it is, <tt>false</tt> otherwise
 * @throws IOException if a connection to ZooKeeper cannot be established
 * @throws KeeperException if zookeeper can't be reached
 */
public boolean getArchivingEnabled(byte[] table) throws IOException, KeeperException {
 HFileArchiveManager manager = createHFileArchiveManager();
 try {
  return manager.isArchivingEnabled(table);
 } finally {
  manager.stop();
 }
}
