Usage of org.apache.hadoop.hbase.client.Table.close() with code examples


This article collects Java code examples for the org.apache.hadoop.hbase.client.Table.close() method and shows how it is used in practice. The examples are drawn from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and should serve as a useful reference. Details of Table.close() are as follows:
Package: org.apache.hadoop.hbase.client
Class: Table
Method: close

About Table.close

Javadoc: Releases any resources held or pending changes in internal buffers.
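
Since Table implements java.io.Closeable, close() is typically called in a finally block (as in the examples below) or handled automatically with try-with-resources. Here is a minimal sketch of the try-with-resources form; the table name "demo_table" and row key "row1" are hypothetical and used only for illustration:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableCloseExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // try-with-resources closes the Table (and Connection) even if an exception is thrown,
        // which is what the finally blocks in the examples below accomplish by hand.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("demo_table"))) { // hypothetical table
          Result result = table.get(new Get(Bytes.toBytes("row1")));               // hypothetical row key
          System.out.println("Row is empty: " + result.isEmpty());
        } // table.close() and connection.close() are invoked here automatically
      }
    }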

Code examples

Example source: apache/hbase

    /**
     * Remove specified namespace from the acl table.
     */
    static void removeNamespacePermissions(Configuration conf, String namespace, Table t)
        throws IOException {
      Delete d = new Delete(Bytes.toBytes(toNamespaceEntry(namespace)));
      if (LOG.isDebugEnabled()) {
        LOG.debug("Removing permissions of removed namespace " + namespace);
      }
      try {
        t.delete(d);
      } finally {
        t.close();
      }
    }

Example source: apache/hbase

    /**
     * Test read only tables
     * @throws Exception
     */
    @Test
    public void testReadOnlyTable() throws Exception {
      final TableName name = TableName.valueOf(this.name.getMethodName());
      Table table = TEST_UTIL.createTable(name, HConstants.CATALOG_FAMILY);
      byte[] value = Bytes.toBytes("somedata");
      // This used to use an empty row... That must have been a bug
      Put put = new Put(value);
      put.addColumn(HConstants.CATALOG_FAMILY, HConstants.CATALOG_FAMILY, value);
      table.put(put);
      table.close();
    }

Example source: apache/hbase

    /**
     * Return the number of rows in the given table.
     */
    public int countRows(final TableName tableName) throws IOException {
      Table table = getConnection().getTable(tableName);
      try {
        return countRows(table);
      } finally {
        table.close();
      }
    }

Example source: apache/hbase

    /**
     * For HADOOP-2579
     * @throws IOException
     */
    @Test (expected=TableExistsException.class)
    public void testTableExistsExceptionWithATable() throws IOException {
      final TableName name = TableName.valueOf(this.name.getMethodName());
      TEST_UTIL.createTable(name, HConstants.CATALOG_FAMILY).close();
      TEST_UTIL.createTable(name, HConstants.CATALOG_FAMILY);
    }

Example source: apache/hbase

    @Test
    public void testGetTableDescriptor() throws IOException {
      Table table = null;
      try {
        table = TEST_UTIL.getConnection().getTable(TABLE);
        HTableDescriptor local = table.getTableDescriptor();
        assertEquals(remoteTable.getTableDescriptor(), local);
      } finally {
        if (null != table) table.close();
      }
    }

Example source: apache/hbase

    @Test
    public void testQualifierAccess() throws Exception {
      final Table table = createTable(TEST_UTIL, TABLE, new byte[][] { FAMILY });
      try {
        doQualifierAccess(table);
      } finally {
        table.close();
      }
    }

Example source: apache/hbase

    /**
     * @throws java.lang.Exception
     */
    @BeforeClass
    public static void setUpBeforeClass() throws Exception {
      TEST_UTIL.startMiniCluster();
      Table table = TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME), Bytes.toBytes(COL_FAM));
      writeRows(table, TOTAL_ROWS, ROWS_WITH_ONE_COL);
      table.close();
    }

Example source: apache/hbase

    private void createTableWithNonDefaultProperties() throws Exception {
      final long startTime = System.currentTimeMillis();
      final String sourceTableNameAsString = STRING_TABLE_NAME + startTime;
      originalTableName = TableName.valueOf(sourceTableNameAsString);
      // enable replication on a column family
      HColumnDescriptor maxVersionsColumn = new HColumnDescriptor(MAX_VERSIONS_FAM);
      HColumnDescriptor bloomFilterColumn = new HColumnDescriptor(BLOOMFILTER_FAM);
      HColumnDescriptor dataBlockColumn = new HColumnDescriptor(COMPRESSED_FAM);
      HColumnDescriptor blockSizeColumn = new HColumnDescriptor(BLOCKSIZE_FAM);
      maxVersionsColumn.setMaxVersions(MAX_VERSIONS);
      bloomFilterColumn.setBloomFilterType(BLOOM_TYPE);
      dataBlockColumn.setDataBlockEncoding(DATA_BLOCK_ENCODING_TYPE);
      blockSizeColumn.setBlocksize(BLOCK_SIZE);
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(sourceTableNameAsString));
      htd.addFamily(maxVersionsColumn);
      htd.addFamily(bloomFilterColumn);
      htd.addFamily(dataBlockColumn);
      htd.addFamily(blockSizeColumn);
      htd.setValue(TEST_CUSTOM_VALUE, TEST_CUSTOM_VALUE);
      htd.setConfiguration(TEST_CONF_CUSTOM_VALUE, TEST_CONF_CUSTOM_VALUE);
      assertTrue(htd.getConfiguration().size() > 0);
      admin.createTable(htd);
      Table original = UTIL.getConnection().getTable(originalTableName);
      originalTableName = TableName.valueOf(sourceTableNameAsString);
      originalTableDescriptor = admin.getTableDescriptor(originalTableName);
      originalTableDescription = originalTableDescriptor.toStringCustomizedValues();
      original.close();
    }

Example source: apache/hbase

    @Test
    public void testNegativeMemstoreSize() throws IOException, InterruptedException {
      boolean IOEthrown = false;
      Table table = null;
      try {
        table = TEST_UTIL.getConnection().getTable(TableName.valueOf(tableName));
        // Adding data
        Put put1 = new Put(Bytes.toBytes("row1"));
        put1.addColumn(family, qualifier, Bytes.toBytes("Value1"));
        table.put(put1);
        Put put2 = new Put(Bytes.toBytes("row2"));
        put2.addColumn(family, qualifier, Bytes.toBytes("Value2"));
        table.put(put2);
        table.put(put2);
      } catch (IOException e) {
        IOEthrown = true;
      } finally {
        Assert.assertFalse("Shouldn't have thrown an exception", IOEthrown);
        if (table != null) {
          table.close();
        }
      }
    }

Example source: apache/hbase

    /**
     * Not really restarting the master. Simulate it by clear of new region
     * state since it is not persisted, will be lost after master restarts.
     */
    @Test
    public void testMergeAndRestartingMaster() throws Exception {
      final TableName tableName = TableName.valueOf(name.getMethodName());
      // Create table and load data.
      Table table = createTableAndLoadData(MASTER, tableName);
      try {
        MyMasterRpcServices.enabled.set(true);
        // Merge 1st and 2nd region
        mergeRegionsAndVerifyRegionNum(MASTER, tableName, 0, 1, INITIAL_REGION_NUM - 1);
      } finally {
        MyMasterRpcServices.enabled.set(false);
      }
      table.close();
    }

Example source: apache/hbase

    @Test
    public void testCoprocessorError() throws Exception {
      Configuration configuration = new Configuration(util.getConfiguration());
      // Make it not retry forever
      configuration.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
      Table table = util.getConnection().getTable(TEST_TABLE);
      try {
        CoprocessorRpcChannel protocol = table.coprocessorService(ROWS[0]);
        TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface service =
            TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(protocol);
        service.error(null, TestProtos.EmptyRequestProto.getDefaultInstance());
        fail("Should have thrown an exception");
      } catch (ServiceException e) {
      } finally {
        table.close();
      }
    }

Example source: apache/hbase

    @Test
    public void testFaultyScanner() throws Exception {
      TableName tableName = TEST_TABLE.getTableName();
      Table table = UTIL.createTable(tableName, FAMILY_NAME);
      try {
        final int NUM_ROWS = 100;
        loadTable(table, NUM_ROWS);
        checkTableRows(table, NUM_ROWS);
      } finally {
        table.close();
      }
    }

Example source: apache/hbase

    /**
     * Returns all rows from the hbase:meta table.
     *
     * @throws IOException When reading the rows fails.
     */
    public List<byte[]> getMetaTableRows() throws IOException {
      // TODO: Redo using MetaTableAccessor class
      Table t = getConnection().getTable(TableName.META_TABLE_NAME);
      List<byte[]> rows = new ArrayList<>();
      ResultScanner s = t.getScanner(new Scan());
      for (Result result : s) {
        LOG.info("getMetaTableRows: row -> " +
            Bytes.toStringBinary(result.getRow()));
        rows.add(result.getRow());
      }
      s.close();
      t.close();
      return rows;
    }

Example source: apache/hbase

    public void testRegionReplicaReplication(int regionReplication) throws Exception {
      // test region replica replication. Create a table with single region, write some data
      // ensure that data is replicated to the secondary region
      TableName tableName = TableName.valueOf("testRegionReplicaReplicationWithReplicas_"
          + regionReplication);
      HTableDescriptor htd = HTU.createTableDescriptor(tableName.toString());
      htd.setRegionReplication(regionReplication);
      HTU.getAdmin().createTable(htd);
      TableName tableNameNoReplicas =
          TableName.valueOf("testRegionReplicaReplicationWithReplicas_NO_REPLICAS");
      HTU.deleteTableIfAny(tableNameNoReplicas);
      HTU.createTable(tableNameNoReplicas, HBaseTestingUtility.fam1);
      Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
      Table table = connection.getTable(tableName);
      Table tableNoReplicas = connection.getTable(tableNameNoReplicas);
      try {
        // load some data to the non-replicated table
        HTU.loadNumericRows(tableNoReplicas, HBaseTestingUtility.fam1, 6000, 7000);
        // load the data to the table
        HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000);
        verifyReplication(tableName, regionReplication, 0, 1000);
      } finally {
        table.close();
        tableNoReplicas.close();
        HTU.deleteTableIfAny(tableNameNoReplicas);
        connection.close();
      }
    }

Example source: apache/hbase

    /**
     * do a single put that is bypassed by a RegionObserver
     * @throws Exception
     */
    @Test
    public void testSimple() throws Exception {
      Table t = util.getConnection().getTable(tableName);
      Put p = new Put(row1);
      p.addColumn(test, dummy, dummy);
      // before HBASE-4331, this would throw an exception
      t.put(p);
      checkRowAndDelete(t, row1, 0);
      t.close();
    }

Example source: apache/hbase

    /**
     * Can't enable a table if the table isn't in disabled state
     * @throws IOException
     */
    @Test (expected=TableNotDisabledException.class)
    public void testTableNotDisabledExceptionWithATable() throws IOException {
      final TableName name = TableName.valueOf(this.name.getMethodName());
      Table t = TEST_UTIL.createTable(name, HConstants.CATALOG_FAMILY);
      try {
        this.admin.enableTable(name);
      } finally {
        t.close();
      }
    }

Example source: apache/hbase

    /**
     * Ensure when we go to top level index pages that we get redirected to an info-server
     * specific status page.
     */
    @Test
    public void testInfoServersRedirect() throws Exception {
      // give the cluster time to start up
      UTIL.getConnection().getTable(TableName.META_TABLE_NAME).close();
      int port = UTIL.getHBaseCluster().getMaster().getInfoServer().getPort();
      assertContainsContent(new URL("http://localhost:" + port + "/index.html"), "master-status");
      port = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer()
          .getInfoServer().getPort();
      assertContainsContent(new URL("http://localhost:" + port + "/index.html"), "rs-status");
    }

Example source: apache/hbase

    public static void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,
        long expectedRows) throws IOException {
      Table table = util.getConnection().getTable(tableName);
      try {
        assertEquals(expectedRows, util.countRows(table));
      } finally {
        table.close();
      }
    }

Example source: apache/hbase

    /**
     * Test for hadoop-1581 'HBASE: Unopenable tablename bug'.
     * @throws Exception
     */
    @Test
    public void testTableNameClash() throws Exception {
      final String name = this.name.getMethodName();
      HTableDescriptor htd1 = new HTableDescriptor(TableName.valueOf(name + "SOMEUPPERCASE"));
      HTableDescriptor htd2 = new HTableDescriptor(TableName.valueOf(name));
      htd1.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
      htd2.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
      admin.createTable(htd1);
      admin.createTable(htd2);
      // Before fix, below would fail throwing a NoServerForRegionException.
      TEST_UTIL.getConnection().getTable(htd2.getTableName()).close();
    }

Example source: apache/hbase

    @Test(expected = DoNotRetryIOException.class)
    public void testScanOnCorruptHFile() throws IOException {
      TableName tableName = TableName.valueOf(name.getMethodName());
      HTableDescriptor htd = new HTableDescriptor(tableName);
      htd.addCoprocessor(CorruptHFileCoprocessor.class.getName());
      htd.addFamily(new HColumnDescriptor(FAMILY_NAME));
      Table table = TEST_UTIL.createTable(htd, null);
      try {
        loadTable(table, 1);
        scan(table);
      } finally {
        table.close();
      }
    }
