This article collects Java code examples for the org.apache.hadoop.hbase.client.Connection.close() method, showing how Connection.close() is used in practice. The examples are extracted from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they are reasonably representative and should make useful references. Details of Connection.close():
Package: org.apache.hadoop.hbase.client
Class: Connection
Method: close
Summary: none available
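Before the extracted examples, here is a minimal, self-contained sketch of typical usage. Connection implements java.io.Closeable, so try-with-resources guarantees that close() runs even if the body throws; the table name and row key below are hypothetical, not taken from the examples.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionCloseExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Connection and Table both implement Closeable; try-with-resources
    // closes them in reverse order of creation, even on exceptions.
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(TableName.valueOf("my_table"))) { // hypothetical table
      table.get(new Get(Bytes.toBytes("row1"))); // hypothetical row key
    }
  }
}

A Connection is heavyweight (it owns ZooKeeper and RPC resources), so applications normally create one, share it, and close it only at shutdown. Most of the examples below use the older explicit try/finally style instead of try-with-resources.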
Code example source: apache/hbase
@Override
public void close() throws IOException {
  if (this.table != null) {
    this.table.close();
    this.table = null;
  }
  // Null out the connection on close() even if we didn't explicitly close it,
  // to maintain typical semantics.
  if (isManagedConnection) {
    if (this.connection != null) {
      this.connection.close();
    }
  }
  this.connection = null;
}
Code example source: apache/hbase
/**
 * Instantiate a TableRecordWriter with a BufferedMutator for batch writing.
 */
public TableRecordWriter(JobConf job) throws IOException {
  // expecting exactly one path
  TableName tableName = TableName.valueOf(job.get(OUTPUT_TABLE));
  try {
    this.conn = ConnectionFactory.createConnection(job);
    this.m_mutator = conn.getBufferedMutator(tableName);
  } finally {
    // If mutator creation failed, release the connection so it does not leak.
    if (this.m_mutator == null) {
      conn.close();
      conn = null;
    }
  }
}
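The constructor above closes the connection only when mutator creation fails; the normal-path cleanup belongs in the writer's own close method. A minimal sketch of that counterpart, assuming the same field names (the project's actual method may differ, e.g. by taking a Reporter argument):

public void close() throws IOException {
  try {
    if (this.m_mutator != null) {
      this.m_mutator.close(); // flushes buffered mutations before closing
    }
  } finally {
    if (this.conn != null) {
      this.conn.close(); // release the connection even if the flush fails
    }
  }
}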
Code example source: apache/hbase
/**
 * Puts the specified RegionInfo into META with replica related columns.
 */
public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf,
    RegionInfo hri, Collection<ServerName> servers, int numReplicas) throws IOException {
  Connection conn = ConnectionFactory.createConnection(conf);
  Table meta = conn.getTable(TableName.META_TABLE_NAME);
  Put put = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
  if (numReplicas > 1) {
    Random r = new Random();
    ServerName[] serversArr = servers.toArray(new ServerName[servers.size()]);
    for (int i = 1; i < numReplicas; i++) {
      ServerName sn = serversArr[r.nextInt(serversArr.length)];
      // The column added here is just to make sure the master is able to
      // see the additional replicas when it is asked to assign. The
      // final value of these columns will be different and will be updated
      // by the actual region servers that start hosting the respective replicas.
      MetaTableAccessor.addLocation(put, sn, sn.getStartcode(), i);
    }
  }
  meta.put(put);
  meta.close();
  conn.close();
}
Code example source: apache/hbase
// (Fragment of a createRecordReader implementation) A connection and table are
// opened for the split; if wiring up the reader fails, the connection is closed
// and the exception is rethrown. Truncated lines are marked with "...".
    // ...
    + " the task's full log for more details.");
final Connection connection = ConnectionFactory.createConnection(context.getConfiguration());
Table table = connection.getTable(tSplit.getTable());
// ...
connection.close();
throw ioe;
Code example source: apache/hbase
// (Fragment of a test) The master's shared connection is closed mid-test; later,
// a table from the test utility's connection is used in try-with-resources.
UTIL.getMiniHBaseCluster().getMaster().getConnection().close();
RESUME.countDown();
UTIL.waitFor(30000, () -> procExec.isFinished(dummyProcId));
try (Table table = UTIL.getConnection().getTable(TABLE_NAME)) {
  table.put(new Put(Bytes.toBytes(1)).addColumn(CF, Bytes.toBytes("cq"), Bytes.toBytes(1)));
}
Code example source: apache/hbase
// (Fragment; the nesting below is reconstructed from a truncated excerpt.)
// A temporary connection is used to open the 'labels' table; each close() is
// wrapped so a failure to close is logged rather than propagated.
Connection connection = null;
Table labelsTable = null;
try {
  connection = ConnectionFactory.createConnection(conf);
  try {
    labelsTable = connection.getTable(LABELS_TABLE_NAME);
    // ... (work against the labels table elided in this excerpt)
  } finally {
    if (labelsTable != null) {
      try {
        labelsTable.close();
      } catch (IOException ioe) {
        LOG.warn("Error closing 'labels' table", ioe);
      }
    }
  }
} catch (IOException e) {
  LOG.error("Error opening 'labels' table", e);
} finally {
  if (connection != null) {
    try {
      connection.close();
    } catch (IOException ioe) {
      LOG.warn("Failed close of temporary connection", ioe);
    }
  }
}
Code example source: apache/hbase
// (Fragment of a backup test) Rows are written through a dedicated connection,
// which is closed at the end of the excerpt; truncated lines are marked with "...".
HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
Connection conn = ConnectionFactory.createConnection(conf1);
int NB_ROWS_FAM3 = 6;
insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
LOG.debug("written " + ADD_ROWS + " rows to " + table1);
HTable t2 = (HTable) conn.getTable(table2);
Put p2;
for (int i = 0; i < 5; i++) {
  // ... (puts against t2 elided in this excerpt)
}
// ...
conn.close();
Code example source: apache/hive
private void addHBaseDelegationToken(Configuration conf) throws IOException, MetaException {
  if (User.isHBaseSecurityEnabled(conf)) {
    Connection connection = ConnectionFactory.createConnection(hbaseConf);
    try {
      User curUser = User.getCurrent();
      Job job = new Job(conf);
      TokenUtil.addTokenForJob(connection, curUser, job);
    } catch (InterruptedException e) {
      throw new IOException("Error while obtaining hbase delegation token", e);
    } finally {
      if (connection != null) {
        connection.close();
      }
    }
  }
}
Code example source: apache/hbase
@Override
public void workDone() throws IOException {
  try {
    table.close();
  } finally {
    // Close the connection even if closing the table throws.
    connection.close();
  }
}
Code example source: apache/hbase
@Test(expected = RetriesExhaustedException.class)
public void testSocketClosed() throws IOException, InterruptedException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  UTIL.createTable(tableName, fam1).close();
  Configuration conf = new Configuration(UTIL.getConfiguration());
  conf.set(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY,
      MyRpcClientImpl.class.getName());
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
  Connection connection = ConnectionFactory.createConnection(conf);
  Table table = connection.getTable(TableName.valueOf(name.getMethodName()));
  table.get(new Get(Bytes.toBytes("asd")));
  connection.close();
  for (Socket socket : MyRpcClientImpl.savedSockets) {
    assertTrue("Socket + " + socket + " is not closed", socket.isClosed());
  }
}
Code example source: apache/hbase
// (Fragment of a compaction-throughput test) The connection is closed in a
// finally block together with the mini-cluster shutdown; truncated lines are
// marked with "...".
// ...
TEST_UTIL.startMiniCluster(1);
Connection conn = ConnectionFactory.createConnection(conf);
try {
  // ... (throughput-controller set-up elided in this excerpt)
  assertEquals(10L * 1024 * 1024, throughputController.getMaxThroughput(), EPSILON);
  Table table = conn.getTable(tableName);
  for (int i = 0; i < 5; i++) {
    byte[] value = new byte[0];
    // ...
  }
  // ...
} finally {
  conn.close();
  TEST_UTIL.shutdownMiniCluster();
}
Code example source: apache/hbase
private void generatePartitions(Path partitionsPath) throws IOException {
  Connection connection = ConnectionFactory.createConnection(getConf());
  Pair<byte[][], byte[][]> regionKeys
      = connection.getRegionLocator(TableName.valueOf(tableHash.tableName)).getStartEndKeys();
  connection.close();
  tableHash.selectPartitions(regionKeys);
  LOG.info("Writing " + tableHash.partitions.size() + " partition keys to " + partitionsPath);
  tableHash.writePartitionFile(getConf(), partitionsPath);
}
Code example source: apache/hbase
@Test
public void testRegionServerStoppedOnScannerOpen() throws IOException {
  this.conf.set("hbase.client.connection.impl",
      RegionServerStoppedOnScannerOpenConnection.class.getName());
  // Go against meta else we will try to find first region for the table on construction which
  // means we'll have to do a bunch more mocking. Tests that go against meta only should be
  // good for a bit of testing.
  Connection connection = ConnectionFactory.createConnection(conf);
  Table table = connection.getTable(TableName.META_TABLE_NAME);
  ResultScanner scanner = table.getScanner(HConstants.CATALOG_FAMILY);
  try {
    Result result = null;
    while ((result = scanner.next()) != null) {
      LOG.info(Objects.toString(result));
    }
  } finally {
    scanner.close();
    table.close();
    connection.close();
  }
}
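The finally block above closes the scanner, table, and connection in reverse order of acquisition. Since Connection, Table, and ResultScanner all implement Closeable, the same cleanup can be expressed with try-with-resources; a simplified sketch of the equivalent structure (not the test's actual code):

try (Connection connection = ConnectionFactory.createConnection(conf);
    Table table = connection.getTable(TableName.META_TABLE_NAME);
    ResultScanner scanner = table.getScanner(HConstants.CATALOG_FAMILY)) {
  // ResultScanner is Iterable<Result>, so for-each replaces the while loop.
  for (Result result : scanner) {
    LOG.info(Objects.toString(result));
  }
}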
Code example source: apache/hbase
// (Fragment of a multi-server write test) Truncated lines are marked with "...".
// ...
    .getLiveServerMetrics().size();
long keysToWrite = serverCount * KEYS_TO_WRITE_PER_SERVER;
Connection connection = ConnectionFactory.createConnection(conf);
Table table = connection.getTable(TABLE_NAME);
// ... (write workload elided in this excerpt)
Assert.assertTrue("Writer is not done", isWriterDone);
connection.close();
Code example source: apache/hbase
public static void initCredentials(JobConf job) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job);
  if (userProvider.isHadoopSecurityEnabled()) {
    // propagate delegation related props from launcher job to MR job
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      job.set("mapreduce.job.credentials.binary", System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }
  }
  if (userProvider.isHBaseSecurityEnabled()) {
    Connection conn = ConnectionFactory.createConnection(job);
    try {
      // login the server principal (if using secure Hadoop)
      User user = userProvider.getCurrent();
      TokenUtil.addTokenForJob(conn, job, user);
    } catch (InterruptedException ie) {
      LOG.error("Interrupted obtaining user authentication token", ie);
      Thread.currentThread().interrupt();
    } finally {
      conn.close();
    }
  }
}
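A helper like this is invoked from job set-up code before submission. Assuming it is HBase's org.apache.hadoop.hbase.mapred.TableMapReduceUtil.initCredentials, a minimal hypothetical call site:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.JobConf;

JobConf job = new JobConf(HBaseConfiguration.create());
job.setJobName("my-hbase-job"); // hypothetical job name
// Obtains an HBase delegation token through a short-lived connection
// (closed in the finally block above) and attaches it to the job.
TableMapReduceUtil.initCredentials(job);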
Code example source: apache/hbase
@Test
public void testConnectionClosedOnRegionLocate() throws IOException {
  Configuration testConf = new Configuration(this.conf);
  testConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
  // Go against meta else we will try to find first region for the table on construction which
  // means we'll have to do a bunch more mocking. Tests that go against meta only should be
  // good for a bit of testing.
  Connection connection = ConnectionFactory.createConnection(testConf);
  Table table = connection.getTable(TableName.META_TABLE_NAME);
  connection.close();
  try {
    Get get = new Get(Bytes.toBytes("dummyRow"));
    table.get(get);
    fail("Should have thrown DoNotRetryException but no exception thrown");
  } catch (Exception e) {
    if (!(e instanceof DoNotRetryIOException)) {
      String errMsg =
          "Should have thrown DoNotRetryException but actually " + e.getClass().getSimpleName();
      LOG.error(errMsg, e);
      fail(errMsg);
    }
  } finally {
    table.close();
  }
}
Code example source: apache/hbase
// (Fragment of a replication test) Three cluster connections are opened and all
// are closed in the finally block; truncated lines are marked with "...".
LOG.info("testPerTableCFReplication");
ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf1);
Connection connection1 = ConnectionFactory.createConnection(conf1);
Connection connection2 = ConnectionFactory.createConnection(conf2);
Connection connection3 = ConnectionFactory.createConnection(conf3);
try {
  Admin admin1 = connection1.getAdmin();
  // ... (admin set-up and table creation elided in this excerpt)
  admin3.createTable(tabC);
  Table htab1A = connection1.getTable(tabAName);
  Table htab2A = connection2.getTable(tabAName);
  Table htab3A = connection3.getTable(tabAName);
  Table htab1B = connection1.getTable(tabBName);
  // ... (replication put/delete assertions elided in this excerpt)
  deleteAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C);
} finally {
  connection1.close();
  connection2.close();
  connection3.close();
}
Code example source: apache/hbase
// (Fragment) Short-lived connections are created only to obtain delegation
// tokens for a job, then closed in finally blocks; truncated lines are marked
// with "...".
Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(),
    quorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX);
Connection peerConn = ConnectionFactory.createConnection(peerConf);
try {
  TokenUtil.addTokenForJob(peerConn, user, job);
} finally {
  peerConn.close();
}
// ...
Connection conn = ConnectionFactory.createConnection(job.getConfiguration());
try {
  TokenUtil.addTokenForJob(conn, user, job);
} finally {
  conn.close();
}