本文整理了Java中org.apache.hadoop.hive.ql.metadata.Hive.set()
方法的一些代码示例,展示了Hive.set()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Hive.set()
方法的具体详情如下:
包路径:org.apache.hadoop.hive.ql.metadata.Hive
类名称:Hive
方法名:set
[英]Marks if the given Hive object is allowed to close metastore connections.
[中]标记是否允许给定配置单元对象关闭元存储连接。
代码示例来源:origin: apache/hive
/**
 * Creates an updater bound to the shared Hive instance for {@code conf} and
 * installs that instance as the current thread's Hive object.
 */
HiveUpdater(HiveConf conf, boolean fileRename) throws HiveException {
doFileRename = fileRename;
hive = Hive.get(conf);
Hive.set(hive);
}
代码示例来源:origin: apache/hive
/**
 * Get a mocked Hive object that does not create a real meta store client object.
 * This gets rid of the datanucleus initialization which makes it easier
 * to run tests from IDEs.
 * @param hiveConf configuration that the mocked Hive object will return from getConf()
 * @throws MetaException
 *
 */
private void setupDataNucleusFreeHive(HiveConf hiveConf) throws MetaException {
Hive db = Mockito.mock(Hive.class);
// getMSC() is stubbed to null so no real metastore client is ever built.
Mockito.when(db.getMSC()).thenReturn(null);
Mockito.when(db.getConf()).thenReturn(hiveConf);
// Install the mock as the thread-local Hive instance.
Hive.set(db);
}
代码示例来源:origin: apache/hive
// Worker-thread body: runs one metadata operation using the session's Hive
// object, then signals the coordinating barrier.
@Override
public void run() {
try {
// Publish the session's Hive object to this thread before using the session.
Hive.set(session.getSessionHive());
OperationHandle handle = session.getTables("catalog", "schema", "table", null);
session.closeOperation(handle);
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
try {
// Always rendezvous on the barrier (presumably a CyclicBarrier, given
// BrokenBarrierException) so the coordinating test thread can proceed.
ready.await();
} catch (InterruptedException | BrokenBarrierException e) {
// ignore
}
}
}
}).start();
代码示例来源:origin: apache/hive
// Worker-thread body: runs one metadata operation using the session's Hive
// object, then signals the coordinating barrier.
@Override
public void run() {
try {
// Publish the session's Hive object to this thread before using the session.
Hive.set(session.getSessionHive());
OperationHandle handle = session.getTables("catalog", "schema", "table", null);
session.closeOperation(handle);
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
try {
// Always rendezvous on the barrier (presumably a CyclicBarrier, given
// BrokenBarrierException) so the coordinating test thread can proceed.
ready.await();
} catch (InterruptedException | BrokenBarrierException e) {
// ignore
}
}
}
}).start();
代码示例来源:origin: apache/hive
// Executes the query on a child thread, propagating the parent thread's Hive
// object, session state, perf logger, and logging context first, and cleaning
// all of them up afterwards.
@Override
public Object run() throws HiveSQLException {
// The parent's Hive object must not be closable by this child thread.
assert (!parentHive.allowClose());
Hive.set(parentHive);
// TODO: can this result in cross-thread reuse of session state?
SessionState.setCurrentSessionState(parentSessionState);
PerfLogger.setPerfLogger(SessionState.getPerfLogger());
LogUtils.registerLoggingContext(queryState.getConf());
ShimLoader.getHadoopShims().setHadoopQueryContext(queryState.getQueryId());
try {
if (asyncPrepare) {
// Compilation was deferred to this thread; do it now before running.
prepare(queryState);
}
runQuery();
} catch (HiveSQLException e) {
// TODO: why do we invent our own error path op top of the one from Future.get?
setOperationException(e);
LOG.error("Error running hive query: ", e);
} finally {
LogUtils.unregisterLoggingContext();
// If new hive object is created by the child thread, then we need to close it as it might
// have created a hms connection. Call Hive.closeCurrent() that closes the HMS connection, causes
// HMS connection leaks otherwise.
Hive.closeCurrent();
}
return null;
}
};
代码示例来源:origin: apache/hive
/**
 * Per-test initialization: builds a test configuration, starts metrics and
 * the session manager, and installs a mocked thread-local Hive object so no
 * real metastore connection is opened.
 */
@Before
public void setup() throws Exception {
HiveConf hiveConf = new HiveConf();
// Boolean switches: metrics on, concurrency and metadata-query optimization off.
hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED, true);
hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, false);
// Small async-exec pool with an effectively unbounded keepalive.
hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_ASYNC_EXEC_THREADS, 2);
hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_ASYNC_EXEC_WAIT_QUEUE_SIZE, 10);
hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME, "1000000s");
// Short idle timeout and check interval so session expiry is observable in tests.
hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_IDLE_SESSION_TIMEOUT, "500ms");
hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_SESSION_CHECK_INTERVAL, "3s");
hiveConf.setVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER, MetricsReporting.JSON_FILE.name() + "," + MetricsReporting.JMX.name());
MetricsFactory.init(hiveConf);
sm = new SessionManager(null, true);
sm.init(hiveConf);
metrics = (CodahaleMetrics) MetricsFactory.getInstance();
// A do-nothing mock keeps tests from touching a real metastore.
Hive mockedHive = mock(Hive.class);
Hive.set(mockedHive);
}
代码示例来源:origin: org.apache.spark/spark-hive-thriftserver_2.11
/**
 * Acquires the session and, when this session owns an impersonated metastore
 * connection, publishes it to the current thread.
 */
@Override
protected synchronized void acquire(boolean userAccess) {
super.acquire(userAccess);
Hive impersonatedHive = sessionHive;
if (impersonatedHive != null) {
Hive.set(impersonatedHive);
}
}
代码示例来源:origin: org.apache.spark/spark-hive-thriftserver
/**
 * Acquires the session and, when this session owns an impersonated metastore
 * connection, publishes it to the current thread.
 */
@Override
protected synchronized void acquire(boolean userAccess) {
super.acquire(userAccess);
Hive impersonatedHive = sessionHive;
if (impersonatedHive != null) {
Hive.set(impersonatedHive);
}
}
代码示例来源:origin: org.spark-project.hive/hive-service
/**
 * Acquires the session and, when this session owns an impersonated metastore
 * connection, publishes it to the current thread.
 */
@Override
protected synchronized void acquire(boolean userAccess) {
super.acquire(userAccess);
Hive impersonatedHive = sessionHive;
if (impersonatedHive != null) {
Hive.set(impersonatedHive);
}
}
代码示例来源:origin: com.github.hyukjinkwon/hive-service
/**
 * Acquires the session and, when this session owns an impersonated metastore
 * connection, publishes it to the current thread.
 */
@Override
protected synchronized void acquire(boolean userAccess) {
super.acquire(userAccess);
Hive impersonatedHive = sessionHive;
if (impersonatedHive != null) {
Hive.set(impersonatedHive);
}
}
代码示例来源:origin: org.apache.hive/hive-service
// Binds this session's state to the current handler thread, once the
// operation-level lock has been taken.
private synchronized void acquireAfterOpLock(boolean userAccess) {
// Need to make sure that the this HiveServer2's session's SessionState is
// stored in the thread local for the handler thread.
SessionState.setCurrentSessionState(sessionState);
sessionState.setForwardedAddresses(SessionManager.getForwardedAddresses());
sessionState.setIsUsingThriftJDBCBinarySerDe(updateIsUsingThriftJDBCBinarySerDe());
if (userAccess) {
// Record user-initiated access — presumably feeds idle-session timeout
// accounting; confirm against the session manager.
lastAccessTime = System.currentTimeMillis();
lockedByUser = true;
}
// set the thread name with the logging prefix.
sessionState.updateThreadName();
// Publish this session's Hive object to the handler thread.
Hive.set(sessionHive);
}
代码示例来源:origin: org.apache.spark/spark-hive-thriftserver
/**
 * Creates a session that runs under its own UGI with a dedicated per-session
 * metastore connection.
 *
 * @param protocol        thrift protocol version negotiated with the client
 * @param username        user this session's UGI is created for
 * @param password        password supplied by the client
 * @param hiveConf        session configuration
 * @param ipAddress       client IP address
 * @param delegationToken delegation token set on this session
 * @throws HiveSQLException if the metastore connection cannot be created
 */
public HiveSessionImplwithUGI(TProtocolVersion protocol, String username, String password,
HiveConf hiveConf, String ipAddress, String delegationToken) throws HiveSQLException {
super(protocol, username, password, hiveConf, ipAddress);
setSessionUGI(username);
setDelegationToken(delegationToken);
// create a new metastore connection for this particular user session
// Clear the thread-local first so Hive.get() builds a fresh object instead
// of reusing one left behind by another session on this thread.
Hive.set(null);
try {
sessionHive = Hive.get(getHiveConf());
} catch (HiveException e) {
throw new HiveSQLException("Failed to setup metastore connection", e);
}
}
代码示例来源:origin: org.apache.spark/spark-hive-thriftserver_2.11
/**
 * Creates a session that runs under its own UGI with a dedicated per-session
 * metastore connection.
 *
 * @param protocol        thrift protocol version negotiated with the client
 * @param username        user this session's UGI is created for
 * @param password        password supplied by the client
 * @param hiveConf        session configuration
 * @param ipAddress       client IP address
 * @param delegationToken delegation token set on this session
 * @throws HiveSQLException if the metastore connection cannot be created
 */
public HiveSessionImplwithUGI(TProtocolVersion protocol, String username, String password,
HiveConf hiveConf, String ipAddress, String delegationToken) throws HiveSQLException {
super(protocol, username, password, hiveConf, ipAddress);
setSessionUGI(username);
setDelegationToken(delegationToken);
// create a new metastore connection for this particular user session
// Clear the thread-local first so Hive.get() builds a fresh object instead
// of reusing one left behind by another session on this thread.
Hive.set(null);
try {
sessionHive = Hive.get(getHiveConf());
} catch (HiveException e) {
throw new HiveSQLException("Failed to setup metastore connection", e);
}
}
代码示例来源:origin: com.github.hyukjinkwon/hive-service
/**
 * Creates a session that runs under its own UGI with a dedicated per-session
 * metastore connection.
 *
 * @param protocol        thrift protocol version negotiated with the client
 * @param username        user this session's UGI is created for
 * @param password        password supplied by the client
 * @param hiveConf        session configuration
 * @param ipAddress       client IP address
 * @param delegationToken delegation token set on this session
 * @throws HiveSQLException if the metastore connection cannot be created
 */
public HiveSessionImplwithUGI(TProtocolVersion protocol, String username, String password,
HiveConf hiveConf, String ipAddress, String delegationToken) throws HiveSQLException {
super(protocol, username, password, hiveConf, ipAddress);
setSessionUGI(username);
setDelegationToken(delegationToken);
// create a new metastore connection for this particular user session
// Clear the thread-local first so Hive.get() builds a fresh object instead
// of reusing one left behind by another session on this thread.
Hive.set(null);
try {
sessionHive = Hive.get(getHiveConf());
} catch (HiveException e) {
throw new HiveSQLException("Failed to setup metastore connection", e);
}
}
代码示例来源:origin: org.spark-project.hive/hive-service
/**
 * Creates a session that runs under its own UGI with a dedicated per-session
 * metastore connection.
 *
 * @param protocol        thrift protocol version negotiated with the client
 * @param username        user this session's UGI is created for
 * @param password        password supplied by the client
 * @param hiveConf        session configuration
 * @param ipAddress       client IP address
 * @param delegationToken delegation token set on this session
 * @throws HiveSQLException if the metastore connection cannot be created
 */
public HiveSessionImplwithUGI(TProtocolVersion protocol, String username, String password,
HiveConf hiveConf, String ipAddress, String delegationToken) throws HiveSQLException {
super(protocol, username, password, hiveConf, ipAddress);
setSessionUGI(username);
setDelegationToken(delegationToken);
// create a new metastore connection for this particular user session
// Clear the thread-local first so Hive.get() builds a fresh object instead
// of reusing one left behind by another session on this thread.
Hive.set(null);
try {
sessionHive = Hive.get(getHiveConf());
} catch (HiveException e) {
throw new HiveSQLException("Failed to setup metastore connection", e);
}
}
代码示例来源:origin: org.apache.hive/hive-service
// Executes the query on a child thread, propagating the parent thread's Hive
// object, session state, perf logger, and logging context first.
@Override
public Object run() throws HiveSQLException {
Hive.set(parentHive);
// TODO: can this result in cross-thread reuse of session state?
SessionState.setCurrentSessionState(parentSessionState);
PerfLogger.setPerfLogger(parentPerfLogger);
LogUtils.registerLoggingContext(queryState.getConf());
try {
if (asyncPrepare) {
// Compilation was deferred to this thread; do it now before running.
prepare(queryState);
}
runQuery();
} catch (HiveSQLException e) {
// TODO: why do we invent our own error path op top of the one from Future.get?
setOperationException(e);
LOG.error("Error running hive query: ", e);
} finally {
// Detach the logging context from this worker thread.
LogUtils.unregisterLoggingContext();
}
return null;
}
};
内容来源于网络,如有侵权,请联系作者删除!