我正在尝试使用以下代码从 Spark 访问远程 HiveServer2 上的表：
import org.apache.spark.SparkContext, org.apache.spark.SparkConf, org.apache.spark.sql._
import com.typesafe.config._
import java.io._
import org.apache.hadoop.fs._
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.Row
import org.apache.spark.sql.SparkSession
object stack {

  /**
   * Reads two Hive tables (`sample.source`, `sample.destination`) through a
   * remote Hive metastore and prints every row.
   *
   * Root cause of the original "connection refused" error: `hive.metastore.uris`
   * was commented out, so Spark fell back to an embedded/local metastore while
   * the tables live behind a remote Hive metastore service. The fix (per the
   * accepted answer) is to set `hive.metastore.uris` with the thrift:// protocol
   * and make sure the metastore service is actually running on the remote host
   * (`hive --service metastore`).
   */
  def main(args: Array[String]): Unit = {
    val warehouseLocation = "/usr/hive/warehouse"

    // JDBC settings for the metastore backing database. These matter only when
    // Spark runs its own (embedded) metastore; they are ignored once
    // hive.metastore.uris points at a remote metastore service.
    System.setProperty("javax.jdo.option.ConnectionURL", "jdbc:mysql://sparkserver:3306/metastore?createDatabaseIfNotExist=true")
    System.setProperty("javax.jdo.option.ConnectionUserName", "hiveroot")
    System.setProperty("javax.jdo.option.ConnectionPassword", "hivepassword")
    // ${user.name} is expanded by Hive's own configuration-variable
    // substitution, not by Scala — do NOT turn this into an s-interpolator.
    System.setProperty("hive.exec.scratchdir", "/tmp/hive/${user.name}")
    System.setProperty("spark.sql.warehouse.dir", warehouseLocation)
    System.setProperty("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver")
    // NOTE(review): "/user/hive/warehouse" here vs "/usr/hive/warehouse" in
    // warehouseLocation above — confirm which path is intended; they should
    // normally be the same.
    System.setProperty("hive.metastore.warehouse.dir", "/user/hive/warehouse")

    val spark = SparkSession.builder().master("local")
      .appName("spark remote")
      .config("javax.jdo.option.ConnectionURL", "jdbc:mysql://sparkserver:3306/metastore?createDatabaseIfNotExist=true")
      .config("javax.jdo.option.ConnectionUserName", "hiveroot")
      .config("javax.jdo.option.ConnectionPassword", "hivepassword")
      .config("hive.exec.scratchdir", "/tmp/hive/${user.name}")
      .config("spark.sql.warehouse.dir", warehouseLocation)
      // FIX: point Spark at the remote metastore service. The protocol is
      // always thrift, whether the server is HiveServer1 or HiveServer2, and
      // the metastore service must be started separately on the remote host.
      .config("hive.metastore.uris", "thrift://sparkserver:9083")
      .config("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver")
      .config("hive.querylog.location", "/tmp/hivequerylogs/${user.name}")
      .config("hive.support.concurrency", "false")
      .config("hive.server2.enable.doAs", "true")
      .config("hive.server2.authentication", "PAM")
      .config("hive.server2.custom.authentication.class", "org.apache.hive.service.auth.PamAuthenticationProvider")
      .config("hive.server2.authentication.pam.services", "sshd,sudo")
      .config("hive.stats.dbclass", "jdbc:mysql")
      .config("hive.stats.jdbcdriver", "com.mysql.jdbc.Driver")
      .config("hive.session.history.enabled", "true")
      .config("hive.metastore.schema.verification", "false")
      .config("hive.optimize.sort.dynamic.partition", "false")
      .config("hive.optimize.insert.dest.volume", "false")
      // NOTE(review): fixedDatastore=true with autoCreateSchema=false but
      // schema.autoCreateAll=true looks contradictory — verify which schema
      // policy is intended.
      .config("datanucleus.fixedDatastore", "true")
      .config("hive.metastore.warehouse.dir", "/user/hive/warehouse")
      .config("datanucleus.autoCreateSchema", "false")
      .config("datanucleus.schema.autoCreateAll", "true")
      .config("datanucleus.schema.validateConstraints", "true")
      .config("datanucleus.schema.validateColumns", "true")
      .config("datanucleus.schema.validateTables", "true")
      // fs.default.name is the deprecated alias of fs.defaultFS; kept as-is
      // for compatibility with the original configuration.
      .config("fs.default.name", "hdfs://sparkserver:54310")
      .config("dfs.namenode.name.dir", "/usr/local/hadoop_tmp/hdfs/namenode")
      .config("dfs.datanode.name.dir", "/usr/local/hadoop_tmp/hdfs/datanode")
      .enableHiveSupport()
      .getOrCreate()

    import spark.implicits._
    import spark.sql

    // collect() pulls the full table to the driver — acceptable only for
    // small sample tables like these.
    try {
      sql("select * from sample.source").collect().foreach(println)
      sql("select * from sample.destination").collect().foreach(println)
    } finally {
      spark.stop() // release the SparkContext even if a query fails
    }
  }
}
连接元存储（metastore）的请求被远程 Hive 服务器拒绝。
错误：Failed to start hive-metastore.service: Unit hive-metastore.service not found.（无法启动 hive-metastore.service：找不到该服务单元）
谢谢您!
2条答案
按热度按时间9rbhqvlz1#
使用时:
.config("hive.metastore.uris", "thrift://hiveserver:9083")
时，hiveserver 应该是远程 Hive 服务器的正确 IP。配置项
hive.metastore.uris
指向 Hive 元存储（metastore）服务。如果你在本地（localhost）运行 Spark 并希望使用远程元存储，需要单独启动 Hive 元存储服务；或者——默认情况下 Hive 使用本地元存储——这种情况下无需设置
hive.metastore.uris
另外——忘了提一下——该属性始终使用 thrift
协议，无论是 HiveServer1 还是 HiveServer2。所以，一定要使用 thrift:// 前缀。
n3ipq98p2#
通常我们不需要单独指向远程元存储：
hive-site.xml 会在内部通过 JDBC 指向元存储。
在初始化 HiveContext（或启用 Hive 支持的 SparkSession）之前，也可以在程序中设置相同的配置，如上面问题中的代码所示。
试试看。