Dremio exception

Asked by 6qftjkof on 2021-05-31 in Hadoop

I am very new to Dremio, and I am trying to integrate Dremio (standalone) with an existing Hive 2.3.3 setup (on top of Hadoop 2.7).
Strangely, while I can access a few tables, I cannot access the others; for those I get the following error:

master:8020 failed on connection exception: java.net.ConnectException
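
A check that could narrow this down (a sketch; some_failing_table and some_working_table are placeholders for real table names) is to compare the HDFS location the metastore has recorded for a table that fails against one that works, since the error points at master:8020:

  # Location recorded in the metastore for a table Dremio cannot read
  hive -e "DESCRIBE FORMATTED some_failing_table;" | grep -i location

  # Same check for a table Dremio can read, for comparison
  hive -e "DESCRIBE FORMATTED some_working_table;" | grep -i location

If the failing tables report locations like hdfs://master:8020/... while the working ones point at hdfs://master:9000/..., the stale port stored in the metastore would explain the connection exception.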

Here is my setup:
Dremio general configuration:

Dremio advanced configuration:

I have copied the following hive-site.xml and core-site.xml files into the Dremio root/conf folder.
hive-site.xml

  <configuration>
    <property>
      <name>hive.execution.engine</name>
      <value>spark</value>
    </property>

    <property>
      <name>javax.jdo.option.ConnectionURL</name>
      <value>jdbc:mysql://data5:3306/hive_metastore?createDatabaseIfNotExist=true&amp;characterEncoding=UTF-8&amp;useSSL=false</value>
    </property>

    <property>
      <name>javax.jdo.option.ConnectionDriverName</name>
      <value>com.mysql.jdbc.Driver</value>
    </property>

    <property>
      <name>javax.jdo.option.ConnectionUserName</name>
      <value>someuser</value>
      <description>user name for connecting to mysql server</description>
    </property>

    <property>
      <name>javax.jdo.option.ConnectionPassword</name>
      <value>testpassword</value>
      <description>password for connecting to mysql server</description>
    </property>

    <property>
      <name>spark.master</name>
      <value>yarn</value>
    </property>
    <property>
      <name>spark.submit.deployMode</name>
      <value>cluster</value>
    </property>
    <property>
      <name>spark.home</name>
      <value>/usr/local/spark-2.4.4-bin-without-hadoop</value>
    </property>
    <property>
      <name>spark.executor.memory</name>
      <value>2g</value>
    </property>
    <property>
      <name>spark.executor.memoryOverhead</name>
      <value>512M</value>
    </property>
    <property>
      <name>spark.yarn.executor.memoryOverhead</name>
      <value>512M</value>
    </property>
    <property>
      <name>spark.executor.cores</name>
      <value>1</value>
    </property>
    <property>
      <name>spark.executor.instances</name>
      <value>3</value>
    </property>

    <property>
      <name>spark.debug.maxToStringFields</name>
      <value>200</value>
    </property>
    <!-- <property> <name>spark.dynamicAllocation.enabled</name> <value>true</value> </property> -->
    <property>
      <name>spark.driver.memory</name>
      <value>1g</value>
    </property>

    <property>
      <name>spark.yarn.driver.memoryOverhead</name>
      <value>256M</value>
    </property>

    <property>
      <name>spark.yarn.jars</name>
      <value>hdfs://master:9000/user/spark/jars/*</value>
    </property>

    <property>
      <name>hive.exec.reducers.bytes.per.reducer</name>
      <value>256000000</value>
    </property>
    <property>
      <name>hive.exec.reducers.max</name>
      <value>100</value>
    </property>
    <property>
      <name>mapreduce.job.reducers</name>
      <value>2</value>
    </property>
    <property>
      <name>hive.spark.client.connect.timeout</name>
      <value>30000ms</value>
    </property>
    <property>
      <name>hive.spark.client.server.connect.timeout</name>
      <value>30000ms</value>
    </property>
    <property>
      <name>spark.yarn.dist.files</name>
      <value>/usr/local/apache-hive-2.3.3-bin/conf/hive-site.xml</value>
    </property>
    <!-- thrift setup -->

    <property>
      <name>hive.server2.transport.mode</name>
      <value>http</value>
    </property>
    <property>
      <name>hive.server2.thrift.http.port</name>
      <value>10000</value>
    </property>
    <property>
      <name>hive.server2.thrift.http.max.worker.threads</name>
      <value>500</value>
    </property>

    <property>
      <name>hive.server2.thrift.http.min.worker.threads</name>
      <value>10</value>
    </property>
    <property>
      <name>hive.server2.thrift.http.path</name>
      <value>cliservice</value>
    </property>

    <property>
      <name>hive.server2.enable.doAs</name>
      <value>false</value>
    </property>

    <property>
      <name>hive.metastore.uris</name>
      <value>thrift://data6:9083</value>
    </property>

    <property>
      <name>hive.exec.dynamic.partition.mode</name>
      <value>nonstrict</value>
    </property>

    <!-- Transaction properties. Disabled for now. <property> <name>hive.support.concurrency</name> <value>true</value> </property> <property> <name>hive.enforce.bucketing</name> <value>true</value> </property> <property> <name>hive.txn.manager</name>
    <value>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager</value> </property> <property> <name>hive.compactor.initiator.on</name> <value>true</value> </property> -->

    <property>
      <name>hive.server2.authentication</name>
      <value>NONE</value>
    </property>

    <property>
      <name>hive.metastore.event.db.notification.api.auth</name>
      <value>false</value>
    </property>

    <property>
      <name>hive.server2.active.passive.ha.enable</name>
      <value>true</value>
    </property>

    <property>
      <name>hive.server2.limit.connections.per.user</name>
      <value>50</value>
    </property>

    <property>
      <name>hive.spark.client.rpc.max.size</name>
      <value>1262485504</value>
    </property>

    <property>
      <name>hive.server2.thrift.min.worker.threads</name>
      <value>5</value>
    </property>

    <property>
      <name>hive.server2.thrift.max.worker.threads</name>
      <value>500</value>
    </property>

    <property>
      <name>hive.mapjoin.hybridgrace.hashtable</name>
      <value>false</value>
    </property>

    <property>
      <name>hive.spark.job.monitor.timeout</name>
      <value>60000</value>
    </property>

    <property>
      <name>hive.spark.client.server.connect.timeout</name>
      <value>60000</value>
    </property>

    <property>
      <name>auto.convert.join</name>
      <value>false</value>
    </property>

    <property>
      <name>hive.vectorized.execution</name>
      <value>false</value>
    </property>

    <!-- <property> <name>hive.vectorized.execution.enabled</name> <value>true</value> </property> -->

    <property>
      <name>hive.cbo.enable</name>
      <value>true</value>
    </property>

    <property>
      <name>hive.exec.max.dynamic.partitions.pernode</name>
      <value>20000</value>
    </property>

    <property>
      <name>hive.exec.max.dynamic.partitions</name>
      <value>20000</value>
    </property>

    <property>
      <name>hive.exec.dynamic.partition</name>
      <value>true</value>
    </property>

    <property>
      <name>hive.exec.dynamic.partition.mode</name>
      <value>nonstrict</value>
    </property>

    <property>
      <name>hive.enforce.bucketing</name>
      <value>false</value>
    </property>

    <property>
      <name>hive.enforce.sorting</name>
      <value>false</value>
    </property>

    <property>
      <name>optimize.sort.dynamic.partitionining</name>
      <value>true</value>
    </property>

    <property>
      <name>spark.sql.sources.bucketing.enabled</name>
      <value>true</value>
    </property>

    <property>
      <name>hive.mapjoin.optimized.hashtable</name>
      <value>false</value>
    </property>

  </configuration>

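Note that hive-site.xml itself only references the NameNode as hdfs://master:9000 (in spark.yarn.jars), never master:8020. A quick way to confirm the address the NameNode actually listens on (a sketch, run from any node with the Hadoop client configured; /user/hive/warehouse is the assumed default warehouse path):

  # Filesystem URI and NameNode RPC address from the live configuration
  hdfs getconf -confKey fs.defaultFS
  hdfs getconf -nnRpcAddresses

  # Sanity check: the warehouse directory should be listable on the configured port
  hdfs dfs -ls hdfs://master:9000/user/hive/warehouse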

core-site.xml

  <?xml version="1.0" encoding="UTF-8"?>
  <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
  <configuration>
    <property>
      <name>fs.default.name</name>
      <value>hdfs://master:9000</value>
    </property>
    <property>
      <name>fs.defaultFS</name>
      <value>hdfs://master:9000</value>
    </property>
    <property>
      <name>dfs.permissions</name>
      <value>false</value>
    </property>

    <property>
      <name>hadoop.proxyuser.hadoopuser.groups</name>
      <value>*</value>
    </property>

    <property>
      <name>hadoop.proxyuser.hadoopuser.hosts</name>
      <value>*</value>
    </property>

    <property>
      <name>dfs.webhdfs.enabled</name>
      <value>true</value>
    </property>

    <property>
      <name>hadoop.proxyuser.hue.hosts</name>
      <value>*</value>
    </property>
    <property>
      <name>hadoop.proxyuser.hue.groups</name>
      <value>*</value>
    </property>

    <property>
      <name>hadoop.proxyuser.dremio.hosts</name>
      <value>*</value>
    </property>

    <property>
      <name>hadoop.proxyuser.dremio.groups</name>
      <value>*</value>
    </property>
    <property>
      <name>hadoop.proxyuser.dremio.users</name>
      <value>*</value>
    </property>

  </configuration>

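One thing that stands out when comparing the two files with the error: core-site.xml sets fs.defaultFS to hdfs://master:9000, while the error message references master:8020. Could tables created while the NameNode was still on port 8020 keep the old URI in their metastore locations? If so, would rewriting those locations with Hive's metatool be the right fix? A sketch of what I understand that would look like (the old/new URIs are my assumption based on the error and the config above; dry run first and back up the metastore database):

  # List the filesystem roots currently recorded in the metastore
  hive --service metatool -listFSRoot

  # Dry run: show what would be rewritten from the old URI to the new one
  hive --service metatool -updateLocation hdfs://master:9000 hdfs://master:8020 -dryRun

  # Apply the change once the dry run output looks correct
  hive --service metatool -updateLocation hdfs://master:9000 hdfs://master:8020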

Any suggestions on what might be causing this error?

No answers yet.
