hbase spark连接器

irlmq6kh  于 2021-05-27  发布在  Spark
关注(0)|答案(0)|浏览(373)

我试图将一个 Spark DataFrame 写入 HBase。以下是我正在使用的各组件版本:
spark版本:2.4.0
cloudera分布:6.3.2
hbase版本:2.1.0
scala版本:2.11.12
使用的jar文件
hbase-common-2.1.0-cdh6.3.2.jar
hbase-client-2.1.0-cdh6.3.2.jar
hbase-server-2.1.0-cdh6.3.2.jar
shc-core-1.1.1-2.1-s_2.11.jar
代码

import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.execution.datasources.hbase.HBaseTableCatalog
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext

// Load the patient CSV (first row is treated as the header) and expose it
// to Spark SQL as a temporary view.
val Patient = spark.read
  .option("header", "true")
  .csv("/vps_demo/Patient.csv")
Patient.createOrReplaceTempView("Patient_data")

// Project only the three columns that will be written out to HBase, and
// register the projection as its own temp view.
val testDataFrame = spark.sql(
  "select REGISTRATIONNO,PATIENTNAME,Age from Patient_data")
testDataFrame.createOrReplaceTempView("testDataFrame")

// SHC (shc-core) catalog describing how the DataFrame columns map onto the
// HBase table `default:patient900`.
//
// Fixes over the original string:
//  - the per-column mappings must be nested under a single "columns" object;
//    the original instead repeated the top-level "rowkey" key, which the
//    SHC catalog parser cannot use;
//  - `"col":key"` was missing its opening quote, so the JSON was unparsable;
//  - exactly one DataFrame column must map to cf "rowkey" / col "key" to act
//    as the HBase row key — REGISTRATIONNO is the natural unique key here.
def catalog = s"""{
    "table":{"namespace":"default","name":"patient900"},
    "rowkey":"key",
    "columns":{
      "REGISTRATIONNO":{"cf":"rowkey","col":"key","type":"string"},
      "PATIENTNAME":{"cf":"personal","col":"PATIENTNAME","type":"string"},
      "Age":{"cf":"personal","col":"Age","type":"string"}
    }
  }""".stripMargin

// Persist the projected DataFrame to HBase via the SHC data source,
// passing the JSON catalog that defines the table mapping.
// NOTE(review): the NoSuchMethodError on org.json4s.jackson.JsonMethods.parse
// reported below looks like a classpath conflict — SHC appears to have been
// built against an older json4s than the one Spark 2.4 ships — confirm the
// shc-core build matches the cluster's Spark version.
val writeOptions = Map(HBaseTableCatalog.tableCatalog -> catalog)
testDataFrame.write
  .format("org.apache.spark.sql.execution.datasources.hbase")
  .options(writeOptions)
  .save()

错误

java.lang.NoSuchMethodError: org.json4s.jackson.JsonMethods$.parse(Lorg/json4s/JsonInput;Z)Lorg/json4s/JsonAST$JValue;
  at org.apache.spark.sql.execution.datasources.hbase.HBaseTableCatalog$.apply(HBaseTableCatalog.scala:186)
  at org.apache.spark.sql.execution.datasources.hbase.HBaseRelation.<init>(HBaseRelation.scala:162)
  at org.apache.spark.sql.execution.datasources.hbase.DefaultSource.createRelation(HBaseRelation.scala:57)
  at org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:45)
  at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
  at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
  at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:86)
  at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:131)
  at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:127)
  at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:155)
  at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
  at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:152)
  at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:127)
  at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:80)
  at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:80)
  at org.apache.spark.sql.DataFrameWriter$$anonfun$runCommand$1.apply(DataFrameWriter.scala:668)
  at org.apache.spark.sql.DataFrameWriter$$anonfun$runCommand$1.apply(DataFrameWriter.scala:668)
  at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:78)
  at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:125)
  at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:73)
  at org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:668)
  at org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:276)
  at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:270)
  ... 76 elided

有人能告诉我为什么会出现这个错误吗?

暂无答案!

目前还没有任何答案,快来回答吧!

相关问题