val sqlContext = new org.apache.spark.sql.SQLContext(sc)
val customers = sc.textFile("/FileStore/tables/Customer.txt")
case class CUSTOMERS (custid:Int,cname:String,prodid:Int)
val DF1 =customers.map(_.split('\t')).map(c=>CUSTOMERS(c(0).toInt,c(1),c(2).toInt)).toDF
DF1.createOrReplaceTempView("cust")
val cust1 =sqlContext.sql("select custid,cname,count(prodid) from cust group by custid,cname,prodid")
cust1.show()
(1) Spark Jobs
command-997514807385057:1: warning: constructor SQLContext in class SQLContext is deprecated: Use SparkSession.builder instead
val sqlContext = new org.apache.spark.sql.SQLContext(sc)
^
org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 4.0 failed 1 times, most recent failure: Lost task 1.0 in stage 4.0 (TID 5, localhost, executor driver): java.lang.NumberFormatException: For input string: ""
at java.lang.NumberFormatException.forInputString(NumberFormatException.java:65)
at java.lang.Integer.parseInt(Integer.java:592)
at java.lang.Integer.parseInt(Integer.java:615)
at scala.collection.immutable.StringLike$class.toInt(StringLike.scala:273)
at scala.collection.immutable.StringOps.toInt(StringOps.scala:29)
at line16675c99081740c8a574492d3705e15f31.$read$$iw$$iw$$iw$$iw$$iw$$iw$$anonfun$2.apply(command-997514807385057:4)
at line16675c99081740c8a574492d3705e15f31.$read$$iw$$iw$$iw$$iw$$iw$$iw$$anonfun$2.apply(command-997514807385057:4)
at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.agg_doAggregateWithKeys_0$(Unknown Source)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$13$$anon$1.hasNext(WholeStageCodegenExec.scala:640)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:125)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:55)
at org.apache.spark.scheduler.Task.doRunTask(Task.scala:140)
at org.apache.spark.scheduler.Task.run(Task.scala:113)
at org.apache.spark.executor.Executor$TaskRunner$$anonfun$13.apply(Executor.scala:537)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1541)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:543)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
1条答案
按热度按时间ffx8fchx1#
检查以下代码。
toInt
函数无法将“”字符串值解析为int在转换为字符串之前,必须检查字符串列是否包含数字
toInt
```import org.apache.spark.sql.{Column, DataFrame, Dataset, SparkSession}
val spark = SparkSession.builder().appName("app").master("local").getOrCreate()
val customers = sc.textFile("/FileStore/tables/Customer.txt")
case class Customers (custid:Int,cname:String,prodid:Int)
import scala.util.{Failure, Success, Try}
def parse( s: String ) = Try(s.toInt).toOption
val dfa =customers.map(_.split('\t')).map(c=>Customers(parse(c(0)).getOrElse(0),c(1),parse(c(2)).getOrElse(0))).toDF
dfa.createOrReplaceTempView("cust")
val cust1 =spark.sql("select custid,cname,count(prodid) from cust group by custid,cname,prodid")
cust1.show()