I came across this nice piece of code here, which I am pasting below:
import org.apache.spark.sql.{Column, DataFrame}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import scala.annotation.tailrec

implicit class DFHelpers(df: DataFrame) {
  // One level of struct flattening: promote each field of a struct column
  // to a top-level column named <struct>_<field>; keep other columns as-is.
  def columns: Array[Column] =
    df.schema.fields.flatMap {
      case column if column.dataType.isInstanceOf[StructType] =>
        column.dataType.asInstanceOf[StructType].fields.map { field =>
          col(s"${column.name}.${field.name}").as(s"${column.name}_${field.name}")
        }.toList
      case column => List(col(column.name))
    }

  // Repeat the one-level flattening until no struct columns remain.
  def flatten: DataFrame =
    if (df.schema.exists(_.dataType.isInstanceOf[StructType]))
      df.select(columns: _*).flatten
    else df

  // Explode every array column (outer, so null/empty arrays keep their row),
  // flattening any structs the explode exposes, until no arrays remain.
  def explodeColumns: DataFrame = {
    @tailrec
    def loop(cdf: DataFrame): DataFrame =
      cdf.schema.fields.filter(_.dataType.typeName == "array") match {
        case arrays if arrays.nonEmpty =>
          loop(arrays.foldLeft(cdf) { (dfa, field) =>
            dfa.withColumn(field.name, explode_outer(col(field.name))).flatten
          })
        case _ => cdf
      }
    loop(df.flatten)
  }
}
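With the implicit class in scope, the whole pipeline is a single call (a minimal usage sketch, assuming df is any DataFrame with nested struct/array columns):

val flat = df.explodeColumns  // recursively flattens structs and explodes arrays
flat.printSchema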
However, is there a way to flatten everything (which the code above does) except for one specific column? For example, if I have:
scala> df.printSchema
root
|-- stackoverflow: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- tag: struct (nullable = true)
| | | |-- author: string (nullable = true)
| | | |-- frameworks: array (nullable = true)
| | | | |-- element: struct (containsNull = true)
| | | | | |-- id: long (nullable = true)
| | | | | |-- name: string (nullable = true)
| | | |-- id: long (nullable = true)
| | | |-- name: string (nullable = true)
|-- category: array (nullable = true)
Is there a way to flatten everything (which it already does) except for the "category" column?
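One possible approach (a sketch of my own, not from the quoted code): thread a set of column names to skip through the same explode-and-flatten recursion, so any column whose name is in the set is passed through untouched. The names SelectiveFlatten, flattenExcept, and keep below are hypothetical:

import org.apache.spark.sql.{Column, DataFrame}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import scala.annotation.tailrec

// Hypothetical variant of the pasted helpers: flatten structs and explode
// arrays everywhere except the columns named in `keep`.
implicit class SelectiveFlatten(df: DataFrame) {
  def flattenExcept(keep: Set[String]): DataFrame = {
    @tailrec
    def loop(cdf: DataFrame): DataFrame = {
      // Explode every array column we are allowed to touch.
      val arrays = cdf.schema.fields
        .filter(f => f.dataType.typeName == "array" && !keep(f.name))
      val exploded = arrays.foldLeft(cdf)((d, f) =>
        d.withColumn(f.name, explode_outer(col(f.name))))
      // Promote struct fields one level, again skipping kept columns.
      val structs = exploded.schema.fields
        .filter(f => f.dataType.isInstanceOf[StructType] && !keep(f.name))
      if (arrays.isEmpty && structs.isEmpty) exploded
      else {
        val cols: Array[Column] = exploded.schema.fields.flatMap {
          case f if f.dataType.isInstanceOf[StructType] && !keep(f.name) =>
            f.dataType.asInstanceOf[StructType].fields.map(sf =>
              col(s"${f.name}.${sf.name}").as(s"${f.name}_${sf.name}"))
          case f => Array(col(f.name))
        }
        loop(exploded.select(cols: _*))
      }
    }
    loop(df)
  }
}

Under these assumptions, df.flattenExcept(Set("category")) should explode and flatten stackoverflow into columns such as stackoverflow_tag_author and stackoverflow_tag_frameworks_id while leaving category as the original array; treat it as a starting point rather than a verified answer.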