I am new to programming with Spark Structured Streaming. I ran into an error after using F.approx_count_distinct, and this is my code. My problem is that I want to get a DataFrame to detect fraud, but first I need to check whether different people are using the same card_number. Can anyone help me? Thanks in advance.
from pyspark.sql import SparkSession
from pyspark import SparkContext, SparkConf
from pyspark.sql.functions import from_json, col
from pyspark.sql import functions as F
from pyspark.sql.functions import when
from pyspark.sql.types import *
conf = SparkConf().setAppName("Pruebas").setMaster("local")
sc = SparkContext(conf=conf)
sc.setLogLevel("ERROR")
sparkSQL = SparkSession \
.builder \
.appName("SparkSQL") \
.master("local") \
.getOrCreate()
broker="localhost:9092"
topic = "transacts"
# Build the streaming DataFrame
df = sparkSQL \
.readStream \
.format("kafka") \
.option("kafka.bootstrap.servers", broker) \
.option("failOnDataLoss", "false") \
.option("subscribe", topic) \
.option("startingOffsets", "latest") \
.option("includeTImestamp", "true") \
.load()
# Define the schema we will use for the JSON
schema = StructType([ StructField("card_owner", StringType(), True),
StructField("card_number", StringType(), True),
StructField("geography", StringType(), True),
StructField("target", StringType(), True),
StructField("amount", StringType(), True),
StructField("currency", StringType(), True)])
# Decode the JSON
# decoding the JSON produces a set of sub-columns inside the value field
df = df.withColumn("value", from_json(df["value"].cast("string"), schema))
df.printSchema()
# Select the message timestamp and the JSON columns
df = df.select("timestamp","value.*")
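# Count distinct owners per card number and keep only the cards with more than one owner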
df1 = df.groupBy(df.card_number) \
    .agg(F.approx_count_distinct(df.card_owner).alias('titulares')) \
    .filter((F.col('titulares')>1))
df1 = df1.selectExpr("'a' as key", "to_json(struct(*)) as value")
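# Write the aggregation result to the auxiliary Kafka topic aux_topic1 as JSON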
query= df1.writeStream\
.outputMode("complete")\
.format("kafka")\
.option("topic","aux_topic1")\
.option("kafka.bootstrap.servers", "localhost:9092")\
.option("checkpointLocation","hdfs://localhost:9000/checkpoints")\
.start()
# query.awaitTermination(200)
# Read back from Kafka and convert the JSON to a DataFrame
topic1= "aux_topic1"
df1 = sparkSQL \
.readStream \
.format("kafka") \
.option("kafka.bootstrap.servers", broker) \
.option("failOnDataLoss", "false") \
.option("subscribe", topic1) \
.option("startingOffsets", "latest") \
.option("includeTImestamp", "true") \
.load()
# Define the schema we will use for the JSON
schema = StructType([ StructField("card_number", StringType(), True),
StructField("titulares", StringType(), True)])
# Decode the JSON
df1 = df1.withColumn("value", from_json(df1["value"].cast("string"), schema))
df1.printSchema()
df1 = df1.select("timestamp","value.*")
df2 = df.join(df1, on="card_number")
# Display on the console
query1= df2.writeStream\
.outputMode("append")\
.format("console")\
.queryName("test")\
.start()
query1.awaitTermination()
1 Answer
The problem seems to be here, more precisely in your filter:
.filter((F.col('titulares')>1))
If you want to get all the card numbers that appear more than once, you can do it as follows. This is your DataFrame:
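A minimal sketch of such a DataFrame, built statically with hypothetical values for card_number and card_owner (the column names come from the question's schema):

from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.master("local").appName("example").getOrCreate()

# Hypothetical sample data: card 1111 is used by two different owners
data = [("1111", "Alice"), ("1111", "Bob"), ("2222", "Carol"), ("2222", "Carol")]
df = spark.createDataFrame(data, ["card_number", "card_owner"])
df.show()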
Now, to get the count per card number (filtering out the ones that have no duplicates):
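A sketch of that step, assuming you keep approx_count_distinct over card_owner as in the question:

# Count distinct owners per card and keep only cards with more than one owner
counts = df.groupBy("card_number") \
    .agg(F.approx_count_distinct("card_owner").alias("titulares")) \
    .filter(F.col("titulares") > 1)
counts.show()

With the hypothetical data above, only card 1111 survives the filter, since it has two distinct owners.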
Now, if you also want the card_owner, then:
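One possible way (my assumption, not necessarily the only one) is to collect the owners for each card while counting them:

# Gather the distinct owners per card alongside the count, then keep only duplicated cards
owners = df.groupBy("card_number") \
    .agg(F.collect_set("card_owner").alias("card_owners"),
         F.approx_count_distinct("card_owner").alias("titulares")) \
    .filter(F.col("titulares") > 1)
owners.show(truncate=False)

Alternatively, you can join counts back to the original df on card_number to keep the individual transaction rows.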