I have a Hive script that looks like this:
-- skip the header in the CSV on import
SET skip.header.line.count = 1;
-- create the cac table
CREATE EXTERNAL TABLE channelAccessCodes (accessCode string, channelCode string, id string, upc string, version bigint)
STORED BY 'org.apache.hadoop.hive.dynamodb.DynamoDBStorageHandler'
TBLPROPERTIES ("dynamodb.table.name" = "properties.channelAccessCode",
"dynamodb.column.mapping" = "accessCode:accessCode,channelCode:channelCode,id:id,upc:upc,version:version");
CREATE TEMPORARY TABLE IF NOT EXISTS bdc (id STRING, name STRING, address STRING, zip STRING, city_hotel STRING,
  cc1 STRING, ufi STRING, class STRING, currencycode STRING, minrate STRING, maxrate STRING, preferred STRING,
  nr_rooms STRING, longitude STRING, latitude STRING, public_ranking STRING, hotel_url STRING, photo_url STRING,
  desc_en STRING, desc_fr STRING, desc_es STRING, desc_de STRING, desc_nl STRING, desc_it STRING, desc_pt STRING,
  desc_ja STRING, desc_zh STRING, desc_pl STRING, desc_ru STRING, desc_sv STRING, desc_ar STRING, desc_el STRING,
  desc_no STRING, city_unique STRING, city_preferred STRING, continent_id STRING, review_score STRING, review_nr STRING)
ROW FORMAT DELIMITED fields terminated by '\t' lines terminated by '\n'
stored as textfile
LOCATION 's3://properties-uat-imports/input/BDC'
tblproperties("skip.header.line.count"="1");
CREATE TEMPORARY TABLE TempTableDeletes(ChannelAccessCode STRING)
ROW FORMAT DELIMITED fields terminated by '|' lines terminated by '\n';
INSERT INTO TABLE TempTableDeletes
SELECT channelAccessCodes.id
FROM channelAccessCodes
LEFT JOIN bdc ON channelAccessCodes.id = CONCAT('BDC', bdc.id)
WHERE CONCAT('BDC', bdc.id) IS NULL AND channelAccessCodes.id LIKE 'BDC%';
-- part: write the selected data to S3
create external table s3_export_deletes(ChannelAccessCode STRING)
row format delimited lines terminated by '\n'
stored as textfile
LOCATION 's3://properties-uat-imports-emr/';
-- write
INSERT OVERWRITE TABLE s3_export_deletes SELECT * from TempTableDeletes;
It basically reads data from a Dynamo table, then reads a file from S3 ... does a bit of manipulation in the output table, and then writes that table out to an S3 bucket.
When this Hive script runs on EMR, it writes a file to S3. I have a notification configured on that bucket so that a message is sent to an SQS queue whenever the bucket receives a PUT. I would expect exactly one notification, but there seem to be several (6, to be precise)?
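For reference, the notification is wired up roughly like this (a minimal boto3 sketch; the queue ARN here is a placeholder, not my real one):

import boto3

s3 = boto3.client("s3")

# Send an SQS message for every object PUT into the bucket.
# The queue ARN is a placeholder -- substitute the real queue.
s3.put_bucket_notification_configuration(
    Bucket="properties-uat-imports-emr",
    NotificationConfiguration={
        "QueueConfigurations": [{
            "QueueArn": "arn:aws:sqs:eu-west-1:123456789012:hive-output-queue",
            "Events": ["s3:ObjectCreated:Put"],
        }]
    },
)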
Does anyone know why this is? Is there any way to force Hive to make only a single call to the S3 bucket?
Thanks,
Chris
1 Answer
The problem here is that Hive's whole output commit process writes files to subdirectories and then renames them, all of which is a mix of PUT calls: one per directory, plus one per copy made during the rename (on S3 a rename is implemented as a copy followed by a delete).
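One quick way to confirm this is to drain the queue and look at the event names and object keys that actually arrive (a sketch; the queue URL is a placeholder):

import json
import boto3

sqs = boto3.client("sqs")
queue_url = "https://sqs.eu-west-1.amazonaws.com/123456789012/hive-output-queue"

# Pull a batch of notifications and print which keys triggered them,
# to see whether the extra events come from staging/renamed files.
resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=10, WaitTimeSeconds=5)
for msg in resp.get("Messages", []):
    body = json.loads(msg["Body"])
    for record in body.get("Records", []):
        print(record["eventName"], record["s3"]["object"]["key"])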
Other than filtering those notifications (in a Lambda?) to discard everything except the final commit, I can't think of any easy solution. A sketch of such a filter is below.
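A minimal sketch of such a filter, assuming the bucket notification is pointed at a Lambda that forwards only final output keys on to the queue. The staging-key patterns and the queue environment variable are assumptions; check the keys you actually see in the bucket and adjust:

import json
import os
import urllib.parse

import boto3

# Hypothetical queue URL, supplied via the Lambda's environment.
QUEUE_URL = os.environ["TARGET_QUEUE_URL"]

# Key fragments that mark Hive's intermediate commit artifacts.
# These patterns are an assumption -- verify against your bucket.
STAGING_MARKERS = (".hive-staging", "_temporary", "_tmp")

sqs = boto3.client("sqs")

def handler(event, context):
    """Forward only notifications for final output objects to SQS."""
    for record in event.get("Records", []):
        key = urllib.parse.unquote_plus(record["s3"]["object"]["key"])
        if any(marker in key for marker in STAGING_MARKERS):
            continue  # intermediate file from Hive's commit process, drop it
        sqs.send_message(QueueUrl=QUEUE_URL, MessageBody=json.dumps(record))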