I have created my datasource in Druid by ingesting a CSV file.
For example, here is what the data looks like:
"2015-09-12T00:47:00.496Z",100134,33,21,30505,43285,U,67c38115-1a68-45bb-858d-dd6cdeaab5cb,
"2015-09-12T00:47:00.496Z",100082,6,26,31548,43202,U,a4f8708a-30ac-4637-910c-e8f9386d6353,
The data was ingested into Druid with the following task spec, indexcsv.json:
{
  "type" : "index_hadoop",
  "spec" : {
    "ioConfig" : {
      "type" : "hadoop",
      "inputSpec" : {
        "type" : "static",
        "paths" : "/opt/druid-0.12.3/npmData/example.csv"
      }
    },
    "dataSchema" : {
      "dataSource" : "example",
      "granularitySpec" : {
        "type" : "uniform",
        "segmentGranularity" : "day",
        "queryGranularity" : "none",
        "intervals" : ["2010-09-12/2018-09-13"]
      },
      "parser" : {
        "type" : "hadoopyString",
        "parseSpec" : {
          "format" : "csv",
          "timestampSpec" : {
            "column" : "timestamp"
          },
          "columns" : ["timestamp","IId","QId","Score","StartOffsetInMs","EndOffsetInMs","SpeakerRole","QueryIdentity","SId"],
          "dimensionsSpec" : {
            "dimensions" : ["IId","QId","SpeakerRole","QueryIdentity","SId"]
          }
        }
      },
      "metricsSpec" : [
        {
          "name" : "count",
          "type" : "count"
        }
      ]
    },
    "tuningConfig" : {
      "type" : "hadoop",
      "partitionsSpec" : {
        "type" : "hashed",
        "targetPartitionSize" : 5000000
      },
      "jobProperties" : {}
    }
  }
}
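For reference, a task spec like this is normally submitted to the Druid Overlord's task endpoint (port 8090 by default; adjust host and port for your deployment):
curl -X POST -H 'Content-Type: application/json' -d @indexcsv.json http://localhost:8090/druid/indexer/v1/task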
I can see this data in Druid, e.g.:
[root@ENT-CL-015243 druid-0.12.3]# curl -X 'POST' -H 'Content-Type:application/json' -d @customJsons/groupby-sql.json http://localhost:8082/druid/v2/sql
[{"IId":"1","QId":"26","QueryIdentity":"c5b7d739-a531-409e-afd1-fb294846560a","SpeakerRole":"U","__time":"2015-09-12T00:47:00.496Z","count":1},
{"IId":"1","QId":"30","QueryIdentity":"ba8bb5f5-36e4-41ee-b74c-536b50aa979a","SpeakerRole":"U","__time":"2015-09-12T00:47:00.496Z","count":1},
To query this data from Hive, I followed the steps described at:
https://cwiki.apache.org/confluence/display/Hive/Druid+Integration (section "Queries completely executed in Druid")
I opened the Hive shell and ran the following queries:
hive> CREATE EXTERNAL TABLE example
STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
TBLPROPERTIES ("druid.datasource" = "example");
hive> DESCRIBE FORMATTED example;
OK
# col_name              data_type                           comment
iid                     string                              from deserializer
qid                     string                              from deserializer
queryidentity           string                              from deserializer
speakerrole             string                              from deserializer
__time                  timestamp with local time zone      from deserializer
count                   bigint                              from deserializer
# Detailed Table Information
Database: default
OwnerType: USER
Owner: root
CreateTime: Thu Nov 08 13:18:14 IST 2018
LastAccessTime: UNKNOWN
Retention: 0
Location: hdfs://localhost:9000/user/hive/warehouse/example
Table Type: EXTERNAL_TABLE
Table Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"__time\":\"true\",\"count\":\"true\",\"iid\":\"true\",\"qid\":\"true\",\"queryidentity\":\"true\",\"speakerrole\":\"true\"}}
EXTERNAL TRUE
bucketing_version 2
druid.datasource example
numFiles 0
numRows 0
rawDataSize 0
storage_handler org.apache.hadoop.hive.druid.DruidStorageHandler
totalSize 0
transient_lastDdlTime 1541675894
# Storage Information
SerDe Library: org.apache.hadoop.hive.druid.serde.DruidSerDe
InputFormat: null
OutputFormat: null
Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
Time taken: 0.288 seconds, Fetched: 39 row(s)
hive> SELECT * FROM example LIMIT 10;
OK
NULL NULL NULL NULL 2015-09-12 03:47:00.496 Asia/Jerusalem 1
NULL NULL NULL NULL 2015-09-12 03:47:00.496 Asia/Jerusalem 1
NULL NULL NULL NULL 2015-09-12 03:47:00.496 Asia/Jerusalem 1
NULL NULL NULL NULL 2015-09-12 03:47:00.496 Asia/Jerusalem 1
NULL NULL NULL NULL 2015-09-12 03:47:00.496 Asia/Jerusalem 1
NULL NULL NULL NULL 2015-09-12 03:47:00.496 Asia/Jerusalem 1
NULL NULL NULL NULL 2015-09-12 03:47:00.496 Asia/Jerusalem 1
NULL NULL NULL NULL 2015-09-12 03:47:00.496 Asia/Jerusalem 1
NULL NULL NULL NULL 2015-09-12 03:47:00.496 Asia/Jerusalem 1
NULL NULL NULL NULL 2015-09-12 03:47:00.496 Asia/Jerusalem 1
Time taken: 0.104 seconds, Fetched: 10 row(s)
As you can see, all of the column values come back as NULL. My guess is that this is related to some input format somewhere; could someone please help with this?
2 Answers

Answer 1 (ylamdve6):
You can check whether the following properties are configured (replace myip with your actual hosts):
hive.druid.broker.address.default: myip:8082
hive.druid.coordinator.address.default: myip:8081
hive.druid.http.numConnection: 20
hive.druid.http.read.timeout: PT10M
hive.druid.indexer.memory.rownum.max: 75000
hive.druid.indexer.partition.size.max: 1000000
hive.druid.indexer.segments.granularity: DAY
hive.druid.metadata.base: druid
hive.druid.metadata.db.type: mysql
hive.druid.metadata.password: druid
hive.druid.metadata.uri: jdbc:mysql://myip:3306/druid
hive.druid.metadata.username: druid
hive.druid.storage.storageDirectory: /apps/hive/warehouse
hive.druid.working.directory: /tmp/druid-indexing
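If any of them are missing, a quick way to try them out is to set them for the current session before querying (a sketch with placeholder values; for a permanent setup they belong in hive-site.xml):
hive> SET hive.druid.broker.address.default=myip:8082;
hive> SET hive.druid.coordinator.address.default=myip:8081;
hive> SET hive.druid.http.numConnection=20;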
Answer 2 (ccrfmcuu):
Column names in Druid are case-sensitive, whereas Hive column names are not (Hive lower-cases them). Rename your Druid columns to lowercase and this will work.
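For example, the parseSpec in the ingestion spec above could declare everything in lowercase so that the names match what Hive expects (a sketch of only the lines that change; the datasource has to be re-indexed afterwards):
"columns" : ["timestamp","iid","qid","score","startoffsetinms","endoffsetinms","speakerrole","queryidentity","sid"],
"dimensionsSpec" : {
  "dimensions" : ["iid","qid","speakerrole","queryidentity","sid"]
}
After re-indexing, SELECT * FROM example LIMIT 10; in Hive should return the dimension values instead of NULLs.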