我们正在尝试对其中一个字符串数据类型的列进行聚合(求和),使用的是下面的查询。我有两台数据相同的 Druid 服务器:一台是 Imply 发行版,另一台是通过 Ambari 安装的。在 Imply 上查询正常,能得到预期的输出;但在 Ambari 安装的 Druid 上,以下查询的输出为零。下面是我的 Kafka 摄取规范(Ambari 的 Druid 服务器和 Imply 上使用的规范相同):
{"type": "kafka",
"dataSchema": {"dataSource": "DRUID_RAIN","parser": {"type": "string", "parseSpec": { "format": "json", "timestampSpec": { "column": "DATE_TIME","format": "auto"},"flattenSpec": {"fields": [{ "type": "path","name": "deviceType","expr": "$.ENVIRONMENT.deviceType"},{ "type": "path","name": "NS","expr":"$.ENVIRONMENT.NS"},
{"type": "path","name": "latitude","expr": "$.ENVIRONMENT.latitude"},{ "type": "path","name": "TIME","expr": "$.ENVIRONMENT.TIME"},{ "type": "path","name": "tenantCode","expr": "$.ENVIRONMENT.tenantCode"},{ "type": "path","name": "deviceName","expr": "$.ENVIRONMENT.deviceName"},{ "type": "path","name": "MAC","expr": "$.ENVIRONMENT.MAC"},{ "type": "path","name": "DATE","expr": "$.ENVIRONMENT.DATE"},{ "type": "path","name": "RAIN","expr": "$.ENVIRONMENT.RAIN"},
{ "type": "path","name": "MESSAGE_ID","expr": "$.ENVIRONMENT.MESSAGE_ID"},{ "type": "path","name": "tenantId","expr": "$.ENVIRONMENT.tenantId"},{ "type": "path","name": "zoneId","expr": "$.ENVIRONMENT.zoneId"},{ "type": "path","name": "DATE_TIME","expr": "$.ENVIRONMENT.DATE_TIME"},{ "type": "path","name": "zoneName","expr": "$.ENVIRONMENT.zoneName" }, { "type": "path","name": "longitude","expr": "$.ENVIRONMENT.longitude"},{ "type": "path","name": "STATUS","expr": "$.ENVIRONMENT.STATUS"}]},"dimensionsSpec": {"dimensions":["deviceType","NS","latitude","TIME","tenantCode","deviceName","MAC","DATE","RAIN","MESSAGE_ID","tenantId","zoneId","DATE_TIME","zoneName","longitude","STATUS"]}}},"metricsSpec": [ ],"granularitySpec": { "type": "uniform", "segmentGranularity": "DAY", "queryGranularity": {"type": "none"},"rollup": true, "intervals": null},"transformSpec": { "filter": null, "transforms": []}},"tuningConfig": {"type": "kafka","maxRowsInMemory": 1000000,"maxBytesInMemory": 0,"maxRowsPerSegment": 5000000,"maxTotalRows": null,"intermediatePersistPeriod": "PT10M","maxPendingPersists": 0,"indexSpec": { "bitmap": { "type": "concise"}, "dimensionCompression": "lz4", "metricCompression": "lz4","longEncoding": "longs"},"buildV9Directly": true,"reportParseExceptions": false,"handoffConditionTimeout": 0,"resetOffsetAutomatically": false,"segmentWriteOutMediumFactory": null,"workerThreads": null,"chatThreads": null,"chatRetries": 8,"httpTimeout": "PT10S","shutdownTimeout": "PT80S","offsetFetchPeriod": "PT30S","intermediateHandoffPeriod": "P2147483647D","logParseExceptions": false,"maxParseExceptions": 2147483647,"maxSavedParseExceptions": 0,"skipSequenceNumberAvailabilityCheck": false},
"ioConfig": {"topic": "rain_out","replicas": 2,"taskCount": 1,"taskDuration": "PT5S","consumerProperties": { "bootstrap.servers": "XXXX:6667,XXXX:6667,XXXX:6667"},"pollTimeout": 100,"startDelay": "PT10S","period": "PT30S","useEarliestOffset": true,"completionTimeout": "PT20S","lateMessageRejectionPeriod": null,"earlyMessageRejectionPeriod": null,"stream": "env_out","useEarliestSequenceNumber": true,"type": "kafka"},
"context": null,
"suspended": false}
下面是我编写的查询:
{
"queryType": "groupBy",
"dataSource": "DRUID_RAIN",
"granularity": "hour",
"dimensions": [
"zoneName",
"deviceName"
],
"limitSpec": {
"type": "default",
"limit": 5000,
"columns": [
"zoneName",
"deviceName"
]
},
"aggregations": [
{
"type": "doubleSum",
"name": "RAIN",
"fieldName": "RAIN"
}
],
"intervals": [
"2020-10-27T18:30:00.000/2020-10-28T18:30:00.000"
],
"context": {
"skipEmptyBuckets": "true"
}
}
RAIN 列的输出总是得到“0”。无论是通过 Postman 还是 Druid 的命令行,我都没有得到期望的总和。但在 Imply 上,同样的查询能得到准确的输出。
暂无答案!
目前还没有任何答案,快来回答吧!