I initially assumed shards would be placed automatically, because my cluster health was yellow. After reading up on shard optimization I found I had a lot of unassigned shards, so I increased the node count to 2 and the cluster went green.
GET /_cluster/health?level=indices
{
  "cluster_name" : "861156488073:hd-staging",
  "status" : "green",
  "timed_out" : false,
  "number_of_nodes" : 2,
  "number_of_data_nodes" : 2,
  "discovered_master" : true,
  "active_primary_shards" : 47,
  "active_shards" : 94,
  "relocating_shards" : 0,
  "initializing_shards" : 0,
  "unassigned_shards" : 0,
  "delayed_unassigned_shards" : 0,
  "number_of_pending_tasks" : 0,
  "number_of_in_flight_fetch" : 0,
  "task_max_waiting_in_queue_millis" : 0,
  "active_shards_percent_as_number" : 100.0,
  "indices" : {
    ".kibana_-1326395655_honestdoorstg_1" : {
      "status" : "green",
      "number_of_shards" : 1,
      "number_of_replicas" : 1,
      "active_primary_shards" : 1,
      "active_shards" : 2,
      "relocating_shards" : 0,
      "initializing_shards" : 0,
      "unassigned_shards" : 0
    },
    ".kibana_-1326395655_honestdoorstg_2" : {
      "status" : "green",
      "number_of_shards" : 1,
      "number_of_replicas" : 1,
      "active_primary_shards" : 1,
      "active_shards" : 2,
      "relocating_shards" : 0,
      "initializing_shards" : 0,
      "unassigned_shards" : 0
    },
    ".kibana_2" : {
      "status" : "green",
      "number_of_shards" : 1,
      "number_of_replicas" : 1,
      "active_primary_shards" : 1,
      "active_shards" : 2,
      "relocating_shards" : 0,
      "initializing_shards" : 0,
      "unassigned_shards" : 0
    },
    "neighbourhood_assessment" : {
      "status" : "green",
      "number_of_shards" : 5,
      "number_of_replicas" : 1,
      "active_primary_shards" : 5,
      "active_shards" : 10,
      "relocating_shards" : 0,
      "initializing_shards" : 0,
      "unassigned_shards" : 0
    },
    ".kibana_1" : {
      "status" : "green",
      "number_of_shards" : 1,
      "number_of_replicas" : 1,
      "active_primary_shards" : 1,
      "active_shards" : 2,
      "relocating_shards" : 0,
      "initializing_shards" : 0,
      "unassigned_shards" : 0
    },
    ".opendistro_security" : {
      "status" : "green",
      "number_of_shards" : 1,
      "number_of_replicas" : 1,
      "active_primary_shards" : 1,
      "active_shards" : 2,
      "relocating_shards" : 0,
      "initializing_shards" : 0,
      "unassigned_shards" : 0
    },
    ".tasks" : {
      "status" : "green",
      "number_of_shards" : 1,
      "number_of_replicas" : 1,
      "active_primary_shards" : 1,
      "active_shards" : 2,
      "relocating_shards" : 0,
      "initializing_shards" : 0,
      "unassigned_shards" : 0
    },
    ".kibana_3" : {
      "status" : "green",
      "number_of_shards" : 1,
      "number_of_replicas" : 1,
      "active_primary_shards" : 1,
      "active_shards" : 2,
      "relocating_shards" : 0,
      "initializing_shards" : 0,
      "unassigned_shards" : 0
    },
    "assessment" : {
      "status" : "green",
      "number_of_shards" : 5,
      "number_of_replicas" : 1,
      "active_primary_shards" : 5,
      "active_shards" : 10,
      "relocating_shards" : 0,
      "initializing_shards" : 0,
      "unassigned_shards" : 0
    },
    "valuation" : {
      "status" : "green",
      "number_of_shards" : 5,
      "number_of_replicas" : 1,
      "active_primary_shards" : 5,
      "active_shards" : 10,
      "relocating_shards" : 0,
      "initializing_shards" : 0,
      "unassigned_shards" : 0
    },
    "neighbourhood" : {
      "status" : "green",
      "number_of_shards" : 5,
      "number_of_replicas" : 1,
      "active_primary_shards" : 5,
      "active_shards" : 10,
      "relocating_shards" : 0,
      "initializing_shards" : 0,
      "unassigned_shards" : 0
    },
    "permit" : {
      "status" : "green",
      "number_of_shards" : 5,
      "number_of_replicas" : 1,
      "active_primary_shards" : 5,
      "active_shards" : 10,
      "relocating_shards" : 0,
      "initializing_shards" : 0,
      "unassigned_shards" : 0
    },
    "property" : {
      "status" : "green",
      "number_of_shards" : 5,
      "number_of_replicas" : 1,
      "active_primary_shards" : 5,
      "active_shards" : 10,
      "relocating_shards" : 0,
      "initializing_shards" : 0,
      "unassigned_shards" : 0
    },
    "listing" : {
      "status" : "green",
      "number_of_shards" : 5,
      "number_of_replicas" : 1,
      "active_primary_shards" : 5,
      "active_shards" : 10,
      "relocating_shards" : 0,
      "initializing_shards" : 0,
      "unassigned_shards" : 0
    },
    "close" : {
      "status" : "green",
      "number_of_shards" : 5,
      "number_of_replicas" : 1,
      "active_primary_shards" : 5,
      "active_shards" : 10,
      "relocating_shards" : 0,
      "initializing_shards" : 0,
      "unassigned_shards" : 0
    }
  }
}
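As an aside: while the cluster was still yellow, the _cat/shards API was a quick way to list exactly which shards were unassigned and why (the columns below are standard _cat/shards headers, shown as a sketch rather than output from this cluster):
GET /_cat/shards?v&h=index,shard,prirep,state,unassigned.reason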
But the block remains:
GET /_cluster/settings
{
  "persistent" : {
    "cluster" : {
      "routing" : {
        "allocation" : {
          "cluster_concurrent_rebalance" : "2",
          "node_concurrent_recoveries" : "2",
          "disk" : {
            "watermark" : {
              "low" : "1.35gb",
              "flood_stage" : "0.45gb",
              "high" : "0.9gb"
            }
          },
          "node_initial_primaries_recoveries" : "4"
        }
      },
      "blocks" : {
        "create_index" : "true"
      }
    },
    "indices" : {
      "recovery" : {
        "max_bytes_per_sec" : "60mb"
      }
    }
  },
  "transient" : {
    "cluster" : {
      "routing" : {
        "allocation" : {
          "cluster_concurrent_rebalance" : "2",
          "node_concurrent_recoveries" : "2",
          "disk" : {
            "watermark" : {
              "low" : "1.35gb",
              "flood_stage" : "0.45gb",
              "high" : "0.9gb"
            }
          },
          "exclude" : { },
          "node_initial_primaries_recoveries" : "4"
        }
      }
    },
    "indices" : {
      "recovery" : {
        "max_bytes_per_sec" : "60mb"
      }
    }
  }
}
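Note that these disk watermarks are absolute free-space values rather than the usual percentages: presumably new shards stop being allocated to a node once its free space falls below the low mark (1.35gb), shards get moved away below the high mark (0.9gb), and the flood stage (0.45gb) triggers write blocks. The cluster-wide create_index block appears to be the managed service's reaction to low storage (see the AWS link further down). To see the offending key in its flat dotted form, the same endpoint accepts the standard flat_settings parameter:
GET /_cluster/settings?flat_settings=true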
I've tried the _cluster/settings API with all of the following (using both null and false as the value):
PUT /_cluster/settings
{"persistent": {"cluster": {"blocks.create_index": (null and false)}}}
PUT /_cluster/settings
{"persistent": {"cluster.blocks": (null and false)}}
PUT /_cluster/settings
{
  "persistent" : {
    "cluster.blocks.create_index": (null and false)
  }
}
# all return
{"Message":"Your request: '/_cluster/settings' payload is not allowed."}
How do I clear this block so I can reindex? Are persistent settings on the Elasticsearch Service even editable? What am I missing?
I think increasing my domain to 2 nodes pushed me over capacity on my EBS storage allocation: https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/aes-handling-errors.html
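To check the disk theory, _cat/allocation shows per-node disk usage against those thresholds (standard _cat column names; a sketch, not output from this cluster):
GET /_cat/allocation?v&h=node,disk.indices,disk.used,disk.avail,disk.total,disk.percent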
1 Answer
Disk space on the instances was the issue.
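If so, the remedy lives outside the cluster API: grow the domain's EBS volumes (or move to larger instances) and the service should lift the block once free space recovers. A hedged sketch with the AWS CLI, where the volume size is a placeholder and the domain name is taken from the cluster_name above:
# Increase per-node EBS volume size on the domain (legacy "es" CLI namespace)
aws es update-elasticsearch-domain-config \
  --domain-name hd-staging \
  --ebs-options EBSEnabled=true,VolumeType=gp2,VolumeSize=35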
And here is a very handy shard-optimization tool so you don't run into this problem again: https://gbaptista.github.io/elastic-calculator/