Exporting Elasticsearch data to a CSV file. Efficiency is not a concern for now; this post only covers the implementation approach. The connection helper lives in common/util_es.py and is imported by the export script further down.
# common/util_es.py
from elasticsearch import Elasticsearch

def connect_elk():
    client = Elasticsearch(hosts='http://192.168.56.20:9200',
                           http_auth=("elastic", "elastic_password"),  # replace with the real password
                           # Sniff the cluster before performing any operation
                           # sniff_on_start=True,
                           # When a node stops responding, refresh and reconnect
                           sniff_on_connection_fail=True,
                           # Refresh every 60 seconds
                           sniffer_timeout=60
                           )
    return client
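A quick way to confirm the helper actually reaches the cluster before wiring it into an export script; a small sketch, where the host and credentials above are placeholders for a real deployment:

if __name__ == '__main__':
    client = connect_elk()
    # ping() returns True when the cluster answers; info() shows version and cluster details
    print(client.ping())
    print(client.info())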
# export script
from elasticsearch import Elasticsearch
import csv
from common.util_es import connect_elk

# Get the Elasticsearch client
es = connect_elk()
'''
Query all documents in an index and export them to a CSV file
'''
index = 'blog_rate'
body = {}
item = ["r_id", "a_id"]
# Example of restricting the export with a query:
# body = {
#     "query": {
#         "match": {"name": "张三"},
#     }
# }
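Before running a full export, it can help to know how many documents the body matches, which is roughly how many rows the CSV will contain. A minimal sketch using the client's count API with the same index and body (assuming body carries only a query clause, which is all that count accepts):

doc_count = es.count(index=index, body=body)['count']
print(f'{doc_count} documents match and will be exported from {index}')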
def ExportCsv(index, body, item):
    # The initial search opens a scroll context that stays alive for 5 minutes
    query = es.search(index=index, body=body, scroll='5m', size=1000)
    # First page of hits returned by the search
    results = query['hits']['hits']
    # Total number of matching documents
    total = query['hits']['total']["value"]
    # Scroll id used to page through the remaining results
    scroll_id = query['_scroll_id']
    # The divisor matches the page size above, so every remaining page gets fetched
    for i in range(0, int(total / 1000) + 1):
        # The scroll parameter must be passed again, otherwise the call fails
        query_scroll = es.scroll(scroll_id=scroll_id, scroll='5m')['hits']['hits']
        results += query_scroll
    with open('./' + index + '.csv', 'w', newline='', encoding="utf_8_sig") as flow:
        csv_writer = csv.writer(flow)
        for res in results:
            csvrow1 = []
            for i in item:
                csvrow1.append(res["_source"][i])
            csv_writer.writerow(csvrow1)
    print('done!')


ExportCsv(index, body, item)
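On the efficiency point set aside in the intro: the elasticsearch-py client also ships a helpers.scan generator that wraps the scroll API and yields hits one at a time, so the whole result set never has to be accumulated in memory. A sketch of the same export written around it (the function name export_csv_scan and the _scan.csv output path are illustrative, not part of the original post):

from elasticsearch.helpers import scan

def export_csv_scan(index, body, item):
    with open('./' + index + '_scan.csv', 'w', newline='', encoding="utf_8_sig") as flow:
        csv_writer = csv.writer(flow)
        # scan() handles the scrolling internally and yields one hit dict at a time
        for hit in scan(es, query=body, index=index, size=1000, scroll='5m'):
            csv_writer.writerow([hit["_source"][field] for field in item])
    print('done!')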
Reference:
https://blog.csdn.net/github_27244019/article/details/115351640
Original article: https://vitcloud.blog.csdn.net/article/details/124173239