log4j not writing to HDFS / log4j.properties

9o685dep posted on 2021-05-29 in Hadoop

With the configuration below I expected log4j to write into an HDFS folder (/myfolder/mysubfolder), but it does not even create a file named hadoop9.log there. I also tried creating hadoop9.log manually on HDFS, but that did not help either.
Am I missing something in log4j.properties?


# Define some default values that can be overridden by system properties

hadoop.root.logger=INFO,console,RFA,DRFA
hadoop.log.dir= /myfolder/mysubfolder
hadoop.log.file=hadoop9.log

# Define the root logger to the system property "hadoop.root.logger".

log4j.rootLogger=${hadoop.root.logger}, EventCounter

# Logging Threshold

log4j.threshold=ALL

# Null Appender

log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender

#
# Rolling File Appender - cap space usage at 5gb.
#
hadoop.log.maxfilesize=256MB
hadoop.log.maxbackupindex=20
log4j.appender.RFA=org.apache.log4j.RollingFileAppender
log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
log4j.appender.RFA.layout=org.apache.log4j.PatternLayout

# Pattern format: Date LogLevel LoggerName LogMessage
log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
# Debugging Pattern format
# log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n

#
# Daily Rolling File Appender
#
log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}

# Rollover at midnight
log4j.appender.DRFA.DatePattern=.yyyy-MM-dd

# 30-day backup
# log4j.appender.DRFA.MaxBackupIndex=30
log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout

# Pattern format: Date LogLevel LoggerName LogMessage
log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
# Debugging Pattern format
# log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n

#
# console
# Add "console" to rootlogger above if you want to use this
#

log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n

#
# TaskLog Appender
#

# Default values

hadoop.tasklog.taskid=null
hadoop.tasklog.iscleanup=false
hadoop.tasklog.noKeepSplits=4
hadoop.tasklog.totalLogFileSize=100
hadoop.tasklog.purgeLogSplits=true
hadoop.tasklog.logsRetainHours=12

log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}

log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n

#
# HDFS block state change log from block manager
#
# Uncomment the following to suppress normal block state change
# messages from BlockManager in NameNode.
# log4j.logger.BlockStateChange=WARN

#
# Security appender
#

hadoop.security.logger=INFO,NullAppender
hadoop.security.log.maxfilesize=256MB
hadoop.security.log.maxbackupindex=20
log4j.category.SecurityLogger=${hadoop.security.logger}
hadoop.security.log.file=SecurityAuth-${user.name}.audit
log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}

#
# Daily Rolling Security appender
#

log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd

#
# hadoop configuration logging
#
# Uncomment the following line to turn off configuration deprecation warnings.
# log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN

#
# hdfs audit logging
#

hdfs.audit.logger=INFO,NullAppender
hdfs.audit.log.maxfilesize=256MB
hdfs.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}

#
# mapred audit logging
#

mapred.audit.logger=INFO,NullAppender
mapred.audit.log.maxfilesize=256MB
mapred.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}

# Custom Logging levels
# log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
# log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
# log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG

# Jets3t library
log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR

#
# Event Counter Appender
# Sends counts of logging messages at different severity levels to Hadoop Metrics.
#
log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter

#
# Job Summary Appender
#
# Use following logger to send summary to separate file defined by
# hadoop.mapreduce.jobsummary.log.file :
# hadoop.mapreduce.jobsummary.logger=INFO,JSA
#

hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
hadoop.mapreduce.jobsummary.log.maxbackupindex=20
log4j.appender.JSA=org.apache.log4j.RollingFileAppender
log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false

#
# Yarn ResourceManager Application Summary Log
#
# Set the ResourceManager summary log filename
yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
# Set the ResourceManager summary log level and appender
yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
# yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY

# To enable AppSummaryLogging for the RM,
# set yarn.server.resourcemanager.appsummary.logger to
# <LEVEL>,RMSUMMARY in hadoop-env.sh

# Appender for ResourceManager Application Summary Log
# Requires the following properties to be set
# - hadoop.log.dir (Hadoop Log directory)
# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)

log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
log4j.appender.RMSUMMARY.MaxFileSize=256MB
log4j.appender.RMSUMMARY.MaxBackupIndex=20
log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n

# HS audit log configs
# mapreduce.hs.audit.logger=INFO,HSAUDIT
# log4j.logger.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=${mapreduce.hs.audit.logger}
# log4j.additivity.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=false
# log4j.appender.HSAUDIT=org.apache.log4j.DailyRollingFileAppender
# log4j.appender.HSAUDIT.File=${hadoop.log.dir}/hs-audit.log
# log4j.appender.HSAUDIT.layout=org.apache.log4j.PatternLayout
# log4j.appender.HSAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
# log4j.appender.HSAUDIT.DatePattern=.yyyy-MM-dd

# Http Server Request Logs
# log4j.logger.http.requests.namenode=INFO,namenoderequestlog
# log4j.appender.namenoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
# log4j.appender.namenoderequestlog.Filename=${hadoop.log.dir}/jetty-namenode-yyyy_mm_dd.log
# log4j.appender.namenoderequestlog.RetainDays=3

# log4j.logger.http.requests.datanode=INFO,datanoderequestlog
# log4j.appender.datanoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
# log4j.appender.datanoderequestlog.Filename=${hadoop.log.dir}/jetty-datanode-yyyy_mm_dd.log
# log4j.appender.datanoderequestlog.RetainDays=3

# log4j.logger.http.requests.resourcemanager=INFO,resourcemanagerrequestlog
# log4j.appender.resourcemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
# log4j.appender.resourcemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-resourcemanager-yyyy_mm_dd.log
# log4j.appender.resourcemanagerrequestlog.RetainDays=3

# log4j.logger.http.requests.jobhistory=INFO,jobhistoryrequestlog
# log4j.appender.jobhistoryrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
# log4j.appender.jobhistoryrequestlog.Filename=${hadoop.log.dir}/jetty-jobhistory-yyyy_mm_dd.log
# log4j.appender.jobhistoryrequestlog.RetainDays=3

# log4j.logger.http.requests.nodemanager=INFO,nodemanagerrequestlog
# log4j.appender.nodemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
# log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
# log4j.appender.nodemanagerrequestlog.RetainDays=3

log4j.logger.org.apache.zookeeper=ERROR
log4j.logger.com.mapr.util.zookeeper=WARN
log4j.logger.org.apache.hadoop.yarn.client.MapRZKBasedRMFailoverProxyProvider=WARN

n3ipq98p 1#

The RollingFileAppender only writes to local disk. Unless you can somehow mount your HDFS so that it "looks like a local disk" to your operating system, this will not work. You would have to choose another log4j appender type that supports remote logging, such as the Flume appender, or roll your own.
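
The "roll your own" option can be sketched as a small custom log4j 1.x appender that writes formatted events through Hadoop's FileSystem API instead of java.io. The sketch below is only illustrative, not the asker's setup: the class name HdfsAppender, the default fsUri hdfs://namenode:8020 and the target path are assumptions, and a production version would need buffering, rolling and retry logic.

import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;

/**
 * Minimal sketch of a "roll your own" log4j 1.x appender that writes
 * formatted events to an HDFS file via the Hadoop FileSystem API.
 * Class name, URI and path are illustrative assumptions.
 */
public class HdfsAppender extends AppenderSkeleton {

    // Set from log4j.properties, e.g. log4j.appender.HDFS.fsUri / .path
    private String fsUri = "hdfs://namenode:8020";              // assumption
    private String path = "/myfolder/mysubfolder/hadoop9.log";

    private FSDataOutputStream out;
    private BufferedWriter writer;

    public void setFsUri(String fsUri) { this.fsUri = fsUri; }
    public void setPath(String path)   { this.path = path; }

    @Override
    public void activateOptions() {
        try {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", fsUri);
            FileSystem fs = FileSystem.get(conf);
            Path p = new Path(path);
            // Append if the file exists (needs append support), otherwise create it
            out = fs.exists(p) ? fs.append(p) : fs.create(p, true);
            writer = new BufferedWriter(new OutputStreamWriter(out, StandardCharsets.UTF_8));
        } catch (IOException e) {
            errorHandler.error("Cannot open HDFS log file " + path, e, 0);
        }
    }

    @Override
    protected void append(LoggingEvent event) {
        if (writer == null) {
            return; // activateOptions failed
        }
        try {
            writer.write(layout.format(event));
            writer.flush();
            out.hsync(); // push the bytes out to the datanodes
        } catch (IOException e) {
            errorHandler.error("Cannot write log event to HDFS", e, 0);
        }
    }

    @Override
    public void close() {
        try {
            if (writer != null) {
                writer.close();
            }
        } catch (IOException e) {
            errorHandler.error("Cannot close HDFS log file", e, 0);
        }
        closed = true;
    }

    @Override
    public boolean requiresLayout() {
        return true;
    }
}

Such a class would then be wired into log4j.properties like the other appenders (log4j.appender.HDFS set to its fully qualified class name, plus layout, fsUri and path properties) and HDFS added to hadoop.root.logger. The lower-effort alternatives remain the Flume log4j appender, or mounting HDFS on the OS (for example via the HDFS NFS gateway) so that an ordinary file appender can reach it.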
