org.slf4j.Logger.isInfoEnabled()方法的使用及代码示例

x33g5p2x  于2022-01-16 转载在 其他  
字(10.6k)|赞(0)|评价(0)|浏览(772)

本文整理了Java中org.slf4j.Logger.isInfoEnabled()方法的一些代码示例,展示了Logger.isInfoEnabled()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Logger.isInfoEnabled()方法的具体详情如下:
包路径:org.slf4j.Logger
类名称:Logger
方法名:isInfoEnabled

Logger.isInfoEnabled介绍

[英]Is the logger instance enabled for the INFO level?
[中]是否为信息级别启用了记录器实例?

代码示例

代码示例来源:origin: spring-projects/spring-framework

  1. public void info(Object message, Throwable exception) {
  2. if (message instanceof String || this.logger.isInfoEnabled()) {
  3. this.logger.info(String.valueOf(message), exception);
  4. }
  5. }

代码示例来源:origin: com.h2database/h2

  1. @Override
  2. public boolean isEnabled(int level) {
  3. switch (level) {
  4. case TraceSystem.DEBUG:
  5. return logger.isDebugEnabled();
  6. case TraceSystem.INFO:
  7. return logger.isInfoEnabled();
  8. case TraceSystem.ERROR:
  9. return logger.isErrorEnabled();
  10. default:
  11. return false;
  12. }
  13. }

代码示例来源:origin: oblac/jodd

  1. @Override
  2. public boolean isEnabled(final Level level) {
  3. switch (level) {
  4. case TRACE: return logger.isTraceEnabled();
  5. case DEBUG: return logger.isDebugEnabled();
  6. case INFO: return logger.isInfoEnabled();
  7. case WARN: return logger.isWarnEnabled();
  8. case ERROR: return logger.isErrorEnabled();
  9. default:
  10. throw new IllegalArgumentException();
  11. }
  12. }

代码示例来源:origin: eclipse-vertx/vert.x

  1. level == DEBUG_INT && logger.isDebugEnabled() ||
  2. level == INFO_INT && logger.isInfoEnabled() ||
  3. level == WARN_INT && logger.isWarnEnabled() ||
  4. level == ERROR_INT && logger.isErrorEnabled()) {
  5. LocationAwareLogger l = (LocationAwareLogger) logger;
  6. break;
  7. case DEBUG_INT:
  8. logger.debug(msg, parameters);
  9. break;
  10. case INFO_INT:
  11. logger.info(msg, parameters);
  12. break;
  13. case WARN_INT:
  14. logger.warn(msg, parameters);
  15. break;
  16. case ERROR_INT:
  17. logger.error(msg, parameters);
  18. break;
  19. default:

代码示例来源:origin: alibaba/fescar

  1. @Override
  2. public void run() {
  3. while (true) {
  4. if (messageStrings.size() > 0) {
  5. StringBuilder builder = new StringBuilder();
  6. while (!messageStrings.isEmpty()) {
  7. builder.append(messageStrings.poll()).append(BATCH_LOG_SPLIT);
  8. }
  9. if (LOGGER.isInfoEnabled()) {
  10. LOGGER.info(builder.toString());
  11. }
  12. }
  13. try {
  14. Thread.sleep(IDLE_CHECK_MILLS);
  15. } catch (InterruptedException exx) {
  16. LOGGER.error(exx.getMessage());
  17. }
  18. }
  19. }
  20. }

代码示例来源:origin: apache/drill

  1. private void logConfigurations(JobConf localJobConf) {
  2. if (LOG.isInfoEnabled()) {
  3. LOG.info("Logging job configuration: ");
  4. StringWriter outWriter = new StringWriter();
  5. try {
  6. Configuration.dumpConfiguration(localJobConf, outWriter);
  7. } catch (IOException e) {
  8. LOG.warn("Error logging job configuration", e);
  9. }
  10. LOG.info(outWriter.toString());
  11. }
  12. }
  13. }

代码示例来源:origin: apache/hive

  /**
   * Attempts to free capacity for a rejected task by pre-empting a lower-priority
   * running task from the preemption queue.
   *
   * @param rejected the task whose scheduling attempt was rejected
   * @return true if a victim task's kill was invoked (capacity may free up soon),
   *         false if preemption is disabled, the queue is empty, or no suitable
   *         victim was found
   */
  private boolean handleScheduleAttemptedRejection(TaskWrapper rejected) {
    // TODO: is this check even needed given what the caller checks?
    if (!enablePreemption || preemptionQueue.isEmpty()) {
      return false;
    }
    LOG.debug("Preemption Queue: {}", preemptionQueue);
    // This call checks under lock if we can actually preempt the task.
    // It is possible to have a race where the update (that's also under lock) makes the
    // task finishable or guaranteed between the remove and kill, but it's the same timing
    // issue as would happen if there was a tiny delay on the network, so we don't care.
    TaskWrapper victim = getSuitableVictimFromPreemptionQueue(rejected);
    if (victim == null) {
      return false; // Woe us. Nothing suitable to pre-empt.
    }
    if (LOG.isInfoEnabled()) {
      LOG.info("Invoking kill task for {} due to pre-emption to run {}",
          victim.getRequestId(), rejected.getRequestId());
    }
    // The task will either be killed or is already in the process of completing, which will
    // trigger the next scheduling run, or result in available slots being higher than 0,
    // which will cause the scheduler loop to continue.
    victim.getTaskRunnerCallable().killTask();
    // We've killed something and may want to wait for it to die.
    return true;
  }

代码示例来源:origin: apache/flink

  1. @Override
  2. public void close() throws IOException {
  3. if (this.invalidLineCount > 0) {
  4. if (LOG.isWarnEnabled()) {
  5. LOG.warn("In file \"" + currentSplit.getPath() + "\" (split start: " + this.splitStart + ") " + this.invalidLineCount +" invalid line(s) were skipped.");
  6. }
  7. }
  8. if (this.commentCount > 0) {
  9. if (LOG.isInfoEnabled()) {
  10. LOG.info("In file \"" + currentSplit.getPath() + "\" (split start: " + this.splitStart + ") " + this.commentCount +" comment line(s) were skipped.");
  11. }
  12. }
  13. super.close();
  14. }

代码示例来源:origin: apache/hive

  1. @Override
  2. public void cleanUpInputFileChangedOp() throws HiveException {
  3. super.cleanUpInputFileChangedOp();
  4. Path fpath = getExecContext().getCurrentInputPath();
  5. Path nominalPath = getNominalPath(fpath);
  6. Map<Operator<?>, MapOpCtx> contexts = opCtxMap.get(nominalPath);
  7. if (LOG.isInfoEnabled()) {
  8. StringBuilder builder = new StringBuilder();
  9. for (MapOpCtx context : contexts.values()) {
  10. if (builder.length() > 0) {
  11. builder.append(", ");
  12. }
  13. builder.append(context.alias);
  14. }
  15. if (LOG.isDebugEnabled()) {
  16. LOG.debug("Processing alias(es) " + builder.toString() + " for file " + fpath);
  17. }
  18. }
  19. // Add alias, table name, and partitions to hadoop conf so that their
  20. // children will inherit these
  21. for (Entry<Operator<?>, MapOpCtx> entry : contexts.entrySet()) {
  22. Operator<?> operator = entry.getKey();
  23. MapOpCtx context = entry.getValue();
  24. operator.setInputContext(context.tableName, context.partName);
  25. }
  26. currentCtxs = contexts.values().toArray(new MapOpCtx[contexts.size()]);
  27. }

代码示例来源:origin: apache/hive

  1. logLevel = LogLevel.valueOf(config.getRpcChannelLogLevel());
  2. } catch (Exception e) {
  3. LOG.warn("Invalid log level {}, reverting to default.", config.getRpcChannelLogLevel());
  4. switch (logLevel) {
  5. case DEBUG:
  6. logEnabled = LOG.isDebugEnabled();
  7. break;
  8. case ERROR:
  9. break;
  10. case INFO:
  11. logEnabled = LOG.isInfoEnabled();
  12. break;
  13. case TRACE:
  14. break;
  15. case WARN:
  16. logEnabled = LOG.isWarnEnabled();
  17. break;

代码示例来源:origin: ethereum/ethereumj

  1. private void processDisconnect(ChannelHandlerContext ctx, DisconnectMessage msg) {
  2. if (logger.isInfoEnabled() && msg.getReason() == ReasonCode.USELESS_PEER) {
  3. if (channel.getNodeStatistics().ethInbound.get() - ethInbound > 1 ||
  4. channel.getNodeStatistics().ethOutbound.get() - ethOutbound > 1) {
  5. // it means that we've been disconnected
  6. // after some incorrect action from our peer
  7. // need to log this moment
  8. logger.debug("From: \t{}\t [DISCONNECT reason=BAD_PEER_ACTION]", channel);
  9. }
  10. }
  11. ctx.close();
  12. killTimers();
  13. }

代码示例来源:origin: networknt/light-4j

  1. private List<URL> nodeChildsToUrls(String parentPath, List<String> currentChilds) {
  2. List<URL> urls = new ArrayList<URL>();
  3. if (currentChilds != null) {
  4. for (String node : currentChilds) {
  5. String nodePath = parentPath + Constants.PATH_SEPARATOR + node;
  6. String data = client.readData(nodePath, true);
  7. try {
  8. URL url = URLImpl.valueOf(data);
  9. urls.add(url);
  10. } catch (Exception e) {
  11. if(logger.isInfoEnabled()) logger.warn(String.format("Found malformed urls from ZooKeeperRegistry, path=%s", nodePath), e);
  12. }
  13. }
  14. }
  15. return urls;
  16. }

代码示例来源:origin: Netflix/eureka

  /**
   * Creates a resolver that periodically reloads its delegate from the factory.
   *
   * <p>Fails fast with ClusterResolverException when the initial resolution yields
   * no endpoints; metric registration failures are logged but never fatal.
   *
   * @param factory creates the delegate cluster resolvers
   * @param reloadIntervalMs base reload interval in milliseconds (the effective
   *        interval is capped at MAX_RELOAD_INTERVAL_MULTIPLIER times this value)
   * @throws ClusterResolverException if the initial endpoint list is empty
   */
  public ReloadingClusterResolver(final ClusterResolverFactory<T> factory, final long reloadIntervalMs) {
      this.factory = factory;
      this.reloadIntervalMs = reloadIntervalMs;
      this.maxReloadIntervalMs = MAX_RELOAD_INTERVAL_MULTIPLIER * reloadIntervalMs;
      this.delegateRef = new AtomicReference<>(factory.createClusterResolver());
      this.lastUpdateTime = System.currentTimeMillis();
      this.currentReloadIntervalMs = reloadIntervalMs;
      // Fail fast: an empty endpoint list at construction time is unrecoverable.
      List<T> clusterEndpoints = delegateRef.get().getClusterEndpoints();
      if (clusterEndpoints.isEmpty()) {
          logger.error("Empty Eureka server endpoint list during initialization process");
          throw new ClusterResolverException("Resolved to an empty endpoint list");
      }
      if (logger.isInfoEnabled()) {
          logger.info("Initiated with delegate resolver of type {}; next reload in {}[sec]. Loaded endpoints={}",
                  delegateRef.get().getClass(), currentReloadIntervalMs / 1000, clusterEndpoints);
      }
      // Metric registration is best effort; never fail construction over it.
      try {
          Monitors.registerObject(this);
      } catch (Throwable e) {
          logger.warn("Cannot register metrics", e);
      }
  }

代码示例来源:origin: alibaba/fescar

  1. @Override
  2. public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
  3. LOGGER.error(FrameworkErrorCode.ExceptionCaught.errCode,
  4. NetUtil.toStringAddress(ctx.channel().remoteAddress()) + "connect exception. " + cause.getMessage(),
  5. cause);
  6. releaseChannel(ctx.channel(), getAddressFromChannel(ctx.channel()));
  7. if (LOGGER.isInfoEnabled()) {
  8. LOGGER.info("remove exception rm channel:" + ctx.channel());
  9. }
  10. super.exceptionCaught(ctx, cause);
  11. }

代码示例来源:origin: searchbox-io/Jest

  1. protected void removeNodeAndUpdateServers(final String hostToRemove) {
  2. log.warn("Removing host {}", hostToRemove);
  3. discoveredServerList.remove(hostToRemove);
  4. if (log.isInfoEnabled()) {
  5. log.info("Discovered server pool is now: {}", Joiner.on(',').join(discoveredServerList));
  6. }
  7. if (!discoveredServerList.isEmpty()) {
  8. client.setServers(discoveredServerList);
  9. } else {
  10. client.setServers(bootstrapServerList);
  11. }
  12. }

代码示例来源:origin: spring-projects/spring-framework

  1. public void info(Object message) {
  2. if (message instanceof String || this.logger.isInfoEnabled()) {
  3. this.logger.info(String.valueOf(message));
  4. }
  5. }

代码示例来源:origin: apache/hive

  /**
   * Configures the ORC reader options with a pushdown predicate (search argument)
   * derived from the job configuration.
   *
   * <p>When either the needed column names or the predicate itself cannot be
   * obtained from the configuration, the search argument is explicitly cleared.
   *
   * @param options the reader options to configure
   * @param types the ORC type descriptions used to resolve predicate columns
   * @param conf the configuration carrying the predicate and column names
   * @param isOriginal whether the file layout is the original (non-ACID) one
   */
  static void setSearchArgument(Reader.Options options,
      List<OrcProto.Type> types,
      Configuration conf,
      boolean isOriginal) {
    String neededColumnNames = getNeededColumnNamesString(conf);
    if (neededColumnNames == null) {
      // Without column names the predicate columns cannot be mapped: clear it.
      LOG.debug("No ORC pushdown predicate - no column names");
      options.searchArgument(null, null);
      return;
    }
    SearchArgument sarg = ConvertAstToSearchArg.createFromConf(conf);
    if (sarg == null) {
      LOG.debug("No ORC pushdown predicate");
      options.searchArgument(null, null);
      return;
    }
    if (LOG.isInfoEnabled()) {
      LOG.info("ORC pushdown predicate: " + sarg);
    }
    options.searchArgument(sarg, getSargColumnNames(
        neededColumnNames.split(","), types, options.getInclude(), isOriginal));
  }

代码示例来源:origin: apache/flink

  1. if (Optimizer.LOG.isWarnEnabled()) {
  2. Optimizer.LOG.warn("Could not instantiate InputFormat to obtain statistics."
  3. + " Limited statistics will be available.", t);
  4. if (Optimizer.LOG.isWarnEnabled()) {
  5. Optimizer.LOG.warn("Error obtaining statistics from input format: " + t.getMessage(), t);
  6. final long len = bs.getTotalInputSize();
  7. if (len == BaseStatistics.SIZE_UNKNOWN) {
  8. if (Optimizer.LOG.isInfoEnabled()) {
  9. Optimizer.LOG.info("Compiler could not determine the size of input '" + inFormatDescription + "'. Using default estimates.");

代码示例来源:origin: org.apache.hadoop/hadoop-common

  1. @Override
  2. public boolean isEnabled(int level) {
  3. switch (level) {
  4. case com.jcraft.jsch.Logger.DEBUG:
  5. return LOG.isDebugEnabled();
  6. case com.jcraft.jsch.Logger.INFO:
  7. return LOG.isInfoEnabled();
  8. case com.jcraft.jsch.Logger.WARN:
  9. return LOG.isWarnEnabled();
  10. case com.jcraft.jsch.Logger.ERROR:
  11. case com.jcraft.jsch.Logger.FATAL:
  12. return LOG.isErrorEnabled();
  13. default:
  14. return false;
  15. }
  16. }

代码示例来源:origin: apache/hive

  1. private static boolean checkInputFormatForLlapEncode(Configuration conf, String ifName) {
  2. String formatList = HiveConf.getVar(conf, ConfVars.LLAP_IO_ENCODE_FORMATS);
  3. if (LOG.isDebugEnabled()) {
  4. LOG.debug("Checking " + ifName + " against " + formatList);
  5. }
  6. String[] formats = StringUtils.getStrings(formatList);
  7. if (formats != null) {
  8. for (String format : formats) {
  9. // TODO: should we check isAssignableFrom?
  10. if (ifName.equals(format)) {
  11. if (LOG.isInfoEnabled()) {
  12. LOG.info("Using SerDe-based LLAP reader for " + ifName);
  13. }
  14. return true;
  15. }
  16. }
  17. }
  18. return false;
  19. }

相关文章