Usage of the org.slf4j.Logger.debug() method, with code examples

This article collects a number of Java code examples that show how the org.slf4j.Logger.debug() method is used in practice. The examples come mainly from platforms such as GitHub, Stack Overflow, and Maven, and were extracted from selected open-source projects, so they should serve as useful references. Details of Logger.debug() are as follows:
Package: org.slf4j
Class: Logger
Method: debug

About Logger.debug

Log a message at the DEBUG level.
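
Besides the single-argument form, org.slf4j.Logger declares parameterized overloads such as debug(String format, Object... arguments) and debug(String msg, Throwable t). The sketch below illustrates the usage patterns that recur throughout the examples that follow: parameterized messages, an isDebugEnabled() guard around an expensive argument, and logging an exception together with its stack trace. It is a minimal, self-contained illustration; the class, method, and message names (DebugLoggingExample, process, buildDescription) are hypothetical and exist only for this example.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Minimal sketch; class, method, and message contents are illustrative only.
    public class DebugLoggingExample {

        private static final Logger LOG = LoggerFactory.getLogger(DebugLoggingExample.class);

        public void process(String itemId) {
            // Parameterized form: the message is only formatted when DEBUG is enabled.
            LOG.debug("Processing item '{}'", itemId);

            // Guard with isDebugEnabled() when computing an argument is itself expensive.
            if (LOG.isDebugEnabled()) {
                LOG.debug("Item details: {}", buildDescription(itemId));
            }

            try {
                int numericId = Integer.parseInt(itemId);
                LOG.debug("Parsed numeric id {}", numericId);
            } catch (NumberFormatException e) {
                // A trailing Throwable argument is logged together with its stack trace.
                LOG.debug("Item id '{}' is not numeric", itemId, e);
            }
        }

        private String buildDescription(String itemId) {
            // Placeholder for a computation that would be wasteful at higher log levels.
            return "description of item " + itemId;
        }
    }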

Code examples

Code example origin: spring-projects/spring-framework

    public void debug(Object message) {
        if (message instanceof String || this.logger.isDebugEnabled()) {
            this.logger.debug(String.valueOf(message));
        }
    }

Code example origin: skylot/jadx

    public static void setClipboardString(String text) {
        try {
            Clipboard clipboard = Toolkit.getDefaultToolkit().getSystemClipboard();
            Transferable transferable = new StringSelection(text);
            clipboard.setContents(transferable, null);
            LOG.debug("String '{}' copied to clipboard", text);
        } catch (Exception e) {
            LOG.error("Failed copy string '{}' to clipboard", text, e);
        }
    }

Code example origin: skylot/jadx

    public void printMissingClasses() {
        int count = missingClasses.size();
        if (count == 0) {
            return;
        }
        LOG.warn("Found {} references to unknown classes", count);
        if (LOG.isDebugEnabled()) {
            List<String> clsNames = new ArrayList<>(missingClasses);
            Collections.sort(clsNames);
            for (String cls : clsNames) {
                LOG.debug(" {}", cls);
            }
        }
    }

Code example origin: apache/hbase

    public void deleteBulkLoadedRows(List<byte[]> rows) throws IOException {
        try (Table table = connection.getTable(bulkLoadTableName)) {
            List<Delete> lstDels = new ArrayList<>();
            for (byte[] row : rows) {
                Delete del = new Delete(row);
                lstDels.add(del);
                LOG.debug("orig deleting the row: " + Bytes.toString(row));
            }
            table.delete(lstDels);
            LOG.debug("deleted " + rows.size() + " original bulkload rows");
        }
    }

Code example origin: perwendel/spark

    private void add(HttpMethod method, String url, String acceptedType, Object target) {
        RouteEntry entry = new RouteEntry();
        entry.httpMethod = method;
        entry.path = url;
        entry.target = target;
        entry.acceptedType = acceptedType;
        LOG.debug("Adds route: " + entry);
        // Adds to end of list
        routes.add(entry);
    }

Code example origin: apache/hbase

    private void updateFileLists(List<String> activeFiles, List<String> archiveFiles)
            throws IOException {
        List<String> newlyArchived = new ArrayList<>();
        for (String spath : activeFiles) {
            if (!fs.exists(new Path(spath))) {
                newlyArchived.add(spath);
            }
        }
        if (newlyArchived.size() > 0) {
            activeFiles.removeAll(newlyArchived);
            archiveFiles.addAll(newlyArchived);
        }
        LOG.debug(newlyArchived.size() + " files have been archived.");
    }

Code example origin: apache/storm

    public List<HBMessage> sendAll(HBMessage m) throws PacemakerConnectionException, InterruptedException {
        List<HBMessage> responses = new ArrayList<HBMessage>();
        LOG.debug("Using servers: {}", servers);
        for (String s : servers) {
            try {
                HBMessage response = getClientForServer(s).send(m);
                responses.add(response);
            } catch (PacemakerConnectionException e) {
                LOG.warn("Failed to connect to the pacemaker server {}, attempting to reconnect", s);
                getClientForServer(s).reconnect();
            }
        }
        if (responses.size() == 0) {
            throw new PacemakerConnectionException("Failed to connect to any Pacemaker.");
        }
        return responses;
    }

Code example origin: apache/flink

    @Override
    public void log(int level, String category, String message, Throwable ex) {
        final String logString = "[KRYO " + category + "] " + message;
        switch (level) {
            case Log.LEVEL_ERROR:
                log.error(logString, ex);
                break;
            case Log.LEVEL_WARN:
                log.warn(logString, ex);
                break;
            case Log.LEVEL_INFO:
                log.info(logString, ex);
                break;
            case Log.LEVEL_DEBUG:
                log.debug(logString, ex);
                break;
            case Log.LEVEL_TRACE:
                log.trace(logString, ex);
                break;
        }
    }

Code example origin: Activiti/Activiti

    protected void logException() {
        if (exception instanceof JobNotFoundException || exception instanceof ActivitiTaskAlreadyClaimedException) {
            // reduce log level, because this may have been caused because of job deletion due to cancelActiviti="true"
            log.info("Error while closing command context", exception);
        } else if (exception instanceof ActivitiOptimisticLockingException) {
            // reduce log level, as normally we're not interested in logging this exception
            log.debug("Optimistic locking exception : " + exception);
        } else {
            log.error("Error while closing command context", exception);
        }
    }

Code example origin: Netflix/eureka

    private List<AwsEndpoint> getClusterEndpointsFromConfig() {
        String[] availZones = clientConfig.getAvailabilityZones(clientConfig.getRegion());
        String myZone = InstanceInfo.getZone(availZones, myInstanceInfo);
        Map<String, List<String>> serviceUrls = EndpointUtils
                .getServiceUrlsMapFromConfig(clientConfig, myZone, clientConfig.shouldPreferSameZoneEureka());
        List<AwsEndpoint> endpoints = new ArrayList<>();
        for (String zone : serviceUrls.keySet()) {
            for (String url : serviceUrls.get(zone)) {
                try {
                    endpoints.add(new AwsEndpoint(url, getRegion(), zone));
                } catch (Exception ignore) {
                    logger.warn("Invalid eureka server URI: {}; removing from the server pool", url);
                }
            }
        }
        logger.debug("Config resolved to {}", endpoints);
        if (endpoints.isEmpty()) {
            logger.error("Cannot resolve to any endpoints from provided configuration: {}", serviceUrls);
        }
        return endpoints;
    }

Code example origin: alibaba/fescar

    @Override
    public void onCheckMessage(long msgId, ChannelHandlerContext ctx, ServerMessageSender sender) {
        try {
            sender.sendResponse(msgId, ctx.channel(), HeartbeatMessage.PONG);
        } catch (Throwable throwable) {
            LOGGER.error("", "send response error", throwable);
        }
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("received PING from " + ctx.channel().remoteAddress());
        }
    }

Code example origin: Graylog2/graylog2-server

    @Override
    public ResourceModel processResourceModel(ResourceModel resourceModel, Configuration configuration) {
        LOG.debug("Map for resource model <" + resourceModel + ">:");
        final List<Resource> resources = new ArrayList<>();
        for (Resource resource : resourceModel.getResources()) {
            resources.add(resource);
            resources.addAll(findChildResources(resource));
        }
        logResources(resources);
        return resourceModel;
    }

Code example origin: apache/flink

    @Override
    public void initializeState(StateInitializationContext context) throws Exception {
        super.initializeState(context);
        checkState(checkpointedState == null, "The reader state has already been initialized.");
        checkpointedState = context.getOperatorStateStore().getSerializableListState("splits");
        int subtaskIdx = getRuntimeContext().getIndexOfThisSubtask();
        if (context.isRestored()) {
            LOG.info("Restoring state for the {} (taskIdx={}).", getClass().getSimpleName(), subtaskIdx);
            // this may not be null in case we migrate from a previous Flink version.
            if (restoredReaderState == null) {
                restoredReaderState = new ArrayList<>();
                for (TimestampedFileInputSplit split : checkpointedState.get()) {
                    restoredReaderState.add(split);
                }
                if (LOG.isDebugEnabled()) {
                    LOG.debug("{} (taskIdx={}) restored {}.", getClass().getSimpleName(), subtaskIdx, restoredReaderState);
                }
            }
        } else {
            LOG.info("No state to restore for the {} (taskIdx={}).", getClass().getSimpleName(), subtaskIdx);
        }
    }

Code example origin: apache/hive

    static void logException(String msg, Exception e) {
        if (LOG.isDebugEnabled()) {
            LOG.debug(msg, e);
        } else {
            LOG.info(msg + ": " + e.getMessage());
        }
    }

Code example origin: skylot/jadx

    private static Release checkForNewRelease() throws IOException {
        String version = JadxDecompiler.getVersion();
        if (version.contains("dev")) {
            LOG.debug("Ignore check for update: development version");
            return null;
        }
        List<Release> list = get(GITHUB_RELEASES_URL, RELEASES_LIST_TYPE);
        if (list == null) {
            return null;
        }
        list.removeIf(release -> release.getName().equalsIgnoreCase(version) || release.isPreRelease());
        if (list.isEmpty()) {
            return null;
        }
        list.sort(RELEASE_COMPARATOR);
        Release latest = list.get(list.size() - 1);
        if (VersionComparator.checkAndCompare(version, latest.getName()) >= 0) {
            return null;
        }
        LOG.info("Found new jadx version: {}", latest);
        return latest;
    }

Code example origin: apache/zookeeper

    @Override
    public synchronized void shutdown() {
        if (!canShutdown()) {
            LOG.debug("ZooKeeper server is not running, so not proceeding to shutdown!");
            return;
        }
        LOG.info("Shutting down");
        try {
            super.shutdown();
        } catch (Exception e) {
            LOG.warn("Ignoring unexpected exception during shutdown", e);
        }
        try {
            if (syncProcessor != null) {
                syncProcessor.shutdown();
            }
        } catch (Exception e) {
            LOG.warn("Ignoring unexpected exception in syncprocessor shutdown", e);
        }
    }

Code example origin: gocd/gocd

    protected boolean runImpl() {
        try {
            Message message = consumer.receive();
            if (message == null) {
                LOG.debug("Message consumer was closed.");
                return true;
            }
            ObjectMessage omessage = (ObjectMessage) message;
            daemonThreadStatsCollector.captureStats(thread.getId());
            listener.onMessage((GoMessage) omessage.getObject());
        } catch (JMSException e) {
            LOG.warn("Error receiving message. Message receiving will continue despite this error.", e);
        } catch (Exception e) {
            LOG.error("Exception thrown in message handling by listener {}", listener, e);
        } finally {
            daemonThreadStatsCollector.clearStats(thread.getId());
        }
        return false;
    }

Code example origin: apache/kafka

    private boolean threadShouldExit(long now, long curHardShutdownTimeMs) {
        if (!hasActiveExternalCalls()) {
            log.trace("All work has been completed, and the I/O thread is now exiting.");
            return true;
        }
        if (now >= curHardShutdownTimeMs) {
            log.info("Forcing a hard I/O thread shutdown. Requests in progress will be aborted.");
            return true;
        }
        log.debug("Hard shutdown in {} ms.", curHardShutdownTimeMs - now);
        return false;
    }

Code example origin: apache/kafka

    @Override
    public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
        if (exception != null) {
            if (exception instanceof RetriableException) {
                log.debug("Asynchronous auto-commit of offsets {} failed due to retriable error: {}", offsets, exception);
                nextAutoCommitTimer.updateAndReset(retryBackoffMs);
            } else {
                log.warn("Asynchronous auto-commit of offsets {} failed: {}", offsets, exception.getMessage());
            }
        } else {
            log.debug("Completed asynchronous auto-commit of offsets {}", offsets);
        }
    }

Code example origin: apache/geode

    @Override
    protected void rebalanceCache() {
        try {
            getLogger().info("Rebalancing: " + this.cache);
            RebalanceResults results = RegionHelper.rebalanceCache(this.cache);
            if (getLogger().isDebugEnabled()) {
                getLogger().debug("Done rebalancing: " + this.cache);
                getLogger().debug(RegionHelper.getRebalanceResultsMessage(results));
            }
        } catch (Exception e) {
            getLogger().warn("Rebalance failed because of the following exception:", e);
        }
    }
