org.apache.spark.util.Utils.bytesToString()方法的使用及代码示例

x33g5p2x  于2022-02-01 转载在 其他  
字(12.4k)|赞(0)|评价(0)|浏览(148)

本文整理了Java中org.apache.spark.util.Utils.bytesToString()方法的一些代码示例,展示了Utils.bytesToString()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Utils.bytesToString()方法的具体详情如下:
包路径:org.apache.spark.util.Utils
类名称:Utils
方法名:bytesToString

Utils.bytesToString介绍

[英]Convert a quantity in bytes to a human-readable string such as "4.0 MB".
[中]将以字节为单位的数量转换为便于阅读的字符串,例如“4.0 MB”。

代码示例

代码示例来源:origin: org.apache.spark/spark-core_2.10

/**
 * Release N bytes of execution memory for a MemoryConsumer.
 *
 * @param size     number of bytes to release
 * @param consumer the consumer giving the memory back; its memory mode is
 *                 forwarded to the memory manager
 */
public void releaseExecutionMemory(long size, MemoryConsumer consumer) {
 // Log the human-readable size (e.g. "4.0 MB") before handing the bytes
 // back to the MemoryManager under this task's attempt id.
 logger.debug("Task {} release {} from {}", taskAttemptId, Utils.bytesToString(size), consumer);
 memoryManager.releaseExecutionMemory(size, taskAttemptId, consumer.getMode());
}

代码示例来源:origin: org.apache.spark/spark-core_2.11

/**
 * Release N bytes of execution memory for a MemoryConsumer.
 *
 * @param size     number of bytes to release
 * @param consumer the consumer giving the memory back; its memory mode is
 *                 forwarded to the memory manager
 */
public void releaseExecutionMemory(long size, MemoryConsumer consumer) {
 // Debug-level trace of the release, with the size rendered human-readable.
 logger.debug("Task {} release {} from {}", taskAttemptId, Utils.bytesToString(size), consumer);
 memoryManager.releaseExecutionMemory(size, taskAttemptId, consumer.getMode());
}

代码示例来源:origin: org.apache.spark/spark-core

/**
 * Release N bytes of execution memory for a MemoryConsumer.
 *
 * @param size     number of bytes to release
 * @param consumer the consumer giving the memory back; its memory mode is
 *                 forwarded to the memory manager
 */
public void releaseExecutionMemory(long size, MemoryConsumer consumer) {
 // Trace the release with a human-readable byte count, then delegate to the
 // MemoryManager for this task attempt.
 logger.debug("Task {} release {} from {}", taskAttemptId, Utils.bytesToString(size), consumer);
 memoryManager.releaseExecutionMemory(size, taskAttemptId, consumer.getMode());
}

代码示例来源:origin: org.apache.spark/spark-core_2.10

/**
 * Log a snapshot of this task's memory usage: the bytes held by each
 * registered consumer, plus any execution memory the memory manager has
 * charged to this task that no consumer accounts for.
 */
public void showMemoryUsage() {
 logger.info("Memory used in task " + taskAttemptId);
 synchronized (this) {
  // Sum per-consumer usage while logging each non-empty consumer.
  long trackedByConsumers = 0;
  for (MemoryConsumer consumer : consumers) {
   long used = consumer.getUsed();
   trackedByConsumers += used;
   if (used > 0) {
    logger.info("Acquired by " + consumer + ": " + Utils.bytesToString(used));
   }
  }
  // Whatever the memory manager charged this task beyond the consumer total.
  long untracked =
   memoryManager.getExecutionMemoryUsageForTask(taskAttemptId) - trackedByConsumers;
  logger.info(
   "{} bytes of memory were used by task {} but are not associated with specific consumers",
   untracked, taskAttemptId);
  logger.info(
   "{} bytes of memory are used for execution and {} bytes of memory are used for storage",
   memoryManager.executionMemoryUsed(), memoryManager.storageMemoryUsed());
 }
}

代码示例来源:origin: org.apache.spark/spark-core

/**
 * Dump a point-in-time report of this task's memory usage to the info log:
 * one line per consumer holding memory, followed by the execution memory
 * charged to the task but not attributable to any consumer.
 */
public void showMemoryUsage() {
 logger.info("Memory used in task " + taskAttemptId);
 synchronized (this) {
  long accountedFor = 0;
  for (MemoryConsumer mc : consumers) {
   long mcUsed = mc.getUsed();
   accountedFor += mcUsed;
   if (mcUsed > 0) {
    // bytesToString renders e.g. "4.0 MB" for readability.
    logger.info("Acquired by " + mc + ": " + Utils.bytesToString(mcUsed));
   }
  }
  long unattributed =
   memoryManager.getExecutionMemoryUsageForTask(taskAttemptId) - accountedFor;
  logger.info(
   "{} bytes of memory were used by task {} but are not associated with specific consumers",
   unattributed, taskAttemptId);
  logger.info(
   "{} bytes of memory are used for execution and {} bytes of memory are used for storage",
   memoryManager.executionMemoryUsed(), memoryManager.storageMemoryUsed());
 }
}

代码示例来源:origin: org.apache.spark/spark-core_2.11

/**
 * Write the task's current memory picture to the log: per-consumer holdings
 * first, then the delta between what the memory manager has charged to this
 * task and what the consumers collectively report.
 */
public void showMemoryUsage() {
 logger.info("Memory used in task " + taskAttemptId);
 synchronized (this) {
  long consumerTotal = 0;
  for (MemoryConsumer c : consumers) {
   long bytesUsed = c.getUsed();
   consumerTotal += bytesUsed;
   if (bytesUsed > 0) {
    logger.info("Acquired by " + c + ": " + Utils.bytesToString(bytesUsed));
   }
  }
  // Execution memory charged to the task with no owning consumer.
  long orphaned =
   memoryManager.getExecutionMemoryUsageForTask(taskAttemptId) - consumerTotal;
  logger.info(
   "{} bytes of memory were used by task {} but are not associated with specific consumers",
   orphaned, taskAttemptId);
  logger.info(
   "{} bytes of memory are used for execution and {} bytes of memory are used for storage",
   memoryManager.executionMemoryUsed(), memoryManager.storageMemoryUsed());
 }
}

代码示例来源:origin: org.apache.spark/spark-core_2.10

/**
 * Sort and spill the current records in response to memory pressure.
 *
 * Only a spill triggered by this consumer itself is honored; requests from
 * other consumers, or when there is no in-memory data, return 0 bytes. The
 * {@code size} hint is not consulted: all buffered records are spilled.
 *
 * @param size    requested number of bytes (ignored by this implementation)
 * @param trigger the consumer that initiated the spill
 * @return number of bytes of execution memory freed
 * @throws IOException if writing the sorted spill file fails
 */
@Override
public long spill(long size, MemoryConsumer trigger) throws IOException {
 if (trigger != this || inMemSorter == null || inMemSorter.numRecords() == 0) {
  return 0L;
 }
 // Fix: the literals previously carried a leading space (" times"/" time"),
 // which combined with the "{} {}" format to log "(2  times so far)".
 logger.info("Thread {} spilling sort data of {} to disk ({} {} so far)",
  Thread.currentThread().getId(),
  Utils.bytesToString(getMemoryUsage()),
  spills.size(),
  spills.size() > 1 ? "times" : "time");
 writeSortedFile(false);
 final long spillSize = freeMemory();
 inMemSorter.reset();
 // Reset the in-memory sorter's pointer array only after freeing up the memory pages holding the
 // records. Otherwise, if the task is over allocated memory, then without freeing the memory
 // pages, we might not be able to get memory for the pointer array.
 taskContext.taskMetrics().incMemoryBytesSpilled(spillSize);
 return spillSize;
}

代码示例来源:origin: org.apache.spark/spark-core_2.11

/**
 * Clean up all allocated memory and pages. Returns the number of bytes freed. A non-zero return
 * value can be used to detect memory leaks.
 */
public long cleanUpAllAllocatedMemory() {
 synchronized (this) {
  // Report (debug level) any consumer still holding memory at teardown.
  for (MemoryConsumer c: consumers) {
   if (c != null && c.getUsed() > 0) {
    // In case of failed task, it's normal to see leaked memory
    logger.debug("unreleased " + Utils.bytesToString(c.getUsed()) + " memory from " + c);
   }
  }
  consumers.clear();
  // Return every still-allocated page to the Tungsten allocator.
  for (MemoryBlock page : pageTable) {
   if (page != null) {
    logger.debug("unreleased page: " + page + " in task " + taskAttemptId);
    // Mark the page as freed by this TaskMemoryManager before handing it to the allocator.
    page.pageNumber = MemoryBlock.FREED_IN_TMM_PAGE_NUMBER;
    memoryManager.tungstenMemoryAllocator().free(page);
   }
  }
  // Clear the table only after the pages have actually been freed.
  Arrays.fill(pageTable, null);
 }
 // release the memory that is not used by any consumer (acquired for pages in tungsten mode).
 memoryManager.releaseExecutionMemory(acquiredButNotUsed, taskAttemptId, tungstenMemoryMode);
 return memoryManager.releaseAllExecutionMemoryForTask(taskAttemptId);
}

代码示例来源:origin: org.apache.spark/spark-core

/**
 * Sort and spill the current records in response to memory pressure.
 *
 * Only a spill triggered by this consumer itself is honored; requests from
 * other consumers, or when there is no in-memory data, return 0 bytes. The
 * {@code size} hint is not consulted: all buffered records are spilled.
 *
 * @param size    requested number of bytes (ignored by this implementation)
 * @param trigger the consumer that initiated the spill
 * @return number of bytes of execution memory freed
 * @throws IOException if writing the sorted spill file fails
 */
@Override
public long spill(long size, MemoryConsumer trigger) throws IOException {
 if (trigger != this || inMemSorter == null || inMemSorter.numRecords() == 0) {
  return 0L;
 }
 // Fix: the literals previously carried a leading space (" times"/" time"),
 // which combined with the "{} {}" format to log "(2  times so far)".
 logger.info("Thread {} spilling sort data of {} to disk ({} {} so far)",
  Thread.currentThread().getId(),
  Utils.bytesToString(getMemoryUsage()),
  spills.size(),
  spills.size() > 1 ? "times" : "time");
 writeSortedFile(false);
 final long spillSize = freeMemory();
 inMemSorter.reset();
 // Reset the in-memory sorter's pointer array only after freeing up the memory pages holding the
 // records. Otherwise, if the task is over allocated memory, then without freeing the memory
 // pages, we might not be able to get memory for the pointer array.
 taskContext.taskMetrics().incMemoryBytesSpilled(spillSize);
 return spillSize;
}

代码示例来源:origin: org.apache.spark/spark-core_2.11

/**
 * Sort and spill the current records in response to memory pressure.
 *
 * Only a spill triggered by this consumer itself is honored; requests from
 * other consumers, or when there is no in-memory data, return 0 bytes. The
 * {@code size} hint is not consulted: all buffered records are spilled.
 *
 * @param size    requested number of bytes (ignored by this implementation)
 * @param trigger the consumer that initiated the spill
 * @return number of bytes of execution memory freed
 * @throws IOException if writing the sorted spill file fails
 */
@Override
public long spill(long size, MemoryConsumer trigger) throws IOException {
 if (trigger != this || inMemSorter == null || inMemSorter.numRecords() == 0) {
  return 0L;
 }
 // Fix: the literals previously carried a leading space (" times"/" time"),
 // which combined with the "{} {}" format to log "(2  times so far)".
 logger.info("Thread {} spilling sort data of {} to disk ({} {} so far)",
  Thread.currentThread().getId(),
  Utils.bytesToString(getMemoryUsage()),
  spills.size(),
  spills.size() > 1 ? "times" : "time");
 writeSortedFile(false);
 final long spillSize = freeMemory();
 inMemSorter.reset();
 // Reset the in-memory sorter's pointer array only after freeing up the memory pages holding the
 // records. Otherwise, if the task is over allocated memory, then without freeing the memory
 // pages, we might not be able to get memory for the pointer array.
 taskContext.taskMetrics().incMemoryBytesSpilled(spillSize);
 return spillSize;
}

代码示例来源:origin: org.apache.spark/spark-core_2.10

/**
 * Clean up all allocated memory and pages. Returns the number of bytes freed. A non-zero return
 * value can be used to detect memory leaks.
 */
public long cleanUpAllAllocatedMemory() {
 synchronized (this) {
  // Report (debug level) any consumer still holding memory at teardown.
  for (MemoryConsumer c: consumers) {
   if (c != null && c.getUsed() > 0) {
    // In case of failed task, it's normal to see leaked memory
    logger.debug("unreleased " + Utils.bytesToString(c.getUsed()) + " memory from " + c);
   }
  }
  consumers.clear();
  // Return every still-allocated page to the Tungsten allocator.
  for (MemoryBlock page : pageTable) {
   if (page != null) {
    logger.debug("unreleased page: " + page + " in task " + taskAttemptId);
    memoryManager.tungstenMemoryAllocator().free(page);
   }
  }
  // Clear the table only after the pages have actually been freed.
  Arrays.fill(pageTable, null);
 }
 // release the memory that is not used by any consumer (acquired for pages in tungsten mode).
 memoryManager.releaseExecutionMemory(acquiredButNotUsed, taskAttemptId, tungstenMemoryMode);
 return memoryManager.releaseAllExecutionMemoryForTask(taskAttemptId);
}

代码示例来源:origin: org.apache.spark/spark-core

/**
 * Clean up all allocated memory and pages. Returns the number of bytes freed. A non-zero return
 * value can be used to detect memory leaks.
 */
public long cleanUpAllAllocatedMemory() {
 synchronized (this) {
  // Report (debug level) any consumer still holding memory at teardown.
  for (MemoryConsumer c: consumers) {
   if (c != null && c.getUsed() > 0) {
    // In case of failed task, it's normal to see leaked memory
    logger.debug("unreleased " + Utils.bytesToString(c.getUsed()) + " memory from " + c);
   }
  }
  consumers.clear();
  // Return every still-allocated page to the Tungsten allocator.
  for (MemoryBlock page : pageTable) {
   if (page != null) {
    logger.debug("unreleased page: " + page + " in task " + taskAttemptId);
    // Mark the page as freed by this TaskMemoryManager before handing it to the allocator.
    page.pageNumber = MemoryBlock.FREED_IN_TMM_PAGE_NUMBER;
    memoryManager.tungstenMemoryAllocator().free(page);
   }
  }
  // Clear the table only after the pages have actually been freed.
  Arrays.fill(pageTable, null);
 }
 // release the memory that is not used by any consumer (acquired for pages in tungsten mode).
 memoryManager.releaseExecutionMemory(acquiredButNotUsed, taskAttemptId, tungstenMemoryMode);
 return memoryManager.releaseAllExecutionMemoryForTask(taskAttemptId);
}

代码示例来源:origin: org.apache.spark/spark-core_2.10

if (released > 0) {
    logger.debug("Task {} released {} from {} for {}", taskAttemptId,
     Utils.bytesToString(released), c, consumer);
    got += memoryManager.acquireExecutionMemory(required - got, taskAttemptId, mode);
    if (got >= required) {
  if (released > 0) {
   logger.debug("Task {} released {} from itself ({})", taskAttemptId,
    Utils.bytesToString(released), consumer);
   got += memoryManager.acquireExecutionMemory(required - got, taskAttemptId, mode);
logger.debug("Task {} acquired {} for {}", taskAttemptId, Utils.bytesToString(got), consumer);
return got;

代码示例来源:origin: org.apache.spark/spark-core_2.11

if (released > 0) {
    logger.debug("Task {} released {} from {} for {}", taskAttemptId,
     Utils.bytesToString(released), c, consumer);
    got += memoryManager.acquireExecutionMemory(required - got, taskAttemptId, mode);
    if (got >= required) {
  if (released > 0) {
   logger.debug("Task {} released {} from itself ({})", taskAttemptId,
    Utils.bytesToString(released), consumer);
   got += memoryManager.acquireExecutionMemory(required - got, taskAttemptId, mode);
logger.debug("Task {} acquired {} for {}", taskAttemptId, Utils.bytesToString(got), consumer);
return got;

代码示例来源:origin: org.apache.spark/spark-core

if (released > 0) {
    logger.debug("Task {} released {} from {} for {}", taskAttemptId,
     Utils.bytesToString(released), c, consumer);
    got += memoryManager.acquireExecutionMemory(required - got, taskAttemptId, mode);
    if (got >= required) {
  if (released > 0) {
   logger.debug("Task {} released {} from itself ({})", taskAttemptId,
    Utils.bytesToString(released), consumer);
   got += memoryManager.acquireExecutionMemory(required - got, taskAttemptId, mode);
logger.debug("Task {} acquired {} for {}", taskAttemptId, Utils.bytesToString(got), consumer);
return got;

代码示例来源:origin: org.apache.spark/spark-core_2.11

Utils.bytesToString(getMemoryUsage()),
spillWriters.size(),
spillWriters.size() > 1 ? " times" : " time");

代码示例来源:origin: org.apache.spark/spark-core

Utils.bytesToString(getMemoryUsage()),
spillWriters.size(),
spillWriters.size() > 1 ? " times" : " time");

代码示例来源:origin: org.apache.spark/spark-core_2.10

Utils.bytesToString(getMemoryUsage()),
spillWriters.size(),
spillWriters.size() > 1 ? " times" : " time");

代码示例来源:origin: shunfei/indexr

/**
 * Release N bytes of execution memory for a MemoryConsumer.
 *
 * @param size     number of bytes to release
 * @param mode     NOTE(review): this parameter is never used in the body —
 *                 confirm whether releaseMemory is intentionally mode-agnostic
 * @param consumer the consumer giving the memory back (used for logging only)
 */
public void releaseExecutionMemory(long size, MemoryMode mode, MemoryConsumer consumer) {
  // Trace the release with a human-readable byte count, then return the bytes
  // to the memory manager under this task's attempt id.
  logger.debug("Task {} release {} from {}", taskAttemptId, Utils.bytesToString(size), consumer);
  memoryManager.releaseMemory(size, taskAttemptId);
}

代码示例来源:origin: shunfei/indexr

/**
 * Clean up all allocated memory and pages. Returns the number of bytes freed. A non-zero return
 * value can be used to detect memory leaks.
 *
 * @return the number of bytes of memory released for this task by the memory manager
 */
public long cleanUpAllAllocatedMemory() {
  synchronized (this) {
    // Warn about any consumer still holding memory at teardown.
    for (MemoryConsumer c : consumers) {
      if (c != null && c.getUsed() > 0) {
        // In case of failed task, it's normal to see leaked memory
        logger.warn("leak " + Utils.bytesToString(c.getUsed()) + " memory from " + c);
      }
    }
    consumers.clear();
    // BUG FIX: the original called Arrays.fill(pageTable, null) here, *before*
    // the freeing loop below, so every entry was already null and no page was
    // ever returned to the Tungsten allocator (allocator-side leak). Free the
    // pages first, then clear the table; keeping both steps inside the
    // synchronized block also avoids racing concurrent page allocation.
    for (MemoryBlock page : pageTable) {
      if (page != null) {
        memoryManager.tungstenMemoryAllocator().free(page);
      }
    }
    Arrays.fill(pageTable, null);
  }
  return memoryManager.releaseAllMemoryForTask(taskAttemptId);
}

相关文章