Usage of org.apache.hadoop.io.IOUtils.cleanupWithLogger(), with code examples


This article collects Java code examples for the org.apache.hadoop.io.IOUtils.cleanupWithLogger() method and shows how it is used in practice. The examples are drawn from selected open-source projects found on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Method details:

Package: org.apache.hadoop.io
Class: IOUtils
Method: cleanupWithLogger

About IOUtils.cleanupWithLogger

From the Javadoc: close the Closeable objects and ignore any Throwable or null pointers. Must only be used for cleanup in exception handlers. Any exception thrown by close() is logged through the supplied logger (if one is given) instead of being propagated.
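
Before the harvested examples, here is a minimal sketch of the typical pattern. The class DemoCleanup, the method readFirstBytes, and the path argument are hypothetical; IOUtils.readFully and IOUtils.cleanupWithLogger are the real Hadoop APIs:

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DemoCleanup {
 private static final Logger LOG = LoggerFactory.getLogger(DemoCleanup.class);

 static byte[] readFirstBytes(String path, int n) throws IOException {
  InputStream in = null;
  try {
   in = new FileInputStream(path);
   byte[] buf = new byte[n];
   IOUtils.readFully(in, buf, 0, n); // throws if the file is shorter than n
   return buf;
  } finally {
   // Tolerates a null stream and logs (rather than rethrows) any failure
   // from close(), so an exception from the try block is not masked.
   IOUtils.cleanupWithLogger(LOG, in);
  }
 }
}

Note that cleanupWithLogger takes a varargs list of Closeables, so several streams can be closed in a single call, as many of the examples below do.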

Code examples

Code example source: org.apache.hadoop/hadoop-common

@Override
synchronized public void close() {
 if (stream != null) {
  IOUtils.cleanupWithLogger(LOG, stream);
  stream = null; // so a repeated close() is a no-op
 }
}

Code example source: org.apache.hadoop/hadoop-common

/**
 * Closes the stream ignoring {@link Throwable}.
 * Must only be called in cleaning up from exception handlers.
 *
 * @param stream the Stream to close
 */
public static void closeStream(java.io.Closeable stream) {
 if (stream != null) {
  cleanupWithLogger(null, stream);
 }
}

Code example source: org.apache.hadoop/hadoop-common

/**
 * Closes the streams ignoring {@link Throwable}.
 * Must only be called in cleaning up from exception handlers.
 *
 * @param streams the Streams to close
 */
public static void closeStreams(java.io.Closeable... streams) {
 if (streams != null) {
  cleanupWithLogger(null, streams);
 }
}

Code example source: org.apache.hadoop/hadoop-common

void stop() {
 stopping = true;
 sinkThread.interrupt();
 if (sink instanceof Closeable) {
  IOUtils.cleanupWithLogger(LOG, (Closeable)sink);
 }
 try {
  sinkThread.join();
 } catch (InterruptedException e) {
  LOG.warn("Stop interrupted", e);
 }
}

Code example source: org.apache.hadoop/hadoop-common

/**
 * Convenience method for reading a token storage file and loading its Tokens.
 * @param filename the local file to read from
 * @param conf configuration
 * @return the loaded Credentials
 * @throws IOException if the file cannot be read
 */
public static Credentials readTokenStorageFile(File filename,
                        Configuration conf)
  throws IOException {
 DataInputStream in = null;
 Credentials credentials = new Credentials();
 try {
  in = new DataInputStream(new BufferedInputStream(
    new FileInputStream(filename)));
  credentials.readTokenStorageStream(in);
  return credentials;
 } catch(IOException ioe) {
  throw new IOException("Exception reading " + filename, ioe);
 } finally {
  IOUtils.cleanupWithLogger(LOG, in);
 }
}

Code example source: org.apache.hadoop/hadoop-common

static void unTarUsingJava(File inFile, File untarDir,
  boolean gzipped) throws IOException {
 InputStream inputStream = null;
 TarArchiveInputStream tis = null;
 try {
  if (gzipped) {
   inputStream = new BufferedInputStream(new GZIPInputStream(
     new FileInputStream(inFile)));
  } else {
   inputStream = new BufferedInputStream(new FileInputStream(inFile));
  }
  tis = new TarArchiveInputStream(inputStream);
  for (TarArchiveEntry entry = tis.getNextTarEntry(); entry != null;) {
   unpackEntries(tis, entry, untarDir);
   entry = tis.getNextTarEntry();
  }
 } finally {
  IOUtils.cleanupWithLogger(LOG, tis, inputStream);
 }
}

Code example source: org.apache.hadoop/hadoop-common

/**
 * Convenience method for reading a token storage file and loading its Tokens.
 * @param filename the path of the file to read from
 * @param conf configuration used to resolve the FileSystem
 * @return the loaded Credentials
 * @throws IOException if the file cannot be read
 */
public static Credentials readTokenStorageFile(Path filename,
                        Configuration conf)
throws IOException {
 FSDataInputStream in = null;
 Credentials credentials = new Credentials();
 try {
  in = filename.getFileSystem(conf).open(filename);
  credentials.readTokenStorageStream(in);
  // Explicit close so close() failures propagate; the finally block is a safety net.
  in.close();
  return credentials;
 } catch(IOException ioe) {
  throw IOUtils.wrapException(filename.toString(), "Credentials"
    + ".readTokenStorageFile", ioe);
 } finally {
  IOUtils.cleanupWithLogger(LOG, in);
 }
}

Code example source: org.apache.hadoop/hadoop-common

// Fragment: close the wrapping stream and the underlying FileInputStream together.
IOUtils.cleanupWithLogger(LOG, input, fis);

Code example source: org.apache.hadoop/hadoop-common

private static void unTarUsingJava(InputStream inputStream, File untarDir,
                  boolean gzipped) throws IOException {
 TarArchiveInputStream tis = null;
 try {
  if (gzipped) {
   inputStream = new BufferedInputStream(new GZIPInputStream(
     inputStream));
  } else {
   inputStream =
     new BufferedInputStream(inputStream);
  }
  tis = new TarArchiveInputStream(inputStream);
  for (TarArchiveEntry entry = tis.getNextTarEntry(); entry != null;) {
   unpackEntries(tis, entry, untarDir);
   entry = tis.getNextTarEntry();
  }
 } finally {
  IOUtils.cleanupWithLogger(LOG, tis, inputStream);
 }
}

Code example source: org.apache.hadoop/hadoop-common

/** Common work of the constructors. */
private void initialize(Path filename, FSDataInputStream in,
            long start, long length, Configuration conf,
            boolean tempReader) throws IOException {
 if (in == null) {
  throw new IllegalArgumentException("in == null");
 }
 this.filename = filename == null ? "<unknown>" : filename.toString();
 this.in = in;
 this.conf = conf;
 boolean succeeded = false;
 try {
  seek(start);
  this.end = this.in.getPos() + length;
  // if it wrapped around, use the max
  if (end < length) {
   end = Long.MAX_VALUE;
  }
  init(tempReader);
  succeeded = true;
 } finally {
  if (!succeeded) {
   // Close the stream only if initialization failed; on success the reader
   // owns the stream and closes it later.
   IOUtils.cleanupWithLogger(LOG, this.in);
  }
 }
}

Code example source: org.apache.hadoop/hadoop-common

@Override
public PartHandle putPart(Path filePath, InputStream inputStream,
  int partNumber, UploadHandle uploadId, long lengthInBytes)
  throws IOException {
 byte[] uploadIdByteArray = uploadId.toByteArray();
 checkUploadId(uploadIdByteArray);
 Path collectorPath = new Path(new String(uploadIdByteArray, 0,
   uploadIdByteArray.length, Charsets.UTF_8));
 Path partPath =
   mergePaths(collectorPath, mergePaths(new Path(Path.SEPARATOR),
     new Path(Integer.toString(partNumber) + ".part")));
 try(FSDataOutputStream fsDataOutputStream =
     fs.createFile(partPath).build()) {
  // The unqualified IOUtils here is a different class (commons-io), which is
  // why the Hadoop variant below is referenced by its fully qualified name.
  IOUtils.copy(inputStream, fsDataOutputStream, 4096);
 } finally {
  org.apache.hadoop.io.IOUtils.cleanupWithLogger(LOG, inputStream);
 }
 return BBPartHandle.from(ByteBuffer.wrap(
   partPath.toString().getBytes(Charsets.UTF_8)));
}

Code example source: org.apache.hadoop/hadoop-common

// Fragment (domain socket poll loop): close the socket and stop tracking its fd.
IOUtils.cleanupWithLogger(LOG, sock);
fdSet.remove(fd);
return true;

Code example source: org.apache.hadoop/hadoop-common

// Fragment: a single reader closed after use.
IOUtils.cleanupWithLogger(LOG, reader);

Code example source: org.apache.hadoop/hadoop-common

// Fragment: re-throw the original exception; cleanup happens in the finally
// block, where Closeables may be passed in one call or split across several.
} catch (IOException ioe) {
 throw ioe;
} finally {
 IOUtils.cleanupWithLogger(LOG, lin, in);
 IOUtils.cleanupWithLogger(LOG, aIn);
}

Code example source: org.apache.hadoop/hadoop-common

// Fragment: TraceScope is Closeable, so a trace scope can be cleaned up the same way.
IOUtils.cleanupWithLogger(LOG, traceScope);

Code example source: org.apache.hadoop/hadoop-common

// Fragment: release the reference, notify the handler, then close the domain socket.
entry.getDomainSocket().refCount.unreference();
entry.getHandler().handle(entry.getDomainSocket());
IOUtils.cleanupWithLogger(LOG, entry.getDomainSocket());
iter.remove();

Code example source: org.apache.hadoop/hadoop-common

// Fragment: if the watcher is already closed, notify the handler and close the socket.
if (closed) {
 handler.handle(sock);
 IOUtils.cleanupWithLogger(LOG, sock);
 return;
}

Code example source: org.apache.hadoop/hadoop-common

// Fragment: close a reader together with the underlying FSDataInputStream.
IOUtils.cleanupWithLogger(LOG, reader, fsdis);

Code example source: org.apache.hadoop/hadoop-hdfs

@Override
public void abort() throws IOException {
 if (fp == null) {
  return;
 }
 IOUtils.cleanupWithLogger(LOG, fp);
 fp = null;
}

Code example source: org.apache.hadoop/hadoop-common

// Fragment: null out the fields after closing so they are not closed again later.
IOUtils.cleanupWithLogger(LOG, blkAppender, writerBCF);
blkAppender = null;
writerBCF = null;
