Usage and code examples for com.amazonaws.services.s3.transfer.Upload


This article collects Java code examples for the com.amazonaws.services.s3.transfer.Upload class and shows how it is used in practice. The examples are drawn from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the Upload class:
Package path: com.amazonaws.services.s3.transfer
Class name: Upload

Introduction to Upload

Represents an asynchronous upload to Amazon S3.

See TransferManager for more information about creating transfers.
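
Before the project excerpts below, here is a minimal, self-contained sketch of the typical lifecycle (the bucket, key, and file names are placeholders, not taken from any project in this article): a TransferManager starts the transfer and immediately returns an Upload handle, which the caller can poll for progress or block on until completion.

import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;
import java.io.File;

public class UploadExample {
  public static void main(String[] args) throws InterruptedException {
    // Build a TransferManager using the default credential and region providers.
    TransferManager tm = TransferManagerBuilder.standard().build();
    try {
      // upload() returns immediately; the transfer runs on background threads.
      Upload upload = tm.upload("my-bucket", "my-key", new File("local-file.txt"));
      System.out.println(upload.getDescription());
      // Block until the transfer finishes; throws on failure.
      upload.waitForCompletion();
      System.out.println("Final state: " + upload.getState());
    } finally {
      // Release the TransferManager's background threads.
      tm.shutdownNow();
    }
  }
}

Note that waitForCompletion() throws InterruptedException, and that shutdownNow() with no arguments also shuts down the underlying S3 client; pass false to keep the client alive, as several of the examples below do.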

Code examples

Code example source: prestodb/presto

STATS.uploadStarted();
PutObjectRequest request = new PutObjectRequest(host, key, tempFile);
if (sseEnabled) {
  switch (sseType) {
    case KMS:
      if (sseKmsKeyId != null) {
        request.withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(sseKmsKeyId));
      }
      else {
        request.withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams());
      }
      break;
  }
}
request.withCannedAcl(aclType);
Upload upload = transferManager.upload(request);
upload.addProgressListener(createProgressListener(upload));
upload.waitForCompletion();
STATS.uploadSuccessful();
log.debug("Completed upload for host: %s, key: %s", host, key);

Code example source: Alluxio/alluxio

PutObjectRequest putReq = new PutObjectRequest(mBucketName, path, mFile).withMetadata(meta);
mManager.upload(putReq).waitForUploadResult();
if (!mFile.delete()) {
  LOG.error("Failed to delete temporary file @ {}", mFile.getPath());
}

Code example source: awsdocs/aws-doc-sdk-examples

System.out.println("\nSubtransfer progress:\n");
for (Upload u : sub_xfers) {
  System.out.println("  " + u.getDescription());
  if (u.isDone()) {
    // the sub-transfer finished; print its final state
    TransferState xfer_state = u.getState();
    System.out.println("  " + xfer_state);
  } else {
    // still transferring; print a progress bar
    TransferProgress progress = u.getProgress();
    double pct = progress.getPercentTransferred();
    printProgressBar(pct);
  }
}

Code example source: awsdocs/aws-doc-sdk-examples

TransferManager xfer_mgr = TransferManagerBuilder.standard().build();
try {
  Upload u = xfer_mgr.upload(bucket_name, key_name, f);
  // print the percentage transferred as the upload progresses
  u.addProgressListener(new ProgressListener() {
    public void progressChanged(ProgressEvent e) {
      double pct = e.getBytesTransferred() * 100.0 / e.getBytes();
      System.out.printf("%.1f%%%n", pct);
    }
  });
  u.waitForCompletion();
  TransferState xfer_state = u.getState();
  System.out.println(": " + xfer_state);
} catch (AmazonServiceException | InterruptedException e) {
  System.err.println(e.getMessage());
  System.exit(1);
}
xfer_mgr.shutdownNow();

Code example source: apache/jackrabbit-oak

@Override
public void addMetadataRecord(File input, String name) throws DataStoreException {
  checkArgument(input != null, "input should not be null");
  checkArgument(!Strings.isNullOrEmpty(name), "name should not be empty");
  ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
  try {
    Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
    Upload upload = tmx.upload(s3ReqDecorator
      .decorate(new PutObjectRequest(bucket, addMetaKeyPrefix(name), input)));
    upload.waitForUploadResult();
  } catch (InterruptedException e) {
    LOG.error("Exception in uploading metadata file {}", new Object[] {input, e});
    throw new DataStoreException("Error in uploading metadata file", e);
  } finally {
    if (contextClassLoader != null) {
      Thread.currentThread().setContextClassLoader(contextClassLoader);
    }
  }
}

Code example source: Aloisius/hadoop-s3a

transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);
TransferManager transfers = new TransferManager(client);
transfers.setConfiguration(transferConfiguration);
PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
putObjectRequest.setCannedAcl(cannedACL);
putObjectRequest.setMetadata(om);
Upload upload = transfers.upload(putObjectRequest);
upload.addProgressListener(listener);
upload.waitForUploadResult();
// account for bytes the listener had not yet seen when the upload finished
long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
if (statistics != null && delta != 0) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("{} more bytes were transferred than the listener reported", delta);
  }
  statistics.incrementBytesWritten(delta);
}

Code example source: org.apache.jackrabbit/oak-blob-cloud

// copy the object onto itself to refresh its metadata (destination reconstructed from context)
CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key);
copReq.setNewObjectMetadata(objectMetaData);
Copy copy = tmx.copy(s3ReqDecorator.decorate(copReq));
try {
  copy.waitForCopyResult();
} catch (Exception e) {
  LOG.warn("in-place copy of [{}] failed", identifier, e);
}
try {
  Upload up = tmx.upload(s3ReqDecorator.decorate(new PutObjectRequest(
    bucket, key, file)));
  up.waitForUploadResult();
  LOG.debug("synchronous upload to identifier [{}] completed.", identifier);
} catch (Exception e2) {
  LOG.error("synchronous upload to identifier [{}] failed", identifier, e2);
}

Code example source: com.ibm.stocator/stocator

@Override
public void close() throws IOException {
 if (closed.getAndSet(true)) {
  return;
 }
 mBackupOutputStream.close();
 LOG.debug("OutputStream for key '{}' closed. Now beginning upload", mKey);
 try {
  final ObjectMetadata om = new ObjectMetadata();
  om.setContentLength(mBackupFile.length());
  om.setContentType(mContentType);
  om.setUserMetadata(mMetadata);
  PutObjectRequest putObjectRequest = new PutObjectRequest(mBucketName, mKey, mBackupFile);
  putObjectRequest.setMetadata(om);
  Upload upload = transfers.upload(putObjectRequest);
  upload.waitForUploadResult();
 } catch (InterruptedException e) {
  throw (InterruptedIOException) new InterruptedIOException(e.toString())
    .initCause(e);
 } catch (AmazonClientException e) {
  throw new IOException(String.format("saving output %s %s", mKey, e));
 } finally {
  if (!mBackupFile.delete()) {
   LOG.warn("Could not delete temporary cos file: {}", mBackupOutputStream);
  }
  super.close();
 }
 LOG.debug("OutputStream for key '{}' upload complete", mKey);
}

Code example source: ch.cern.hadoop/hadoop-aws

om.setServerSideEncryption(serverSideEncryptionAlgorithm);
PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
putObjectRequest.setCannedAcl(cannedACL);
putObjectRequest.setMetadata(om);
Upload upload = transfers.upload(putObjectRequest);
upload.addProgressListener(listener);
upload.waitForUploadResult();
// account for bytes the listener had not yet seen when the upload finished
long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
if (statistics != null && delta != 0) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("{} more bytes were transferred than the listener reported", delta);
  }
  statistics.incrementBytesWritten(delta);
}

Code example source: jenkinsci/pipeline-aws-plugin

path += localFile.getName();
PutObjectRequest request = new PutObjectRequest(this.bucket, path, localFile);
if (this.sseAlgorithm != null) {
  ObjectMetadata metas = new ObjectMetadata();
  metas.setSSEAlgorithm(this.sseAlgorithm);
  request.withMetadata(metas);
}
request.withCannedAcl(this.acl);
final Upload upload = mgr.upload(request);
upload.addProgressListener((ProgressListener) progressEvent -> {
  if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
    RemoteUploader.this.taskListener.getLogger().println("Finished: " + upload.getDescription());
  }
});
upload.waitForCompletion();
return null;

// directory variant from the same class: upload recursively and watch each sub-transfer
final MultipleFileUpload fileUpload = mgr.uploadDirectory(this.bucket, this.path, localFile, true, metadatasProvider);
for (final Upload upload : fileUpload.getSubTransfers()) {
  upload.addProgressListener((ProgressListener) progressEvent -> {
    if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
      RemoteUploader.this.taskListener.getLogger().println("Finished: " + upload.getDescription());
    }
  });
}

Code example source: uk.co.nichesolutions.presto/presto-hive

private void uploadObject()
    throws IOException
{
  try {
    log.debug("Starting upload for host: %s, key: %s, file: %s, size: %s", host, key, tempFile, tempFile.length());
    STATS.uploadStarted();
    PutObjectRequest request = new PutObjectRequest(host, key, tempFile);
    if (sseEnabled) {
      ObjectMetadata metadata = new ObjectMetadata();
      metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
      request.setMetadata(metadata);
    }
    Upload upload = transferManager.upload(request);
    if (log.isDebugEnabled()) {
      upload.addProgressListener(createProgressListener(upload));
    }
    upload.waitForCompletion();
    STATS.uploadSuccessful();
    log.debug("Completed upload for host: %s, key: %s", host, key);
  }
  catch (AmazonClientException e) {
    STATS.uploadFailed();
    throw new IOException(e);
  }
  catch (InterruptedException e) {
    STATS.uploadFailed();
    Thread.currentThread().interrupt();
    throw new InterruptedIOException();
  }
}

Code example source: classmethod/gradle-aws-plugin

getLogger().info("Uploading... s3://{}/{}", bucketName, key);
Upload upload = s3mgr.upload(new PutObjectRequest(getBucketName(), getKey(), getFile())
  .withMetadata(getObjectMetadata()));
// log progress while waiting (listener body reconstructed)
upload.addProgressListener(new ProgressListener() {
  public void progressChanged(ProgressEvent event) {
    getLogger().info("  {}% transferred", upload.getProgress().getPercentTransferred());
  }
});
upload.waitForCompletion();
setResourceUrl(s3.getUrl(bucketName, key).toString());
getLogger().info("Upload completed: {}", getResourceUrl());

Code example source: electronicarts/gatling-aws-maven-plugin

final String path = file.getAbsolutePath();
final long uploadStartTimeMs = System.currentTimeMillis();
final PutObjectRequest putRequest = new PutObjectRequest(s3bucket, targetDirectory + "/" + file.getName(), file)
    .withCannedAcl(CannedAccessControlList.PublicRead);
final Upload upload = this.transferManager.upload(putRequest);
int statusChecks = 0;
try {
  while (!upload.isDone()) {
    if (this.uploadTimedOut(uploadStartTimeMs)) {
      System.err.format("Timed out uploading file to S3 (%s). Will skip file. Report might be incomplete.%n", path);
      return; // skip this file (enclosing method assumed void)
    }
    Thread.sleep(100); // poll interval reconstructed; the original value is not shown
    statusChecks++;
  }
  upload.waitForCompletion();
} catch (final Exception e) {
  System.out.format("Failed to upload to S3 %s/%s/%s%n", s3bucket, targetDirectory, file.getName());
}

Code example source: org.kuali.common/kuali-s3

/**
 * Use this method to reliably upload large files and wait until they are fully uploaded before continuing. Behind the scenes this is
 * accomplished by splitting the file up into manageable chunks and using separate threads to upload each chunk. Consider using
 * multi-part uploads on files larger than <code>MULTI_PART_UPLOAD_THRESHOLD</code>. When this method returns, all threads have finished
 * and the file has been reassembled on S3. The benefit to this method is that if any one thread fails, only the portion of the file
 * that particular thread was handling will have to be re-uploaded (instead of the entire file). A reasonable number of automatic
 * retries occurs if an individual upload thread fails. If the file upload fails this method throws <code>AmazonS3Exception</code>
 */
public void blockingMultiPartUpload(PutObjectRequest request, TransferManager manager) {
  // Use multi-part upload for large files
  Upload upload = manager.upload(request);
  try {
    // Block and wait for the upload to finish
    upload.waitForCompletion();
  } catch (Exception e) {
    throw new AmazonS3Exception("Unexpected error uploading file", e);
  }
}

Code example source: apache/cloudstack

PutObjectRequest putObjectRequest = new PutObjectRequest(s3TO.getBucketName(), s3Key, inputStream, objectMetadata);
putObjectRequest.withStorageClass(StorageClass.ReducedRedundancy); // applied conditionally in the original
Upload upload = transferManager.upload(putObjectRequest); // transfer manager name assumed
upload.addProgressListener(new ProgressListener() {
  @Override
  public void progressChanged(ProgressEvent progressEvent) {
    // progress handling elided in the excerpt
  }
});
try {
  upload.waitForCompletion();
} catch (InterruptedException e) {
  Thread.currentThread().interrupt();
}

Code example source: amazon-archives/aws-ant-tasks

TransferManager transferManager;
if (awsSecretKey != null && awsAccessKeyId != null) {
  transferManager = new TransferManager(getOrCreateClient(AmazonS3Client.class));
} else {
  transferManager = new TransferManager();
}
System.out.println("Uploading file " + file.getName() + "...");
Upload upload = transferManager.upload(bucketName, key, file);
if (printStatusUpdates) {
  // poll the transfer, overwriting the status line until it completes
  while (!upload.isDone()) {
    System.out.print(upload.getProgress().getBytesTransferred()
        + "/"
        + upload.getProgress().getTotalBytesToTransfer()
        + " bytes transferred...\r");
    Thread.sleep(statusUpdatePeriodInMs);
  }
  System.out.print(upload.getProgress().getBytesTransferred()
      + "/"
      + upload.getProgress().getTotalBytesToTransfer()
      + " bytes transferred...\n");
} else {
  upload.waitForCompletion();
}

Code example source: apache/streams

private void addFile() throws Exception {
 InputStream is = new ByteArrayInputStream(this.outputStream.toByteArray());
 int contentLength = outputStream.size();
 TransferManager transferManager = new TransferManager(amazonS3Client);
 ObjectMetadata metadata = new ObjectMetadata();
 metadata.setExpirationTime(DateTime.now().plusDays(365 * 3).toDate());
 metadata.setContentLength(contentLength);
 metadata.addUserMetadata("writer", "org.apache.streams");
 for (String s : metaData.keySet()) {
  metadata.addUserMetadata(s, metaData.get(s));
 }
 String fileNameToWrite = path + fileName;
 Upload upload = transferManager.upload(bucketName, fileNameToWrite, is, metadata);
 try {
  upload.waitForUploadResult();
  is.close();
  transferManager.shutdownNow(false);
  LOGGER.info("S3 File Close[{} kb] - {}", contentLength / 1024, path + fileName);
 } catch (Exception ignored) {
  LOGGER.trace("Ignoring", ignored);
 }
}

Code example source: ingenieux/beanstalker

@Override
public PutObjectResult putObject(PutObjectRequest req) throws AmazonClientException, AmazonServiceException {
 if (!multipartUpload) {
  return super.putObject(req);
 }
 final long contentLen = TransferManagerUtils.getContentLength(req);
 String tempFilename = req.getKey() + ".tmp";
 String origFilename = req.getKey();
 req.setKey(tempFilename);
 XProgressListener progressListener = new XProgressListener();
 req.setGeneralProgressListener(new ProgressListenerChain(progressListener));
 progressListener.setContentLen(contentLen);
 progressListener.setUpload(transferManager.upload(req));
 progressListener.setSilentUpload(silentUpload);
 try {
  progressListener.getUpload().waitForCompletion();
 } catch (InterruptedException e) {
  throw new AmazonClientException(e.getMessage(), e);
 }
 CopyObjectRequest copyReq = new CopyObjectRequest(req.getBucketName(), tempFilename, req.getBucketName(), origFilename);
 copyObject(copyReq);
 deleteObject(new DeleteObjectRequest(req.getBucketName(), tempFilename));
 return null;
}

Code example source: com.conveyal/r5

@Override
  public void saveData(String directory, String fileName, PersistenceBuffer persistenceBuffer) {
    try {
      ObjectMetadata metadata = new ObjectMetadata();
      // Set content encoding to gzip. This way browsers will decompress on download using native deflate code.
      // http://www.rightbrainnetworks.com/blog/serving-compressed-gzipped-static-files-from-amazon-s3-or-cloudfront/
      metadata.setContentEncoding("gzip");
      metadata.setContentType(persistenceBuffer.getMimeType());
      // We must setContentLength or the S3 client will re-buffer the InputStream into another memory buffer.
      metadata.setContentLength(persistenceBuffer.getSize());
//            amazonS3.putObject(directory, fileName, persistenceBuffer.getInputStream(), metadata);
      final Upload upload = transferManager.upload(directory, fileName, persistenceBuffer.getInputStream(), metadata);
      upload.addProgressListener(new UploadProgressLogger(upload));
      // Block until upload completes to avoid accumulating unlimited uploads in memory.
      upload.waitForCompletion();
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }

Code example source: io.digdag/digdag-standards

void stageFiles()
{
  if (files.isEmpty()) {
    return;
  }
  TransferManager transferManager = new TransferManager(s3);
  List<PutObjectRequest> requests = new ArrayList<>();
  for (StagingFile f : files) {
    logger.info("Staging {} -> {}", f.file().reference().filename(), f.file().s3Uri());
    requests.add(stagingFilePutRequest(f));
  }
  try {
    List<Upload> uploads = requests.stream()
        .map(transferManager::upload)
        .collect(toList());
    for (Upload upload : uploads) {
      try {
        upload.waitForCompletion();
      }
      catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new TaskExecutionException(e);
      }
    }
  }
  finally {
    transferManager.shutdownNow(false);
    requests.forEach(r -> closeQuietly(r.getInputStream()));
  }
}
