com.amazonaws.services.s3.transfer.Upload类的使用及代码示例

x33g5p2x  于2022-02-01 转载在 其他  
字(14.2k)|赞(0)|评价(0)|浏览(176)

本文整理了Java中com.amazonaws.services.s3.transfer.Upload类的一些代码示例,展示了Upload类的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Upload类的具体详情如下:
包路径:com.amazonaws.services.s3.transfer.Upload
类名称:Upload

Upload介绍

[英]Represents an asynchronous upload to Amazon S3.

See TransferManager for more information about creating transfers.
[中]表示异步上传到Amazon S3。
有关创建传输的更多信息,请参阅TransferManager。

代码示例

代码示例来源:origin: prestodb/presto

  1. STATS.uploadStarted();
  2. PutObjectRequest request = new PutObjectRequest(host, key, tempFile);
  3. if (sseEnabled) {
  4. switch (sseType) {
  5. case KMS:
  6. if (sseKmsKeyId != null) {
  7. request.withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(sseKmsKeyId));
  8. request.withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams());
  9. request.withCannedAcl(aclType);
  10. Upload upload = transferManager.upload(request);
  11. upload.addProgressListener(createProgressListener(upload));
  12. upload.waitForCompletion();
  13. STATS.uploadSuccessful();
  14. log.debug("Completed upload for host: %s, key: %s", host, key);

代码示例来源:origin: Alluxio/alluxio

  1. PutObjectRequest putReq = new PutObjectRequest(mBucketName, path, mFile).withMetadata(meta);
  2. mManager.upload(putReq).waitForUploadResult();
  3. if (!mFile.delete()) {
  4. LOG.error("Failed to delete temporary file @ {}", mFile.getPath());

代码示例来源:origin: awsdocs/aws-doc-sdk-examples

  1. System.out.println("\nSubtransfer progress:\n");
  2. for (Upload u : sub_xfers) {
  3. System.out.println(" " + u.getDescription());
  4. if (u.isDone()) {
  5. TransferState xfer_state = u.getState();
  6. System.out.println(" " + xfer_state);
  7. } else {
  8. TransferProgress progress = u.getProgress();
  9. double pct = progress.getPercentTransferred();
  10. printProgressBar(pct);

代码示例来源:origin: awsdocs/aws-doc-sdk-examples

  1. TransferManager xfer_mgr = TransferManagerBuilder.standard().build();
  2. try {
  3. Upload u = xfer_mgr.upload(bucket_name, key_name, f);
  4. u.addProgressListener(new ProgressListener() {
  5. public void progressChanged(ProgressEvent e) {
  6. double pct = e.getBytesTransferred() * 100.0 / e.getBytes();
  7. TransferState xfer_state = u.getState();
  8. System.out.println(": " + xfer_state);
  9. } catch (AmazonServiceException e) {
  10. System.exit(1);
  11. xfer_mgr.shutdownNow();

代码示例来源:origin: apache/jackrabbit-oak

  1. @Override
  2. public void addMetadataRecord(File input, String name) throws DataStoreException {
  3. checkArgument(input != null, "input should not be null");
  4. checkArgument(!Strings.isNullOrEmpty(name), "name should not be empty");
  5. ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
  6. try {
  7. Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
  8. Upload upload = tmx.upload(s3ReqDecorator
  9. .decorate(new PutObjectRequest(bucket, addMetaKeyPrefix(name), input)));
  10. upload.waitForUploadResult();
  11. } catch (InterruptedException e) {
  12. LOG.error("Exception in uploading metadata file {}", new Object[] {input, e});
  13. throw new DataStoreException("Error in uploading metadata file", e);
  14. } finally {
  15. if (contextClassLoader != null) {
  16. Thread.currentThread().setContextClassLoader(contextClassLoader);
  17. }
  18. }
  19. }

代码示例来源:origin: Aloisius/hadoop-s3a

  1. transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);
  2. TransferManager transfers = new TransferManager(client);
  3. transfers.setConfiguration(transferConfiguration);
  4. PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
  5. putObjectRequest.setCannedAcl(cannedACL);
  6. putObjectRequest.setMetadata(om);
  7. Upload upload = transfers.upload(putObjectRequest);
  8. upload.addProgressListener(listener);
  9. upload.waitForUploadResult();
  10. long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
  11. if (statistics != null && delta != 0) {
  12. if (LOG.isDebugEnabled()) {

代码示例来源:origin: org.apache.jackrabbit/oak-blob-cloud

  1. bucket, key);
  2. copReq.setNewObjectMetadata(objectMetaData);
  3. Copy copy = tmx.copy(s3ReqDecorator.decorate(copReq));
  4. try {
  5. copy.waitForCopyResult();
  6. try {
  7. Upload up = tmx.upload(s3ReqDecorator.decorate(new PutObjectRequest(
  8. bucket, key, file)));
  9. up.waitForUploadResult();
  10. LOG.debug("synchronous upload to identifier [{}] completed.", identifier);
  11. } catch (Exception e2 ) {

代码示例来源:origin: com.ibm.stocator/stocator

  1. @Override
  2. public void close() throws IOException {
  3. if (closed.getAndSet(true)) {
  4. return;
  5. }
  6. mBackupOutputStream.close();
  7. LOG.debug("OutputStream for key '{}' closed. Now beginning upload", mKey);
  8. try {
  9. final ObjectMetadata om = new ObjectMetadata();
  10. om.setContentLength(mBackupFile.length());
  11. om.setContentType(mContentType);
  12. om.setUserMetadata(mMetadata);
  13. PutObjectRequest putObjectRequest = new PutObjectRequest(mBucketName, mKey, mBackupFile);
  14. putObjectRequest.setMetadata(om);
  15. Upload upload = transfers.upload(putObjectRequest);
  16. upload.waitForUploadResult();
  17. } catch (InterruptedException e) {
  18. throw (InterruptedIOException) new InterruptedIOException(e.toString())
  19. .initCause(e);
  20. } catch (AmazonClientException e) {
  21. throw new IOException(String.format("saving output %s %s", mKey, e));
  22. } finally {
  23. if (!mBackupFile.delete()) {
  24. LOG.warn("Could not delete temporary cos file: {}", mBackupOutputStream);
  25. }
  26. super.close();
  27. }
  28. LOG.debug("OutputStream for key '{}' upload complete", mKey);
  29. }

代码示例来源:origin: ch.cern.hadoop/hadoop-aws

  1. om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  2. PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
  3. putObjectRequest.setCannedAcl(cannedACL);
  4. putObjectRequest.setMetadata(om);
  5. Upload upload = transfers.upload(putObjectRequest);
  6. upload.addProgressListener(listener);
  7. upload.waitForUploadResult();
  8. long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
  9. if (statistics != null && delta != 0) {
  10. if (LOG.isDebugEnabled()) {

代码示例来源:origin: jenkinsci/pipeline-aws-plugin

  1. path += localFile.getName();
  2. PutObjectRequest request = new PutObjectRequest(this.bucket, path, localFile);
  3. metas.setSSEAlgorithm(this.sseAlgorithm);
  4. request.withMetadata(metas);
  5. request.withCannedAcl(this.acl);
  6. final Upload upload = mgr.upload(request);
  7. upload.addProgressListener((ProgressListener) progressEvent -> {
  8. if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
  9. RemoteUploader.this.taskListener.getLogger().println("Finished: " + upload.getDescription());
  10. upload.waitForCompletion();
  11. return null;
  12. fileUpload = mgr.uploadDirectory(this.bucket, this.path, localFile, true, metadatasProvider);
  13. for (final Upload upload : fileUpload.getSubTransfers()) {
  14. upload.addProgressListener((ProgressListener) progressEvent -> {
  15. if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
  16. RemoteUploader.this.taskListener.getLogger().println("Finished: " + upload.getDescription());

代码示例来源:origin: uk.co.nichesolutions.presto/presto-hive

  1. private void uploadObject()
  2. throws IOException
  3. {
  4. try {
  5. log.debug("Starting upload for host: %s, key: %s, file: %s, size: %s", host, key, tempFile, tempFile.length());
  6. STATS.uploadStarted();
  7. PutObjectRequest request = new PutObjectRequest(host, key, tempFile);
  8. if (sseEnabled) {
  9. ObjectMetadata metadata = new ObjectMetadata();
  10. metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
  11. request.setMetadata(metadata);
  12. }
  13. Upload upload = transferManager.upload(request);
  14. if (log.isDebugEnabled()) {
  15. upload.addProgressListener(createProgressListener(upload));
  16. }
  17. upload.waitForCompletion();
  18. STATS.uploadSuccessful();
  19. log.debug("Completed upload for host: %s, key: %s", host, key);
  20. }
  21. catch (AmazonClientException e) {
  22. STATS.uploadFailed();
  23. throw new IOException(e);
  24. }
  25. catch (InterruptedException e) {
  26. STATS.uploadFailed();
  27. Thread.currentThread().interrupt();
  28. throw new InterruptedIOException();
  29. }
  30. }

代码示例来源:origin: classmethod/gradle-aws-plugin

  1. getLogger().info("Uploading... s3://{}/{}", bucketName, key);
  2. Upload upload = s3mgr.upload(new PutObjectRequest(getBucketName(), getKey(), getFile())
  3. .withMetadata(getObjectMetadata()));
  4. upload.addProgressListener(new ProgressListener() {
  5. upload.waitForCompletion();
  6. setResourceUrl(s3.getUrl(bucketName, key).toString());
  7. getLogger().info("Upload completed: {}", getResourceUrl());

代码示例来源:origin: electronicarts/gatling-aws-maven-plugin

  1. final String path = file.getAbsolutePath();
  2. final long uploadStartTimeMs = System.currentTimeMillis();
  3. final PutObjectRequest putRequest = new PutObjectRequest(s3bucket, targetDirectory + "/" + file.getName(), file)
  4. .withCannedAcl(CannedAccessControlList.PublicRead);
  5. final Upload upload = this.transferManager.upload(putRequest);
  6. int statusChecks = 0;
  7. while (!upload.isDone()) {
  8. if (this.uploadTimedOut(uploadStartTimeMs)) {
  9. System.err.format("Timed out uploading file to S3 (%s). Will skip file. Report might be incomplete.%n", path);
  10. upload.waitForCompletion();
  11. } catch (final Exception e) {
  12. System.out.format("Failed to upload to S3 %s/%s/%s%n", s3bucket, targetDirectory, file.getName());

代码示例来源:origin: org.kuali.common/kuali-s3

  1. /**
  2. * Use this method to reliably upload large files and wait until they are fully uploaded before continuing. Behind the scenes this is
  3. * accomplished by splitting the file up into manageable chunks and using separate threads to upload each chunk. Consider using
  4. * multi-part uploads on files larger than <code>MULTI_PART_UPLOAD_THRESHOLD</code>. When this method returns, all threads have finished
  5. * and the file has been reassembled on S3. The benefit to this method is that if any one thread fails, only the portion of the file
  6. * that particular thread was handling will have to be re-uploaded (instead of the entire file). A reasonable number of automatic
  7. * retries occurs if an individual upload thread fails. If the file upload fails this method throws <code>AmazonS3Exception</code>
  8. */
  9. public void blockingMultiPartUpload(PutObjectRequest request, TransferManager manager) {
  10. // Use multi-part upload for large files
  11. Upload upload = manager.upload(request);
  12. try {
  13. // Block and wait for the upload to finish
  14. upload.waitForCompletion();
  15. } catch (Exception e) {
  16. throw new AmazonS3Exception("Unexpected error uploading file", e);
  17. }
  18. }

代码示例来源:origin: apache/cloudstack

  1. PutObjectRequest putObjectRequest = new PutObjectRequest(s3TO.getBucketName(), s3Key, inputStream, objectMetadata);
  2. putObjectRequest.withStorageClass(StorageClass.ReducedRedundancy);
  3. upload.addProgressListener(new ProgressListener() {
  4. @Override
  5. public void progressChanged(ProgressEvent progressEvent) {
  6. upload.waitForCompletion();
  7. } catch (InterruptedException e) {

代码示例来源:origin: amazon-archives/aws-ant-tasks

  1. TransferManager transferManager;
  2. if (awsSecretKey != null && awsAccessKeyId != null) {
  3. transferManager = new TransferManager(getOrCreateClient(AmazonS3Client.class));
  4. } else {
  5. transferManager = new TransferManager();
  6. System.out.println("Uploading file " + file.getName()
  7. + "...");
  8. Upload upload = transferManager.upload(bucketName, key, file);
  9. if (printStatusUpdates) {
  10. while (!upload.isDone()) {
  11. System.out.print(upload.getProgress()
  12. .getBytesTransferred()
  13. + "/"
  14. + upload.getProgress()
  15. .getTotalBytesToTransfer()
  16. + " bytes transferred...\r");
  17. Thread.sleep(statusUpdatePeriodInMs);
  18. System.out.print(upload.getProgress()
  19. .getBytesTransferred()
  20. + "/"
  21. + upload.getProgress()
  22. .getTotalBytesToTransfer()
  23. + " bytes transferred...\n");
  24. } else {
  25. upload.waitForCompletion();

代码示例来源:origin: apache/streams

  1. private void addFile() throws Exception {
  2. InputStream is = new ByteArrayInputStream(this.outputStream.toByteArray());
  3. int contentLength = outputStream.size();
  4. TransferManager transferManager = new TransferManager(amazonS3Client);
  5. ObjectMetadata metadata = new ObjectMetadata();
  6. metadata.setExpirationTime(DateTime.now().plusDays(365 * 3).toDate());
  7. metadata.setContentLength(contentLength);
  8. metadata.addUserMetadata("writer", "org.apache.streams");
  9. for (String s : metaData.keySet()) {
  10. metadata.addUserMetadata(s, metaData.get(s));
  11. }
  12. String fileNameToWrite = path + fileName;
  13. Upload upload = transferManager.upload(bucketName, fileNameToWrite, is, metadata);
  14. try {
  15. upload.waitForUploadResult();
  16. is.close();
  17. transferManager.shutdownNow(false);
  18. LOGGER.info("S3 File Close[{} kb] - {}", contentLength / 1024, path + fileName);
  19. } catch (Exception ignored) {
  20. LOGGER.trace("Ignoring", ignored);
  21. }
  22. }

代码示例来源:origin: ingenieux/beanstalker

  1. @Override
  2. public PutObjectResult putObject(PutObjectRequest req) throws AmazonClientException, AmazonServiceException {
  3. if (!multipartUpload) {
  4. return super.putObject(req);
  5. }
  6. final long contentLen = TransferManagerUtils.getContentLength(req);
  7. String tempFilename = req.getKey() + ".tmp";
  8. String origFilename = req.getKey();
  9. req.setKey(tempFilename);
  10. XProgressListener progressListener = new XProgressListener();
  11. req.setGeneralProgressListener(new ProgressListenerChain(progressListener));
  12. progressListener.setContentLen(contentLen);
  13. progressListener.setUpload(transferManager.upload(req));
  14. progressListener.setSilentUpload(silentUpload);
  15. try {
  16. progressListener.getUpload().waitForCompletion();
  17. } catch (InterruptedException e) {
  18. throw new AmazonClientException(e.getMessage(), e);
  19. }
  20. CopyObjectRequest copyReq = new CopyObjectRequest(req.getBucketName(), tempFilename, req.getBucketName(), origFilename);
  21. copyObject(copyReq);
  22. deleteObject(new DeleteObjectRequest(req.getBucketName(), tempFilename));
  23. return null;
  24. }

代码示例来源:origin: com.conveyal/r5

  1. @Override
  2. public void saveData(String directory, String fileName, PersistenceBuffer persistenceBuffer) {
  3. try {
  4. ObjectMetadata metadata = new ObjectMetadata();
  5. // Set content encoding to gzip. This way browsers will decompress on download using native deflate code.
  6. // http://www.rightbrainnetworks.com/blog/serving-compressed-gzipped-static-files-from-amazon-s3-or-cloudfront/
  7. metadata.setContentEncoding("gzip");
  8. metadata.setContentType(persistenceBuffer.getMimeType());
  9. // We must setContentLength or the S3 client will re-buffer the InputStream into another memory buffer.
  10. metadata.setContentLength(persistenceBuffer.getSize());
  11. // amazonS3.putObject(directory, fileName, persistenceBuffer.getInputStream(), metadata);
  12. final Upload upload = transferManager.upload(directory, fileName, persistenceBuffer.getInputStream(), metadata);
  13. upload.addProgressListener(new UploadProgressLogger(upload));
  14. // Block until upload completes to avoid accumulating unlimited uploads in memory.
  15. upload.waitForCompletion();
  16. } catch (Exception e) {
  17. throw new RuntimeException(e);
  18. }
  19. }

代码示例来源:origin: io.digdag/digdag-standards

  1. void stageFiles()
  2. {
  3. if (files.isEmpty()) {
  4. return;
  5. }
  6. TransferManager transferManager = new TransferManager(s3);
  7. List<PutObjectRequest> requests = new ArrayList<>();
  8. for (StagingFile f : files) {
  9. logger.info("Staging {} -> {}", f.file().reference().filename(), f.file().s3Uri());
  10. requests.add(stagingFilePutRequest(f));
  11. }
  12. try {
  13. List<Upload> uploads = requests.stream()
  14. .map(transferManager::upload)
  15. .collect(toList());
  16. for (Upload upload : uploads) {
  17. try {
  18. upload.waitForCompletion();
  19. }
  20. catch (InterruptedException e) {
  21. Thread.currentThread().interrupt();
  22. throw new TaskExecutionException(e);
  23. }
  24. }
  25. }
  26. finally {
  27. transferManager.shutdownNow(false);
  28. requests.forEach(r -> closeQuietly(r.getInputStream()));
  29. }
  30. }

相关文章