diff --git a/gems/aws-sdk-s3/CHANGELOG.md b/gems/aws-sdk-s3/CHANGELOG.md
index acfff814db8..e7469d7c031 100644
--- a/gems/aws-sdk-s3/CHANGELOG.md
+++ b/gems/aws-sdk-s3/CHANGELOG.md
@@ -1,6 +1,8 @@
 Unreleased Changes
 ------------------

+* Feature - TODO
+
 1.211.0 (2026-01-08)
 ------------------

diff --git a/gems/aws-sdk-s3/lib/aws-sdk-s3/customizations.rb b/gems/aws-sdk-s3/lib/aws-sdk-s3/customizations.rb
index 9a3914a2b39..de39a386ca4 100644
--- a/gems/aws-sdk-s3/lib/aws-sdk-s3/customizations.rb
+++ b/gems/aws-sdk-s3/lib/aws-sdk-s3/customizations.rb
@@ -7,15 +7,22 @@ module S3
     autoload :Encryption, 'aws-sdk-s3/encryption'
     autoload :EncryptionV2, 'aws-sdk-s3/encryption_v2'
     autoload :EncryptionV3, 'aws-sdk-s3/encryption_v3'
+    autoload :LegacySigner, 'aws-sdk-s3/legacy_signer'
+
+    # transfer manager + multipart upload/download utilities
+    autoload :DefaultExecutor, 'aws-sdk-s3/default_executor'
     autoload :FilePart, 'aws-sdk-s3/file_part'
-    autoload :DefaultExecutor, 'aws-sdk-s3/default_executor'
     autoload :FileUploader, 'aws-sdk-s3/file_uploader'
     autoload :FileDownloader, 'aws-sdk-s3/file_downloader'
-    autoload :LegacySigner, 'aws-sdk-s3/legacy_signer'
     autoload :MultipartDownloadError, 'aws-sdk-s3/multipart_download_error'
     autoload :MultipartFileUploader, 'aws-sdk-s3/multipart_file_uploader'
     autoload :MultipartStreamUploader, 'aws-sdk-s3/multipart_stream_uploader'
     autoload :MultipartUploadError, 'aws-sdk-s3/multipart_upload_error'
+    autoload :DirectoryProgress, 'aws-sdk-s3/directory_progress'
+    autoload :DirectoryDownloadError, 'aws-sdk-s3/directory_download_error'
+    autoload :DirectoryDownloader, 'aws-sdk-s3/directory_downloader'
+    autoload :DirectoryUploadError, 'aws-sdk-s3/directory_upload_error'
+    autoload :DirectoryUploader, 'aws-sdk-s3/directory_uploader'
     autoload :ObjectCopier, 'aws-sdk-s3/object_copier'
     autoload :ObjectMultipartCopier, 'aws-sdk-s3/object_multipart_copier'
     autoload :PresignedPost, 'aws-sdk-s3/presigned_post'
diff --git a/gems/aws-sdk-s3/lib/aws-sdk-s3/directory_download_error.rb b/gems/aws-sdk-s3/lib/aws-sdk-s3/directory_download_error.rb
new file mode 100644
index 00000000000..7ec3b35a35d
--- /dev/null
+++ b/gems/aws-sdk-s3/lib/aws-sdk-s3/directory_download_error.rb
@@ -0,0 +1,16 @@
+# frozen_string_literal: true
+
+module Aws
+  module S3
+    # Raised when DirectoryDownloader fails to download objects from S3 bucket
+    class DirectoryDownloadError < StandardError
+      def initialize(message, errors = [])
+        @errors = errors
+        super(message)
+      end
+
+      # @return [Array] The list of errors encountered when downloading objects
+      attr_reader :errors
+    end
+  end
+end
diff --git a/gems/aws-sdk-s3/lib/aws-sdk-s3/directory_downloader.rb b/gems/aws-sdk-s3/lib/aws-sdk-s3/directory_downloader.rb
new file mode 100644
index 00000000000..4703cd72651
--- /dev/null
+++ b/gems/aws-sdk-s3/lib/aws-sdk-s3/directory_downloader.rb
@@ -0,0 +1,162 @@
+# frozen_string_literal: true
+
+module Aws
+  module S3
+    # @api private
+    class DirectoryDownloader
+      def initialize(options = {})
+        @client = options[:client]
+        @executor = options[:executor]
+        @abort_requested = false
+        @mutex = Mutex.new
+      end
+
+      attr_reader :abort_requested
+
+      def download(destination, bucket:, **options)
+        if File.exist?(destination)
+          raise ArgumentError, 'invalid destination, expected a directory' unless File.directory?(destination)
+        else
+          FileUtils.mkdir_p(destination)
+        end
+
+        download_opts = build_download_opts(destination, bucket, options.dup)
+        downloader = FileDownloader.new(client: @client, executor: @executor)
+
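+        # The producer below streams keys from paginated ListObjectsV2 calls into a
+        # bounded queue; each queued object is then downloaded with this FileDownloader,
+        # so listing and downloading overlap instead of buffering the full key list.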
producer = ObjectProducer.new(download_opts.merge(client: @client, directory_downloader: self)) + downloads, errors = process_download_queue(producer, downloader, download_opts) + build_result(downloads, errors) + ensure + @abort_requested = false + end + + private + + def request_abort + @mutex.synchronize { @abort_requested = true } + end + def build_download_opts(destination, bucket, opts) + { + destination: destination, + bucket: bucket, + s3_prefix: opts.delete(:s3_prefix), + ignore_failure: opts.delete(:ignore_failure) || false, + filter_callback: opts.delete(:filter_callback), + progress_callback: opts.delete(:progress_callback) + } + end + + def build_result(download_count, errors) + if @abort_requested + msg = "directory download failed: #{errors.map(&:message).join('; ')}" + raise DirectoryDownloadError.new(msg, errors) + else + { + completed_downloads: [download_count - errors.count, 0].max, + failed_downloads: errors.count, + errors: errors.any? ? errors : nil + }.compact + end + end + + def handle_error(executor, opts) + return if opts[:ignore_failure] + + request_abort + executor.kill + end + + def process_download_queue(producer, downloader, opts) + # Separate executor for lightweight queuing tasks, avoiding interference with main @executor lifecycle + queue_executor = DefaultExecutor.new + progress = DirectoryProgress.new(opts[:progress_callback]) if opts[:progress_callback] + download_attempts = 0 + errors = [] + begin + producer.each do |object| + break if @abort_requested + + download_attempts += 1 + queue_executor.post(object) do |o| + dir_path = File.dirname(o[:path]) + FileUtils.mkdir_p(dir_path) unless dir_path == opts[:destination] || Dir.exist?(dir_path) + + downloader.download(o[:path], bucket: opts[:bucket], key: o[:key]) + progress&.call(File.size(o[:path])) + rescue StandardError => e + errors << e + handle_error(queue_executor, opts) + end + end + rescue StandardError => e + errors << e + handle_error(queue_executor, opts) + end + queue_executor.shutdown + [download_attempts, errors] + end + + # @api private + class ObjectProducer + include Enumerable + + DEFAULT_QUEUE_SIZE = 100 + + def initialize(options = {}) + @destination_dir = options[:destination] + @client = options[:client] + @bucket = options[:bucket] + @s3_prefix = options[:s3_prefix] + @filter_callback = options[:filter_callback] + @directory_downloader = options[:directory_downloader] + @object_queue = SizedQueue.new(DEFAULT_QUEUE_SIZE) + end + + def each + producer_thread = Thread.new do + stream_objects + ensure + @object_queue << :done + end + + # Yield objects from internal queue + while (object = @object_queue.shift) != :done + break if @directory_downloader.abort_requested + + yield object + end + ensure + producer_thread.join + end + + private + + def build_object_entry(key) + { path: File.join(@destination_dir, normalize_key(key)), key: key } + end + + def stream_objects(continuation_token: nil) + resp = @client.list_objects_v2(bucket: @bucket, prefix: @s3_prefix, continuation_token: continuation_token) + resp.contents.each do |o| + break if @directory_downloader.abort_requested + next if o.key.end_with?('/') && o.size.zero? 
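+            # The guard above skips zero-byte keys ending in '/', i.e. "folder"
+            # placeholder objects (such as those created by the S3 console), which have
+            # no content to download; the filter below lets callers exclude further keys.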
+ next unless include_object?(o.key) + + @object_queue << build_object_entry(o.key) + end + stream_objects(continuation_token: resp.next_continuation_token) if resp.next_continuation_token + end + + def include_object?(key) + return true unless @filter_callback + + @filter_callback.call(key) + end + + def normalize_key(key) + key = key.delete_prefix(@s3_prefix) if @s3_prefix + File::SEPARATOR == '/' ? key : key.tr('/', File::SEPARATOR) + end + end + end + end +end diff --git a/gems/aws-sdk-s3/lib/aws-sdk-s3/directory_progress.rb b/gems/aws-sdk-s3/lib/aws-sdk-s3/directory_progress.rb new file mode 100644 index 00000000000..d2100782977 --- /dev/null +++ b/gems/aws-sdk-s3/lib/aws-sdk-s3/directory_progress.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +module Aws + module S3 + # @api private + class DirectoryProgress + def initialize(progress_callback) + @transferred_bytes = 0 + @transferred_files = 0 + @progress_callback = progress_callback + @mutex = Mutex.new + end + + def call(bytes_transferred) + @mutex.synchronize do + @transferred_bytes += bytes_transferred + @transferred_files += 1 + + @progress_callback.call(@transferred_bytes, @transferred_files) + end + end + end + end +end diff --git a/gems/aws-sdk-s3/lib/aws-sdk-s3/directory_upload_error.rb b/gems/aws-sdk-s3/lib/aws-sdk-s3/directory_upload_error.rb new file mode 100644 index 00000000000..1818e103805 --- /dev/null +++ b/gems/aws-sdk-s3/lib/aws-sdk-s3/directory_upload_error.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +module Aws + module S3 + # Raised when DirectoryUploader fails to upload files to S3 bucket + class DirectoryUploadError < StandardError + def initialize(message, errors = []) + @errors = errors + super(message) + end + + # @return [Array] The list of errors encountered when uploading files + attr_reader :errors + end + end +end diff --git a/gems/aws-sdk-s3/lib/aws-sdk-s3/directory_uploader.rb b/gems/aws-sdk-s3/lib/aws-sdk-s3/directory_uploader.rb new file mode 100644 index 00000000000..a1a12df53b6 --- /dev/null +++ b/gems/aws-sdk-s3/lib/aws-sdk-s3/directory_uploader.rb @@ -0,0 +1,259 @@ +# frozen_string_literal: true + +require 'set' + +module Aws + module S3 + # @api private + class DirectoryUploader + def initialize(opts = {}) + @client = opts[:client] + @executor = opts[:executor] + @queue_executor = DefaultExecutor.new + @abort_requested = false + @mutex = Mutex.new + end + + attr_reader :client, :executor + + def abort_requested + @mutex.synchronize { @abort_requested } + end + + def request_abort + @mutex.synchronize { @abort_requested = true } + end + + def upload(source_directory, bucket, **opts) + raise ArgumentError, 'Invalid directory' unless Dir.exist?(source_directory) + + uploader = FileUploader.new( + multipart_threshold: opts.delete(:multipart_threshold), + client: @client, + executor: @executor + ) + upload_opts, producer_opts = build_opts(source_directory, bucket, opts) + producer = FileProducer.new(producer_opts) + uploads, errors = process_upload_queue(producer, uploader, upload_opts) + build_result(uploads, errors) + ensure + @abort_requested = false + @queue_executor.shutdown + end + + private + + def build_opts(source_directory, bucket, opts) + uploader_opts = { + progress_callback: opts[:progress_callback], + ignore_failure: opts[:ignore_failure] || false + } + producer_opts = { + directory_uploader: self, + source_dir: source_directory, + bucket: bucket, + s3_prefix: opts[:s3_prefix], + recursive: opts[:recursive] || false, + follow_symlinks: opts[:follow_symlinks] || false, + 
filter_callback: opts[:filter_callback], + request_callback: opts[:request_callback], + } + [uploader_opts, producer_opts] + end + + def build_result(upload_count, errors) + if @abort_requested + msg = "directory upload failed: #{errors.map(&:message).join('; ')}" + raise DirectoryUploadError.new(msg, errors) + else + { + completed_uploads: [upload_count - errors.count, 0].max, + failed_uploads: errors.count, + errors: errors.any? ? errors : nil + }.compact + end + end + + def handle_error(opts) + return if opts[:ignore_failure] + + request_abort + @queue_executor.kill + end + + def process_upload_queue(producer, uploader, opts) + progress = DirectoryProgress.new(opts[:progress_callback]) if opts[:progress_callback] + completion_queue = Queue.new + upload_attempts = 0 + errors = [] + producer.each do |file| + break if abort_requested + + upload_attempts += 1 + @queue_executor.post(file) do |f| + uploader.upload(f.path, f.params) + progress&.call(File.size(f.path)) + rescue StandardError => e + @mutex.synchronize do + errors << StandardError.new("Upload failed for #{File.basename(f.path)}: #{e.message}") + end + handle_error(opts) + ensure + completion_queue << :done + end + end + upload_attempts.times do + break if abort_requested + + completion_queue.pop + end + [upload_attempts, errors] + end + + # @api private + class FileProducer + include Enumerable + + DEFAULT_QUEUE_SIZE = 100 + DONE_MARKER = :done + + def initialize(opts = {}) + @directory_uploader = opts[:directory_uploader] + @source_dir = opts[:source_dir] + @bucket = opts[:bucket] + @s3_prefix = opts[:s3_prefix] + @recursive = opts[:recursive] + @follow_symlinks = opts[:follow_symlinks] + @filter_callback = opts[:filter_callback] + @request_callback = opts[:request_callback] + @file_queue = SizedQueue.new(DEFAULT_QUEUE_SIZE) + end + + def each + producer_thread = Thread.new do + if @recursive + find_recursively + else + find_directly + end + rescue StandardError => e + @directory_uploader.request_abort + raise DirectoryUploadError.new("Directory traversal failed for '#{@source_dir}': #{e.message}") + ensure + @file_queue << DONE_MARKER + end + + while (file = @file_queue.shift) != DONE_MARKER + break if @directory_uploader.abort_requested + + yield file + end + ensure + producer_thread.value + end + + private + + def apply_request_callback(file_path, params) + callback_params = @request_callback.call(file_path, params.dup) + return params unless callback_params.is_a?(Hash) && callback_params.any? + + params.merge(callback_params) + end + + def build_upload_entry(file_path, key) + params = { bucket: @bucket, key: @s3_prefix ? File.join(@s3_prefix, key) : key } + params = apply_request_callback(file_path, params.dup) if @request_callback + UploadEntry.new(path: file_path, params: params) + end + + def find_directly + Dir.each_child(@source_dir) do |entry| + break if @directory_uploader.abort_requested + + entry_path = File.join(@source_dir, entry) + if @follow_symlinks + stat = File.stat(entry_path) + next if stat.directory? + else + stat = File.lstat(entry_path) + next if stat.symlink? || stat.directory? 
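+              # File.lstat does not resolve the link, so with follow_symlinks disabled
+              # symlinked entries are skipped here rather than uploading their targets.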
+            end
+            next unless include_file?(entry_path, entry)
+
+            @file_queue << build_upload_entry(entry_path, entry)
+          end
+        end
+
+        def find_recursively
+          if @follow_symlinks
+            ancestors = Set.new
+            ancestors << File.stat(@source_dir).ino
+            scan_directory(@source_dir, ancestors: ancestors)
+          else
+            scan_directory(@source_dir)
+          end
+        end
+
+        def include_file?(file_path, file_name)
+          return true unless @filter_callback
+
+          @filter_callback.call(file_path, file_name)
+        end
+
+        def scan_directory(dir_path, key_prefix: '', ancestors: nil)
+          return if @directory_uploader.abort_requested
+
+          Dir.each_child(dir_path) do |entry|
+            break if @directory_uploader.abort_requested
+
+            full_path = File.join(dir_path, entry)
+            next unless include_file?(full_path, entry)
+
+            stat = get_file_stat(full_path)
+            next unless stat
+
+            if stat.directory?
+              handle_directory(full_path, entry, key_prefix, ancestors)
+            else
+              key = key_prefix.empty? ? entry : File.join(key_prefix, entry)
+              @file_queue << build_upload_entry(full_path, key)
+            end
+          end
+        end
+
+        def get_file_stat(full_path)
+          return File.stat(full_path) if @follow_symlinks
+
+          lstat = File.lstat(full_path)
+          return if lstat.symlink?
+
+          lstat
+        end
+
+        def handle_directory(dir_path, dir_name, key_prefix, ancestors)
+          if @follow_symlinks && ancestors
+            stat = File.stat(dir_path)
+            ino = stat.ino
+            return if ancestors.include?(ino) # cycle detected - skip
+
+            ancestors.add(ino)
+          end
+          new_prefix = key_prefix.empty? ? dir_name : File.join(key_prefix, dir_name)
+          scan_directory(dir_path, key_prefix: new_prefix, ancestors: ancestors)
+          ancestors.delete(ino) if @follow_symlinks && ancestors
+        end
+
+        # @api private
+        class UploadEntry
+          def initialize(opts = {})
+            @path = opts[:path]
+            @params = opts[:params]
+          end
+
+          attr_reader :path, :params
+        end
+      end
+    end
+  end
+end
diff --git a/gems/aws-sdk-s3/lib/aws-sdk-s3/transfer_manager.rb b/gems/aws-sdk-s3/lib/aws-sdk-s3/transfer_manager.rb
index 6ee2d09deb3..d9a59bbdd0c 100644
--- a/gems/aws-sdk-s3/lib/aws-sdk-s3/transfer_manager.rb
+++ b/gems/aws-sdk-s3/lib/aws-sdk-s3/transfer_manager.rb
@@ -153,6 +153,73 @@ def download_file(destination, bucket:, key:, **options)
       true
     end

+    # Uploads a directory from disk to S3.
+    #
+    # @example Uploading a directory
+    #   tm = TransferManager.new
+    #   tm.upload_directory('/path/to/directory', bucket: 'bucket')
+    #   # => {completed_uploads: 7, failed_uploads: 0}
+    #
+    # @example Using filter callback to upload only text files
+    #   tm = TransferManager.new
+    #   filter = proc do |file_path, file_name|
+    #     File.extname(file_name) == '.txt' # Only upload .txt files
+    #   end
+    #   tm.upload_directory('/path/to/directory', bucket: 'bucket', filter_callback: filter)
+    #
+    # @param [String, Pathname] source
+    #   The source directory to upload.
+    #
+    # @param [String] bucket
+    #   The name of the bucket to upload objects to.
+    #
+    # @param [Hash] options
+    #
+    # @option options [String] :s3_prefix (nil)
+    #   The S3 key prefix to use for each object. If not provided, files will be uploaded to the root of the bucket.
+    #
+    # @option options [Boolean] :recursive (false)
+    #   Whether to upload directories recursively:
+    #   * `false` (default) - only files in the top-level directory are uploaded, subdirectories are ignored.
+    #   * `true` - all files and subdirectories are uploaded recursively.
+    #
+    # @option options [Boolean] :follow_symlinks (false)
+    #   Whether to follow symbolic links when traversing the file tree:
+    #   * `false` (default) - symbolic links are ignored and not uploaded.
+    #   * `true` - symbolic links are followed and their target files/directories are uploaded.
+    #
+    # @option options [Boolean] :ignore_failure (false)
+    #   How to handle individual file upload failures:
+    #   * `false` (default) - Cancel all ongoing requests, terminate the directory upload, and raise an exception
+    #   * `true` - Ignore the failure and continue the transfer for other objects
+    #
+    # @option options [Proc] :filter_callback (nil)
+    #   A Proc to filter which files to upload. Called for each discovered file with the file path and file name.
+    #   Return `true` to upload the file, `false` to skip it.
+    #
+    # @option options [Proc] :request_callback (nil)
+    #   A Proc to modify upload parameters for each file. Called with the file path and the upload parameters hash.
+    #   Must return the modified parameters hash.
+    #
+    # @option options [Proc] :progress_callback (nil)
+    #   A Proc that will be called as files are uploaded.
+    #   It will be invoked with the cumulative `transferred_bytes` and `transferred_files`.
+    #
+    # @raise [DirectoryUploadError] If any file fails to upload and `:ignore_failure` is `false`, or if directory traversal fails.
+    #
+    # @return [Hash] Returns a hash with upload statistics:
+    #   * `:completed_uploads` - Number of files successfully uploaded
+    #   * `:failed_uploads` - Number of files that failed to upload
+    #   * `:errors` - Array of error objects for failed uploads (only present when failures occur)
+    def upload_directory(source, bucket:, **options)
+      executor = @executor || DefaultExecutor.new
+      # TODO: need to consider whether we want to allow http chunk size
+      uploader = DirectoryUploader.new(client: @client, executor: executor)
+      result = uploader.upload(source, bucket, **options)
+      executor.shutdown unless @executor
+      result
+    end
+
     # Uploads a file from disk to S3.
     #
     #   # a small file are uploaded with PutObject API
diff --git a/gems/aws-sdk-s3/spec/directory_uploader_spec.rb b/gems/aws-sdk-s3/spec/directory_uploader_spec.rb
new file mode 100644
index 00000000000..d20b01a8890
--- /dev/null
+++ b/gems/aws-sdk-s3/spec/directory_uploader_spec.rb
@@ -0,0 +1,209 @@
+# frozen_string_literal: true
+
+require_relative 'spec_helper'
+require 'tempfile'
+require 'tmpdir'
+
+module Aws
+  module S3
+    describe DirectoryUploader do
+      def create_test_directory_structure(base_dir)
+        # Root files
+        create_file_with_size(base_dir, 'small.txt', 1024) # 1KB
+        create_file_with_size(base_dir, 'medium.log', 1024 * 1024) # 1MB
+        create_file_with_size(base_dir, 'large.dat', 10 * 1024 * 1024) # 10MB
+        create_file_with_size(base_dir, 'huge.bin', 20 * 1024 * 1024) # 20MB
+        create_file_with_size(base_dir, 'target.txt', 1024 * 1024) # 1MB
+
+        # Nested directories
+        subdir1 = File.join(base_dir, 'documents')
+        Dir.mkdir(subdir1)
+        create_file_with_size(subdir1, 'readme.md', 2048) # 2KB
+        create_file_with_size(subdir1, 'backup.zip', 10 * 1024 * 1024) # 10MB
+
+        subdir2 = File.join(base_dir, 'images')
+        Dir.mkdir(subdir2)
+        create_file_with_size(subdir2, 'photo1.jpg', 2 * 1024 * 1024) # 2MB
+
+        # Deep nesting
+        subdir3 = File.join(subdir2, 'thumbnails')
+        Dir.mkdir(subdir3)
+        create_file_with_size(subdir3, 'thumb.jpg', 50 * 1024) # 50KB
+
+        # Symlinks for testing
+        File.symlink(File.join(base_dir, 'small.txt'), File.join(base_dir, 'small_link.txt'))
+        File.symlink(subdir1, File.join(base_dir, 'docs_link'))
+
+        # Recursive symlink (points back to parent directory)
+        File.symlink(base_dir, File.join(subdir1, 'parent_link'))
+        File.symlink(File.join(base_dir, 'target.txt'), File.join(base_dir, 'link1.txt'))
+        File.symlink(File.join(base_dir, 'link1.txt'), File.join(base_dir, 'link2.txt'))
+      end
+
+      def create_file_with_size(dir, filename,
size_bytes) + file_path = File.join(dir, filename) + File.write(file_path, 'x' * size_bytes) + end + + let(:client) { Aws::S3::Client.new(stub_responses: true) } + let(:executor) { DefaultExecutor.new } + let(:uploader) { DirectoryUploader.new(client: client, executor: executor) } + + describe '#initialize' do + it 'constructs with default options' do + uploader = DirectoryUploader.new + expect(uploader.abort_requested).to be false + end + + it 'accepts client and executor options' do + expect(uploader.client).to be(client) + expect(uploader.executor).to be(executor) + end + end + + describe '#upload' do + let(:temp_dir) { Dir.mktmpdir } + + before do + create_test_directory_structure(temp_dir) + end + + after do + FileUtils.rm_rf(temp_dir) + end + + it 'handles empty directory' do + empty_dir = Dir.mktmpdir + result = uploader.upload(empty_dir, 'test-bucket') + + expect(result[:completed_uploads]).to eq(0) + expect(result[:failed_uploads]).to eq(0) + FileUtils.rm_rf(empty_dir) + end + + it 'raises when directory does not exist' do + expect do + uploader.upload('/nonexistent/path', 'test-bucket') + end.to raise_error(ArgumentError, /Invalid directory/) + end + + it 'can be aborted mid-upload' do + call_count = 0 + allow(client).to receive(:put_object) do + call_count += 1 + uploader.request_abort if call_count == 2 + end + + expect do + uploader.upload(temp_dir, 'test-bucket') + end.to raise_error(DirectoryUploadError) + end + + it 'raises when directory traversal fails' do + allow(Dir).to receive(:each_child).and_raise(Errno::EACCES, 'Permission denied') + expect do + uploader.upload(temp_dir, 'test-bucket', recursive: true, ignore_failure: true) + end.to raise_error(DirectoryUploadError, /Directory traversal failed/) + end + + context 'recursive' do + it 'uploads recursively when true' do + result = uploader.upload(temp_dir, 'test-bucket', recursive: true) + + expect(result[:completed_uploads]).to eq(9) + expect(result[:failed_uploads]).to eq(0) + end + + it 'uploads only direct files when false' do + result = uploader.upload(temp_dir, 'test-bucket') + + expect(result[:completed_uploads]).to eq(5) + end + end + + context 's3 prefix' do + it 'applies prefixes to all keys when set' do + uploaded_keys = [] + allow(client).to receive(:put_object) { |p| uploaded_keys << p[:key] } + result = uploader.upload(temp_dir, 'test-bucket', s3_prefix: 'uploads', recursive: true) + + expect(uploaded_keys).to all(start_with('uploads/')) + expect(uploaded_keys.length).to eq(9) + expect(result[:completed_uploads]).to eq(9) + end + end + + context 'follow_symlinks option' do + it 'follows symlinks when true' do + result = uploader.upload(temp_dir, 'test-bucket', recursive: true, follow_symlinks: true) + expect(result[:completed_uploads]).to eq(14) + expect(result[:failed_uploads]).to eq(0) + end + end + + context 'ignore_failure option' do + it 'stops uploading after failure by default' do + allow(client).to receive(:put_object).and_raise(Aws::S3::Errors::AccessDenied.new(nil, 'Access Denied')) + expect do + uploader.upload(temp_dir, 'test-bucket', ignore_failure: false) + end.to raise_error(DirectoryUploadError) + end + + it 'continues uploading after failure when true' do + uploaded_keys = [] + allow(client).to receive(:put_object) do |params| + if %w[small.txt medium.log].include?(params[:key]) + raise Aws::S3::Errors::AccessDenied.new(nil, 'Access Denied') + end + uploaded_keys << params[:key] + end + + result = uploader.upload(temp_dir, 'test-bucket', ignore_failure: true) + 
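+            # The non-recursive upload attempts the 5 top-level files; put_object is
+            # stubbed to fail for small.txt and medium.log, so 3 succeed and 2 errors remain.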
expect(result[:completed_uploads]).to eq(3) + expect(result[:failed_uploads]).to eq(2) + expect(result[:errors].length).to eq(2) + end + end + + context 'filter callbacks' do + it 'excludes files' do + uploaded_keys = [] + allow(client).to receive(:put_object) { |p| uploaded_keys << p[:key] } + filter_callback = lambda { |_p, name| !name.end_with?('.bin') } + result = uploader.upload(temp_dir, 'test-bucket', filter_callback: filter_callback) + + expect(uploaded_keys).not_to include('huge.bin') + expect(result[:completed_uploads]).to eq(4) + end + end + + context 'request callbacks' do + it 'modifies upload parameters' do + uploaded_params = [] + allow(client).to receive(:put_object) { |p| uploaded_params << p } + request_callback = lambda do |_p, params| + params[:storage_class] = 'GLACIER' + params + end + + uploader.upload(temp_dir, 'test-bucket', request_callback: request_callback) + expect(uploaded_params).to all(include(storage_class: 'GLACIER')) + end + end + + context 'progress callbacks' do + it 'reports progress' do + progress_calls = [] + + callback = proc do |bytes, files| + progress_calls << { total_bytes: bytes, files_completed: files } + end + + uploader.upload(temp_dir, 'test-bucket', recursive: false, progress_callback: callback) + expect(progress_calls.length).to eq(5) + end + end + end + end + end +end diff --git a/gems/aws-sdk-s3/spec/transfer_manager_spec.rb b/gems/aws-sdk-s3/spec/transfer_manager_spec.rb index 659efae471b..7b2adbe9097 100644 --- a/gems/aws-sdk-s3/spec/transfer_manager_spec.rb +++ b/gems/aws-sdk-s3/spec/transfer_manager_spec.rb @@ -51,6 +51,17 @@ module S3 end end + describe '#upload_directory' do + it 'returns upload results when upload succeeds' do + end + + it 'raises when upload errors' do + end + + it 'calls progress callback when given' do + end + end + describe '#upload_file' do let(:file) do Tempfile.new('ten-meg-file').tap do |f|
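The `#upload_directory` specs added to transfer_manager_spec.rb above are left as empty stubs. As a reference point, here is a minimal sketch of how the success case might be filled in; it assumes the stub-response client pattern used elsewhere in this suite, `require 'tmpdir'` for `Dir.mktmpdir`, and a two-file layout whose names, bucket, and expected counts are purely illustrative:

it 'returns upload results when upload succeeds' do
  client = Aws::S3::Client.new(stub_responses: true)
  tm = TransferManager.new(client: client)

  Dir.mktmpdir do |dir|
    # two small top-level files; a non-recursive upload should attempt both
    File.write(File.join(dir, 'a.txt'), 'a' * 1024)
    File.write(File.join(dir, 'b.txt'), 'b' * 1024)

    result = tm.upload_directory(dir, bucket: 'bucket')

    expect(result[:completed_uploads]).to eq(2)
    expect(result[:failed_uploads]).to eq(0)
  end
end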