diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketAclHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketAclHandler.java
new file mode 100644
index 00000000000..eb91935016a
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketAclHandler.java
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.s3.endpoint;
+
+import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
+import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT;
+import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED;
+import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError;
+import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+import javax.annotation.PostConstruct;
+import javax.ws.rs.core.Response;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.audit.S3GAction;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
+import org.apache.hadoop.ozone.s3.exception.OS3Exception;
+import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
+import org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.util.Time;
+import org.apache.http.HttpStatus;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Handler for bucket ACL operations (?acl query parameter).
+ * Implements PUT operations for bucket Access Control Lists.
+ *
+ * This handler extends EndpointBase to inherit all required functionality
+ * (configuration, headers, request context, audit logging, metrics, etc.).
+ */
+public class BucketAclHandler extends EndpointBase implements BucketOperationHandler {
+
+ private static final Logger LOG = LoggerFactory.getLogger(BucketAclHandler.class);
+
+ /**
+ * Determine if this handler should handle the current request.
+ * @return true if the request has the "acl" query parameter
+ */
+ private boolean shouldHandle() {
+ return queryParams().get(QueryParams.ACL) != null;
+ }
+
+ /**
+ * Implement acl put.
+ *
+ * see: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAcl.html
+ */
+ @Override
+ public Response handlePutRequest(String bucketName, InputStream body)
+ throws IOException, OS3Exception {
+
+ if (!shouldHandle()) {
+ return null; // Not responsible for this request
+ }
+
+ long startNanos = Time.monotonicNowNanos();
+ S3GAction s3GAction = S3GAction.PUT_ACL;
+
+ String grantReads = getHeaders().getHeaderString(S3Acl.GRANT_READ);
+ String grantWrites = getHeaders().getHeaderString(S3Acl.GRANT_WRITE);
+ String grantReadACP = getHeaders().getHeaderString(S3Acl.GRANT_READ_ACP);
+ String grantWriteACP = getHeaders().getHeaderString(S3Acl.GRANT_WRITE_ACP);
+ String grantFull = getHeaders().getHeaderString(S3Acl.GRANT_FULL_CONTROL);
+
+ try {
+ OzoneBucket bucket = getBucket(bucketName);
+ S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner());
+ OzoneVolume volume = getVolume();
+
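+ // An S3 bucket in Ozone is backed by a bucket inside the gateway's S3 volume,
+ // so the requested grants are translated into two ACL lists: one applied on
+ // the bucket and one applied on that volume.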
+ List<OzoneAcl> ozoneAclListOnBucket = new ArrayList<>();
+ List<OzoneAcl> ozoneAclListOnVolume = new ArrayList<>();
+
+ if (grantReads == null && grantWrites == null && grantReadACP == null
+ && grantWriteACP == null && grantFull == null) {
+ // Handle grants in body
+ S3BucketAcl putBucketAclRequest =
+ new PutBucketAclRequestUnmarshaller().readFrom(body);
+ ozoneAclListOnBucket.addAll(
+ S3Acl.s3AclToOzoneNativeAclOnBucket(putBucketAclRequest));
+ ozoneAclListOnVolume.addAll(
+ S3Acl.s3AclToOzoneNativeAclOnVolume(putBucketAclRequest));
+ } else {
+ // Handle grants in headers
+ if (grantReads != null) {
+ ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantReads,
+ S3Acl.ACLType.READ.getValue()));
+ ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantReads,
+ S3Acl.ACLType.READ.getValue()));
+ }
+ if (grantWrites != null) {
+ ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantWrites,
+ S3Acl.ACLType.WRITE.getValue()));
+ ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantWrites,
+ S3Acl.ACLType.WRITE.getValue()));
+ }
+ if (grantReadACP != null) {
+ ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantReadACP,
+ S3Acl.ACLType.READ_ACP.getValue()));
+ ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantReadACP,
+ S3Acl.ACLType.READ_ACP.getValue()));
+ }
+ if (grantWriteACP != null) {
+ ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantWriteACP,
+ S3Acl.ACLType.WRITE_ACP.getValue()));
+ ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantWriteACP,
+ S3Acl.ACLType.WRITE_ACP.getValue()));
+ }
+ if (grantFull != null) {
+ ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantFull,
+ S3Acl.ACLType.FULL_CONTROL.getValue()));
+ ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantFull,
+ S3Acl.ACLType.FULL_CONTROL.getValue()));
+ }
+ }
+
+ // A put request will reset all previous ACLs on bucket
+ bucket.setAcl(ozoneAclListOnBucket);
+
+ // A put request will reset input user/group's permission on volume
+ List<OzoneAcl> acls = bucket.getAcls();
+ List<OzoneAcl> aclsToRemoveOnVolume = new ArrayList<>();
+ List<OzoneAcl> currentAclsOnVolume = volume.getAcls();
+
+ // Remove input user/group's permission from Volume first
+ if (!currentAclsOnVolume.isEmpty()) {
+ for (OzoneAcl acl : acls) {
+ if (acl.getAclScope() == ACCESS) {
+ aclsToRemoveOnVolume.addAll(OzoneAclUtil.filterAclList(
+ acl.getName(), acl.getType(), currentAclsOnVolume));
+ }
+ }
+ for (OzoneAcl acl : aclsToRemoveOnVolume) {
+ volume.removeAcl(acl);
+ }
+ }
+
+ // Add new permission on Volume
+ for (OzoneAcl acl : ozoneAclListOnVolume) {
+ volume.addAcl(acl);
+ }
+
+ getMetrics().updatePutAclSuccessStats(startNanos);
+ auditWriteSuccess(s3GAction);
+ return Response.status(HttpStatus.SC_OK).build();
+
+ } catch (OMException exception) {
+ getMetrics().updatePutAclFailureStats(startNanos);
+ auditWriteFailure(s3GAction, exception);
+ if (exception.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
+ throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, exception);
+ } else if (isAccessDenied(exception)) {
+ throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, exception);
+ }
+ throw exception;
+ } catch (OS3Exception ex) {
+ getMetrics().updatePutAclFailureStats(startNanos);
+ auditWriteFailure(s3GAction, ex);
+ throw ex;
+ }
+ }
+
+ /**
+ * Convert ACL string to Ozone ACL on bucket.
+ *
+ * Example: x-amz-grant-write: id="111122223333", id="555566667777"
+ */
+ private List<OzoneAcl> getAndConvertAclOnBucket(
+ String value, String permission) throws OS3Exception {
+ return parseAndConvertAcl(value, permission, true);
+ }
+
+ /**
+ * Convert ACL string to Ozone ACL on volume.
+ */
+ private List<OzoneAcl> getAndConvertAclOnVolume(
+ String value, String permission) throws OS3Exception {
+ return parseAndConvertAcl(value, permission, false);
+ }
+
+ /**
+ * Parse ACL string and convert to Ozone ACLs.
+ *
+ * This is a common method extracted from getAndConvertAclOnBucket and
+ * getAndConvertAclOnVolume to reduce code duplication.
+ *
+ * @param value the ACL header value (e.g., "id=\"user1\",id=\"user2\"")
+ * @param permission the S3 permission type (READ, WRITE, etc.)
+ * @param isBucket true for bucket ACL, false for volume ACL
+ * @return list of OzoneAcl objects
+ * @throws OS3Exception if parsing fails or grantee type is not supported
+ */
+ private List<OzoneAcl> parseAndConvertAcl(
+ String value, String permission, boolean isBucket) throws OS3Exception {
+ List<OzoneAcl> ozoneAclList = new ArrayList<>();
+ if (StringUtils.isEmpty(value)) {
+ return ozoneAclList;
+ }
+
+ String[] subValues = value.split(",");
+ for (String acl : subValues) {
+ String[] part = acl.split("=");
+ if (part.length != 2) {
+ throw newError(S3ErrorTable.INVALID_ARGUMENT, acl);
+ }
+
+ S3Acl.ACLIdentityType type =
+ S3Acl.ACLIdentityType.getTypeFromHeaderType(part[0]);
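+ // Only canonical-user grantees ("id=...") are supported here; other grantee
+ // types such as "uri=" or "emailAddress=" are rejected with NotImplemented.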
+ if (type == null || !type.isSupported()) {
+ LOG.warn("S3 grantee {} is null or not supported", part[0]);
+ throw newError(NOT_IMPLEMENTED, part[0]);
+ }
+
+ String userId = part[1];
+
+ if (isBucket) {
+ // Build ACL on Bucket
+ EnumSet<IAccessAuthorizer.ACLType> aclsOnBucket =
+ S3Acl.getOzoneAclOnBucketFromS3Permission(permission);
+ ozoneAclList.add(OzoneAcl.of(USER, userId, DEFAULT, aclsOnBucket));
+ ozoneAclList.add(OzoneAcl.of(USER, userId, ACCESS, aclsOnBucket));
+ } else {
+ // Build ACL on Volume
+ EnumSet<IAccessAuthorizer.ACLType> aclsOnVolume =
+ S3Acl.getOzoneAclOnVolumeFromS3Permission(permission);
+ ozoneAclList.add(OzoneAcl.of(USER, userId, ACCESS, aclsOnVolume));
+ }
+ }
+
+ return ozoneAclList;
+ }
+
+ @Override
+ @PostConstruct
+ public void init() {
+ // No initialization needed for BucketAclHandler
+ }
+}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
index 6b888ce12fc..93b525186df 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.ozone.s3.endpoint;
-import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
import static org.apache.hadoop.ozone.OzoneConsts.ETAG;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
import static org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder;
@@ -25,7 +24,6 @@
import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_LIST_KEYS_SHALLOW_ENABLED_DEFAULT;
import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_LIST_MAX_KEYS_LIMIT;
import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_LIST_MAX_KEYS_LIMIT_DEFAULT;
-import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED;
import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError;
import static org.apache.hadoop.ozone.s3.util.S3Consts.ENCODING_TYPE;
import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapInQuotes;
@@ -33,7 +31,6 @@
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
-import java.util.EnumSet;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
@@ -60,11 +57,9 @@
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneMultipartUploadList;
-import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
import org.apache.hadoop.ozone.om.helpers.ErrorInfo;
-import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
import org.apache.hadoop.ozone.s3.commontypes.EncodingTypeObject;
import org.apache.hadoop.ozone.s3.commontypes.KeyMetadata;
import org.apache.hadoop.ozone.s3.endpoint.MultiDeleteRequest.DeleteObject;
@@ -76,7 +71,6 @@
import org.apache.hadoop.ozone.s3.util.ContinueToken;
import org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams;
import org.apache.hadoop.ozone.s3.util.S3StorageType;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.util.Time;
import org.apache.http.HttpStatus;
import org.slf4j.Logger;
@@ -96,6 +90,8 @@ public class BucketEndpoint extends EndpointBase {
private boolean listKeysShallowEnabled;
private int maxKeysLimit = 1000;
+ private List<BucketOperationHandler> handlers;
+
/**
* Rest endpoint to list objects in a specific bucket.
*
@@ -309,17 +305,28 @@ public Response put(
@PathParam(BUCKET) String bucketName,
InputStream body
) throws IOException, OS3Exception {
+
+ // Chain of responsibility: let each handler try to handle the request
+ for (BucketOperationHandler handler : handlers) {
+ Response response = handler.handlePutRequest(bucketName, body);
+ if (response != null) {
+ return response; // Handler handled the request
+ }
+ }
+
+ // No handler handled the request; fall back to the default operation: create the bucket
+ return handleCreateBucket(bucketName);
+ }
+
+ /**
+ * Default PUT bucket operation (create bucket).
+ */
+ private Response handleCreateBucket(String bucketName)
+ throws IOException, OS3Exception {
long startNanos = Time.monotonicNowNanos();
S3GAction s3GAction = S3GAction.CREATE_BUCKET;
try {
- final String aclMarker = queryParams().get(QueryParams.ACL);
- if (aclMarker != null) {
- s3GAction = S3GAction.PUT_ACL;
- Response response = putAcl(bucketName, body);
- auditWriteSuccess(s3GAction);
- return response;
- }
String location = createS3Bucket(bucketName);
auditWriteSuccess(s3GAction);
getMetrics().updateCreateBucketSuccessStats(startNanos);
@@ -572,174 +579,6 @@ public S3BucketAcl getAcl(String bucketName)
}
}
- /**
- * Implement acl put.
- *
- * see: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAcl.html
- */
- public Response putAcl(String bucketName,
- InputStream body) throws IOException, OS3Exception {
- long startNanos = Time.monotonicNowNanos();
- String grantReads = getHeaders().getHeaderString(S3Acl.GRANT_READ);
- String grantWrites = getHeaders().getHeaderString(S3Acl.GRANT_WRITE);
- String grantReadACP = getHeaders().getHeaderString(S3Acl.GRANT_READ_CAP);
- String grantWriteACP = getHeaders().getHeaderString(S3Acl.GRANT_WRITE_CAP);
- String grantFull = getHeaders().getHeaderString(S3Acl.GRANT_FULL_CONTROL);
-
- try {
- OzoneBucket bucket = getBucket(bucketName);
- S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner());
- OzoneVolume volume = getVolume();
-
- List<OzoneAcl> ozoneAclListOnBucket = new ArrayList<>();
- List<OzoneAcl> ozoneAclListOnVolume = new ArrayList<>();
-
- if (grantReads == null && grantWrites == null && grantReadACP == null
- && grantWriteACP == null && grantFull == null) {
- S3BucketAcl putBucketAclRequest =
- new PutBucketAclRequestUnmarshaller().readFrom(body);
- // Handle grants in body
- ozoneAclListOnBucket.addAll(
- S3Acl.s3AclToOzoneNativeAclOnBucket(putBucketAclRequest));
- ozoneAclListOnVolume.addAll(
- S3Acl.s3AclToOzoneNativeAclOnVolume(putBucketAclRequest));
- } else {
-
- // Handle grants in headers
- if (grantReads != null) {
- ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantReads,
- S3Acl.ACLType.READ.getValue()));
- ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantReads,
- S3Acl.ACLType.READ.getValue()));
- }
- if (grantWrites != null) {
- ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantWrites,
- S3Acl.ACLType.WRITE.getValue()));
- ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantWrites,
- S3Acl.ACLType.WRITE.getValue()));
- }
- if (grantReadACP != null) {
- ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantReadACP,
- S3Acl.ACLType.READ_ACP.getValue()));
- ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantReadACP,
- S3Acl.ACLType.READ_ACP.getValue()));
- }
- if (grantWriteACP != null) {
- ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantWriteACP,
- S3Acl.ACLType.WRITE_ACP.getValue()));
- ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantWriteACP,
- S3Acl.ACLType.WRITE_ACP.getValue()));
- }
- if (grantFull != null) {
- ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantFull,
- S3Acl.ACLType.FULL_CONTROL.getValue()));
- ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantFull,
- S3Acl.ACLType.FULL_CONTROL.getValue()));
- }
- }
- // A put request will reset all previous ACLs on bucket
- bucket.setAcl(ozoneAclListOnBucket);
- // A put request will reset input user/group's permission on volume
- List<OzoneAcl> acls = bucket.getAcls();
- List<OzoneAcl> aclsToRemoveOnVolume = new ArrayList<>();
- List<OzoneAcl> currentAclsOnVolume = volume.getAcls();
- // Remove input user/group's permission from Volume first
- if (!currentAclsOnVolume.isEmpty()) {
- for (OzoneAcl acl : acls) {
- if (acl.getAclScope() == ACCESS) {
- aclsToRemoveOnVolume.addAll(OzoneAclUtil.filterAclList(
- acl.getName(), acl.getType(), currentAclsOnVolume));
- }
- }
- for (OzoneAcl acl : aclsToRemoveOnVolume) {
- volume.removeAcl(acl);
- }
- }
- // Add new permission on Volume
- for (OzoneAcl acl : ozoneAclListOnVolume) {
- volume.addAcl(acl);
- }
- } catch (OMException exception) {
- getMetrics().updatePutAclFailureStats(startNanos);
- auditWriteFailure(S3GAction.PUT_ACL, exception);
- if (exception.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
- throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, exception);
- } else if (isAccessDenied(exception)) {
- throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, exception);
- }
- throw exception;
- } catch (OS3Exception ex) {
- getMetrics().updatePutAclFailureStats(startNanos);
- throw ex;
- }
- getMetrics().updatePutAclSuccessStats(startNanos);
- return Response.status(HttpStatus.SC_OK).build();
- }
-
- /**
- * Example: x-amz-grant-write: \
- * uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", \
- * id="555566667777".
- */
- private List<OzoneAcl> getAndConvertAclOnBucket(String value,
- String permission)
- throws OS3Exception {
- List<OzoneAcl> ozoneAclList = new ArrayList<>();
- if (StringUtils.isEmpty(value)) {
- return ozoneAclList;
- }
- String[] subValues = value.split(",");
- for (String acl : subValues) {
- String[] part = acl.split("=");
- if (part.length != 2) {
- throw newError(S3ErrorTable.INVALID_ARGUMENT, acl);
- }
- S3Acl.ACLIdentityType type =
- S3Acl.ACLIdentityType.getTypeFromHeaderType(part[0]);
- if (type == null || !type.isSupported()) {
- LOG.warn("S3 grantee {} is null or not supported", part[0]);
- throw newError(NOT_IMPLEMENTED, part[0]);
- }
- // Build ACL on Bucket
- EnumSet<IAccessAuthorizer.ACLType> aclsOnBucket = S3Acl.getOzoneAclOnBucketFromS3Permission(permission);
- OzoneAcl defaultOzoneAcl = OzoneAcl.of(
- IAccessAuthorizer.ACLIdentityType.USER, part[1], OzoneAcl.AclScope.DEFAULT, aclsOnBucket
- );
- OzoneAcl accessOzoneAcl = OzoneAcl.of(IAccessAuthorizer.ACLIdentityType.USER, part[1], ACCESS, aclsOnBucket);
- ozoneAclList.add(defaultOzoneAcl);
- ozoneAclList.add(accessOzoneAcl);
- }
- return ozoneAclList;
- }
-
- private List<OzoneAcl> getAndConvertAclOnVolume(String value,
- String permission)
- throws OS3Exception {
- List<OzoneAcl> ozoneAclList = new ArrayList<>();
- if (StringUtils.isEmpty(value)) {
- return ozoneAclList;
- }
- String[] subValues = value.split(",");
- for (String acl : subValues) {
- String[] part = acl.split("=");
- if (part.length != 2) {
- throw newError(S3ErrorTable.INVALID_ARGUMENT, acl);
- }
- S3Acl.ACLIdentityType type =
- S3Acl.ACLIdentityType.getTypeFromHeaderType(part[0]);
- if (type == null || !type.isSupported()) {
- LOG.warn("S3 grantee {} is null or not supported", part[0]);
- throw newError(NOT_IMPLEMENTED, part[0]);
- }
- // Build ACL on Volume
- EnumSet<IAccessAuthorizer.ACLType> aclsOnVolume =
- S3Acl.getOzoneAclOnVolumeFromS3Permission(permission);
- OzoneAcl accessOzoneAcl = OzoneAcl.of(IAccessAuthorizer.ACLIdentityType.USER, part[1], ACCESS, aclsOnVolume);
- ozoneAclList.add(accessOzoneAcl);
- }
- return ozoneAclList;
- }
-
private void addKey(ListObjectResponse response, OzoneKey next) {
KeyMetadata keyMetadata = new KeyMetadata();
keyMetadata.setKey(EncodingTypeObject.createNullable(next.getName(),
@@ -766,5 +605,13 @@ public void init() {
maxKeysLimit = getOzoneConfiguration().getInt(
OZONE_S3G_LIST_MAX_KEYS_LIMIT,
OZONE_S3G_LIST_MAX_KEYS_LIMIT_DEFAULT);
+
+ // Initialize PUT handlers
+ BucketAclHandler aclHandler = new BucketAclHandler();
+ copyDependenciesTo(aclHandler);
+ aclHandler.initialization();
+
+ handlers = new ArrayList<>();
+ handlers.add(aclHandler);
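+ // Further subresource handlers (e.g., ?lifecycle, ?notification) can be
+ // registered here in the same way once they are implemented.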
}
}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java
new file mode 100644
index 00000000000..745353a99f7
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.s3.endpoint;
+
+import java.io.IOException;
+import java.io.InputStream;
+import javax.ws.rs.core.Response;
+import org.apache.hadoop.ozone.s3.exception.OS3Exception;
+
+/**
+ * Interface for handling bucket operations using the chain-of-responsibility pattern.
+ * Each implementation handles a specific S3 bucket subresource operation
+ * (e.g., ?acl, ?lifecycle, ?notification).
+ *
+ * Implementations should extend EndpointBase to inherit all required functionality
+ * (configuration, headers, request context, audit logging, metrics, etc.).
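+ *
+ * <p>A registering endpoint dispatches the request like this (see
+ * {@code BucketEndpoint#put}):
+ * <pre>
+ *   for (BucketOperationHandler handler : handlers) {
+ *     Response response = handler.handlePutRequest(bucketName, body);
+ *     if (response != null) {
+ *       return response;
+ *     }
+ *   }
+ * </pre>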
+ */
+public interface BucketOperationHandler {
+
+ /**
+ * Handle the bucket PUT operation if this handler is responsible for it.
+ * The handler inspects the request (query parameters, headers, etc.) to determine
+ * if it should handle the request.
+ *
+ * @param bucketName the name of the bucket
+ * @param body the request body stream
+ * @return Response if this handler handles the request, null otherwise
+ * @throws IOException if an I/O error occurs
+ * @throws OS3Exception if an S3-specific error occurs
+ */
+ default Response handlePutRequest(String bucketName, InputStream body)
+ throws IOException, OS3Exception {
+ return null;
+ }
+}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
index dbc91c1e55e..ce13f754544 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
@@ -543,6 +543,19 @@ void setOzoneConfiguration(OzoneConfiguration conf) {
ozoneConfiguration = conf;
}
+ /**
+ * Copy dependencies from this endpoint to another endpoint.
+ * Used for initializing handler instances.
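+ *
+ * Handlers created manually with {@code new} (e.g. BucketAclHandler in
+ * BucketEndpoint#init) are not injected by the container, so the parent
+ * endpoint's client, configuration, headers and request context are copied
+ * over explicitly before {@code initialization()} is called.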
+ */
+ protected void copyDependenciesTo(EndpointBase target) {
+ target.setClient(this.client);
+ target.setOzoneConfiguration(this.ozoneConfiguration);
+ target.setContext(this.context);
+ target.setHeaders(this.headers);
+ target.setRequestIdentifier(this.requestIdentifier);
+ target.setSignatureInfo(this.signatureInfo);
+ }
+
protected OzoneConfiguration getOzoneConfiguration() {
return ozoneConfiguration;
}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java
index ed1df4e3c67..60f2a33fe31 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java
@@ -41,8 +41,8 @@ public final class S3Acl {
// ACL put related headers
public static final String GRANT_READ = "x-amz-grant-read";
public static final String GRANT_WRITE = "x-amz-grant-write";
- public static final String GRANT_READ_CAP = "x-amz-grant-read-acp";
- public static final String GRANT_WRITE_CAP = "x-amz-grant-write-acp";
+ public static final String GRANT_READ_ACP = "x-amz-grant-read-acp";
+ public static final String GRANT_WRITE_ACP = "x-amz-grant-write-acp";
public static final String GRANT_FULL_CONTROL = "x-amz-grant-full-control";
// Not supported headers at current stage, may support it in future
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java
index 13db3962a89..1323c13fc0c 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java
@@ -150,6 +150,10 @@ public static EndpointBuilder<BucketEndpoint> newBucketEndpointBuilder() {
return new EndpointBuilder<>(BucketEndpoint::new);
}
+ public static EndpointBuilder<BucketAclHandler> newBucketAclHandlerBuilder() {
+ return new EndpointBuilder<>(BucketAclHandler::new);
+ }
+
public static EndpointBuilder<ObjectEndpoint> newObjectEndpointBuilder() {
return new EndpointBuilder<>(ObjectEndpoint::new);
}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java
index 1598a48a989..e5326711100 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java
@@ -128,7 +128,7 @@ public void testWrite() throws Exception {
@Test
public void testReadACP() throws Exception {
when(parameterMap.containsKey(ACL_MARKER)).thenReturn(true);
- when(headers.getHeaderString(S3Acl.GRANT_READ_CAP))
+ when(headers.getHeaderString(S3Acl.GRANT_READ_ACP))
.thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root");
Response response =
bucketEndpoint.put(BUCKET_NAME, null);
@@ -143,7 +143,7 @@ public void testReadACP() throws Exception {
@Test
public void testWriteACP() throws Exception {
when(parameterMap.containsKey(ACL_MARKER)).thenReturn(true);
- when(headers.getHeaderString(S3Acl.GRANT_WRITE_CAP))
+ when(headers.getHeaderString(S3Acl.GRANT_WRITE_ACP))
.thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root");
Response response =
bucketEndpoint.put(BUCKET_NAME, null);
@@ -175,9 +175,9 @@ public void testCombination() throws Exception {
.thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root");
when(headers.getHeaderString(S3Acl.GRANT_WRITE))
.thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root");
- when(headers.getHeaderString(S3Acl.GRANT_READ_CAP))
+ when(headers.getHeaderString(S3Acl.GRANT_READ_ACP))
.thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root");
- when(headers.getHeaderString(S3Acl.GRANT_WRITE_CAP))
+ when(headers.getHeaderString(S3Acl.GRANT_WRITE_ACP))
.thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root");
when(headers.getHeaderString(S3Acl.GRANT_FULL_CONTROL))
.thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root");
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAclHandler.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAclHandler.java
new file mode 100644
index 00000000000..1cf37c6b5e3
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAclHandler.java
@@ -0,0 +1,259 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.s3.endpoint;
+
+import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertErrorResponse;
+import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertSucceeds;
+import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
+import java.util.stream.Stream;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.Response;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.S3GAction;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientStub;
+import org.apache.hadoop.ozone.s3.exception.OS3Exception;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+
+/**
+ * Test class for BucketAclHandler.
+ */
+public class TestBucketAclHandler {
+
+ private static final String BUCKET_NAME = OzoneConsts.S3_BUCKET;
+ private OzoneClient client;
+ private BucketAclHandler aclHandler;
+ private HttpHeaders headers;
+
+ @BeforeEach
+ public void setup() throws IOException {
+ client = new OzoneClientStub();
+ client.getObjectStore().createS3Bucket(BUCKET_NAME);
+
+ headers = mock(HttpHeaders.class);
+
+ // Build BucketAclHandler using EndpointBuilder since it extends EndpointBase
+ aclHandler = EndpointBuilder.newBucketAclHandlerBuilder()
+ .setClient(client)
+ .setHeaders(headers)
+ .build();
+
+ // Set up query parameter for ACL operation (default for most tests)
+ aclHandler.queryParamsForTest().set("acl", "");
+ }
+
+ @AfterEach
+ public void clean() throws IOException {
+ if (client != null) {
+ client.close();
+ }
+ }
+
+ @Test
+ public void testHandlePutRequestWithAclQueryParam() throws Exception {
+ when(headers.getHeaderString(S3Acl.GRANT_READ))
+ .thenReturn("id=\"testuser\"");
+
+ assertNotNull(aclHandler.handlePutRequest(BUCKET_NAME, null),
+ "Handler should handle request with ?acl param");
+ }
+
+ @Test
+ public void testHandlePutRequestWithoutAclQueryParam() throws Exception {
+ // Remove "acl" query parameter - handler should not handle request
+ aclHandler.queryParamsForTest().unset("acl");
+ when(headers.getHeaderString(S3Acl.GRANT_READ))
+ .thenReturn("id=\"testuser\"");
+
+ Response response = aclHandler.handlePutRequest(BUCKET_NAME, null);
+
+ assertNull(response, "Handler should return null without ?acl param");
+ }
+
+ private static Stream<String> grantHeaderNames() {
+ return Stream.of(
+ S3Acl.GRANT_READ,
+ S3Acl.GRANT_WRITE,
+ S3Acl.GRANT_READ_ACP,
+ S3Acl.GRANT_WRITE_ACP,
+ S3Acl.GRANT_FULL_CONTROL
+ );
+ }
+
+ @ParameterizedTest
+ @MethodSource("grantHeaderNames")
+ public void testHandlePutRequestWithGrantHeaders(String headerName) throws Exception {
+ when(headers.getHeaderString(headerName))
+ .thenReturn("id=\"testuser\"");
+
+ assertSucceeds(() -> aclHandler.handlePutRequest(BUCKET_NAME, null));
+ }
+
+ @Test
+ public void testHandlePutRequestWithMultipleHeaders() throws Exception {
+ when(headers.getHeaderString(S3Acl.GRANT_READ))
+ .thenReturn("id=\"testuser1\"");
+ when(headers.getHeaderString(S3Acl.GRANT_WRITE))
+ .thenReturn("id=\"testuser2\"");
+
+ assertSucceeds(() -> aclHandler.handlePutRequest(BUCKET_NAME, null));
+ }
+
+ @Test
+ public void testHandlePutRequestWithUnsupportedGranteeType() {
+ when(headers.getHeaderString(S3Acl.GRANT_READ))
+ .thenReturn("uri=\"http://example.com\"");
+
+ assertErrorResponse(NOT_IMPLEMENTED,
+ () -> aclHandler.handlePutRequest(BUCKET_NAME, null));
+ }
+
+ @Test
+ public void testHandlePutRequestWithEmailAddressType() {
+ when(headers.getHeaderString(S3Acl.GRANT_READ))
+ .thenReturn("emailAddress=\"test@example.com\"");
+
+ assertErrorResponse(NOT_IMPLEMENTED,
+ () -> aclHandler.handlePutRequest(BUCKET_NAME, null));
+ }
+
+ @Test
+ public void testHandlePutRequestBucketNotFound() {
+ when(headers.getHeaderString(S3Acl.GRANT_READ))
+ .thenReturn("id=\"testuser\"");
+
+ assertThrows(OS3Exception.class,
+ () -> aclHandler.handlePutRequest("nonexistent-bucket", null),
+ "Should throw OS3Exception for non-existent bucket");
+ }
+
+ @Test
+ public void testHandlePutRequestWithBody() throws Exception {
+ String aclXml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +
+ "<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\n" +
+ "  <Owner>\n" +
+ "    <ID>testowner</ID>\n" +
+ "    <DisplayName>Test Owner</DisplayName>\n" +
+ "  </Owner>\n" +
+ "  <AccessControlList>\n" +
+ "    <Grant>\n" +
+ "      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"" +
+ " xsi:type=\"CanonicalUser\">\n" +
+ "        <ID>testuser</ID>\n" +
+ "      </Grantee>\n" +
+ "      <Permission>READ</Permission>\n" +
+ "    </Grant>\n" +
+ "  </AccessControlList>\n" +
+ "</AccessControlPolicy>";
+
+ InputStream body = new ByteArrayInputStream(
+ aclXml.getBytes(StandardCharsets.UTF_8));
+
+ assertSucceeds(() -> aclHandler.handlePutRequest(BUCKET_NAME, body));
+ }
+
+ @Test
+ public void testHandlePutRequestWithInvalidHeaderFormat() {
+ when(headers.getHeaderString(S3Acl.GRANT_READ))
+ .thenReturn("invalid-format");
+
+ assertThrows(OS3Exception.class,
+ () -> aclHandler.handlePutRequest(BUCKET_NAME, null),
+ "Should throw OS3Exception for invalid header format");
+ }
+
+ @Test
+ public void testHandlePutRequestWithMultipleGrantees() throws Exception {
+ when(headers.getHeaderString(S3Acl.GRANT_READ))
+ .thenReturn("id=\"user1\",id=\"user2\"");
+
+ assertSucceeds(() -> aclHandler.handlePutRequest(BUCKET_NAME, null));
+ }
+
+ @Test
+ public void testPutAclReplacesExistingAcls() throws Exception {
+ // Set initial ACL
+ when(headers.getHeaderString(S3Acl.GRANT_READ))
+ .thenReturn("id=\"user1\"");
+ when(headers.getHeaderString(S3Acl.GRANT_WRITE))
+ .thenReturn(null);
+
+ aclHandler.handlePutRequest(BUCKET_NAME, null);
+
+ // Replace with new ACL
+ when(headers.getHeaderString(S3Acl.GRANT_READ))
+ .thenReturn(null);
+ when(headers.getHeaderString(S3Acl.GRANT_WRITE))
+ .thenReturn("id=\"user2\"");
+
+ assertSucceeds(() -> aclHandler.handlePutRequest(BUCKET_NAME, null));
+ }
+
+ @Test
+ public void testAuditLoggingOnBucketNotFound() throws Exception {
+ BucketAclHandler spyHandler = spy(aclHandler);
+
+ when(headers.getHeaderString(S3Acl.GRANT_READ))
+ .thenReturn("id=\"testuser\"");
+
+ // This should throw exception for non-existent bucket
+ assertThrows(OS3Exception.class,
+ () -> spyHandler.handlePutRequest("nonexistent-bucket", null));
+
+ // Verify that auditWriteFailure was called with PUT_ACL action
+ verify(spyHandler, times(1)).auditWriteFailure(
+ eq(S3GAction.PUT_ACL),
+ any(OS3Exception.class));
+ }
+
+ @Test
+ public void testAuditLoggingOnInvalidArgument() throws Exception {
+ BucketAclHandler spyHandler = spy(aclHandler);
+
+ // Invalid format will trigger OS3Exception
+ when(headers.getHeaderString(S3Acl.GRANT_READ))
+ .thenReturn("invalid-format");
+
+ assertThrows(OS3Exception.class,
+ () -> spyHandler.handlePutRequest(BUCKET_NAME, null));
+
+ // Verify that auditWriteFailure was called with PUT_ACL action
+ verify(spyHandler, times(1)).auditWriteFailure(
+ eq(S3GAction.PUT_ACL),
+ any(OS3Exception.class));
+ }
+}