From aed6911b5b8be891d85891356f418b3d86137254 Mon Sep 17 00:00:00 2001 From: echonesis Date: Mon, 15 Dec 2025 01:04:33 +0800 Subject: [PATCH 01/36] HDDS-14123. Refactor BucketEndpoint#Put method --- .../hadoop/ozone/s3/endpoint/AclHandler.java | 261 +++++++++++++ .../ozone/s3/endpoint/BucketEndpoint.java | 219 +++-------- .../s3/endpoint/BucketEndpointContext.java | 108 ++++++ .../s3/endpoint/BucketOperationHandler.java | 60 +++ .../BucketOperationHandlerFactory.java | 124 +++++++ .../hadoop/ozone/s3/endpoint/S3Acl.java | 4 +- .../ozone/s3/endpoint/TestAclHandler.java | 343 ++++++++++++++++++ .../ozone/s3/endpoint/TestBucketAcl.java | 8 +- .../endpoint/TestBucketEndpointContext.java | 211 +++++++++++ .../TestBucketOperationHandlerFactory.java | 175 +++++++++ 10 files changed, 1330 insertions(+), 183 deletions(-) create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java create mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java create mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java create mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java new file mode 100644 index 000000000000..9d1e05eaa6b9 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java @@ -0,0 +1,261 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.List; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.Response; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.audit.S3GAction; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.http.HttpStatus; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Handler for bucket ACL operations (?acl query parameter). + * Implements PUT operations for bucket Access Control Lists. + */ +public class AclHandler implements BucketOperationHandler { + + private static final Logger LOG = LoggerFactory.getLogger(AclHandler.class); + + @Override + public String getQueryParamName() { + return "acl"; + } + + /** + * Implement acl put. + *
+ * see: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAcl.html + */ + @Override + public Response handlePutRequest( + String bucketName, + InputStream body, + HttpHeaders headers, + BucketEndpointContext context, + long startNanos) throws IOException, OS3Exception { + + String grantReads = headers.getHeaderString(S3Acl.GRANT_READ); + String grantWrites = headers.getHeaderString(S3Acl.GRANT_WRITE); + String grantReadACP = headers.getHeaderString(S3Acl.GRANT_READ_ACP); + String grantWriteACP = headers.getHeaderString(S3Acl.GRANT_WRITE_ACP); + String grantFull = headers.getHeaderString(S3Acl.GRANT_FULL_CONTROL); + + try { + OzoneBucket bucket = context.getBucket(bucketName); + S3Owner.verifyBucketOwnerCondition(headers, bucketName, bucket.getOwner()); + OzoneVolume volume = context.getVolume(); + + List ozoneAclListOnBucket = new ArrayList<>(); + List ozoneAclListOnVolume = new ArrayList<>(); + + if (grantReads == null && grantWrites == null && grantReadACP == null + && grantWriteACP == null && grantFull == null) { + // Handle grants in body + S3BucketAcl putBucketAclRequest = + new PutBucketAclRequestUnmarshaller().readFrom(body); + ozoneAclListOnBucket.addAll( + S3Acl.s3AclToOzoneNativeAclOnBucket(putBucketAclRequest)); + ozoneAclListOnVolume.addAll( + S3Acl.s3AclToOzoneNativeAclOnVolume(putBucketAclRequest)); + } else { + // Handle grants in headers + if (grantReads != null) { + ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantReads, + S3Acl.ACLType.READ.getValue())); + ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantReads, + S3Acl.ACLType.READ.getValue())); + } + if (grantWrites != null) { + ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantWrites, + S3Acl.ACLType.WRITE.getValue())); + ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantWrites, + S3Acl.ACLType.WRITE.getValue())); + } + if (grantReadACP != null) { + ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantReadACP, + S3Acl.ACLType.READ_ACP.getValue())); + ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantReadACP, + S3Acl.ACLType.READ_ACP.getValue())); + } + if (grantWriteACP != null) { + ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantWriteACP, + S3Acl.ACLType.WRITE_ACP.getValue())); + ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantWriteACP, + S3Acl.ACLType.WRITE_ACP.getValue())); + } + if (grantFull != null) { + ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantFull, + S3Acl.ACLType.FULL_CONTROL.getValue())); + ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantFull, + S3Acl.ACLType.FULL_CONTROL.getValue())); + } + } + + // A put request will reset all previous ACLs on bucket + bucket.setAcl(ozoneAclListOnBucket); + + // A put request will reset input user/group's permission on volume + List acls = bucket.getAcls(); + List aclsToRemoveOnVolume = new ArrayList<>(); + List currentAclsOnVolume = volume.getAcls(); + + // Remove input user/group's permission from Volume first + if (!currentAclsOnVolume.isEmpty()) { + for (OzoneAcl acl : acls) { + if (acl.getAclScope() == ACCESS) { + aclsToRemoveOnVolume.addAll(OzoneAclUtil.filterAclList( + acl.getName(), acl.getType(), currentAclsOnVolume)); + } + } + for (OzoneAcl acl : aclsToRemoveOnVolume) { + volume.removeAcl(acl); + } + } + + // Add new permission on Volume + for (OzoneAcl acl : ozoneAclListOnVolume) { + volume.addAcl(acl); + } + + context.getEndpoint().getMetrics().updatePutAclSuccessStats(startNanos); + return Response.status(HttpStatus.SC_OK).build(); + + } catch (OMException 
exception) { + context.getEndpoint().getMetrics().updatePutAclFailureStats(startNanos); + context.auditWriteFailure(S3GAction.PUT_ACL, exception); + if (exception.getResult() == ResultCodes.BUCKET_NOT_FOUND) { + throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, exception); + } else if (context.isAccessDenied(exception)) { + throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, exception); + } + throw exception; + } catch (OS3Exception ex) { + context.getEndpoint().getMetrics().updatePutAclFailureStats(startNanos); + context.auditWriteFailure(S3GAction.PUT_ACL, ex); + throw ex; + } + } + + /** + * Convert ACL string to Ozone ACL on bucket. + * + * Example: x-amz-grant-write: id="111122223333", id="555566667777" + */ + private List getAndConvertAclOnBucket(String value, + String permission) + throws OS3Exception { + return parseAndConvertAcl(value, permission, true); + } + + /** + * Convert ACL string to Ozone ACL on volume. + */ + private List getAndConvertAclOnVolume(String value, + String permission) + throws OS3Exception { + return parseAndConvertAcl(value, permission, false); + } + + /** + * Parse ACL string and convert to Ozone ACLs. + * + * This is a common method extracted from getAndConvertAclOnBucket and + * getAndConvertAclOnVolume to reduce code duplication. + * + * @param value the ACL header value (e.g., "id=\"user1\",id=\"user2\"") + * @param permission the S3 permission type (READ, WRITE, etc.) + * @param isBucket true for bucket ACL, false for volume ACL + * @return list of OzoneAcl objects + * @throws OS3Exception if parsing fails or grantee type is not supported + */ + private List parseAndConvertAcl(String value, String permission, + boolean isBucket) + throws OS3Exception { + List ozoneAclList = new ArrayList<>(); + if (StringUtils.isEmpty(value)) { + return ozoneAclList; + } + + String[] subValues = value.split(","); + for (String acl : subValues) { + String[] part = acl.split("="); + if (part.length != 2) { + throw newError(S3ErrorTable.INVALID_ARGUMENT, acl); + } + + S3Acl.ACLIdentityType type = + S3Acl.ACLIdentityType.getTypeFromHeaderType(part[0]); + if (type == null || !type.isSupported()) { + LOG.warn("S3 grantee {} is null or not supported", part[0]); + throw newError(NOT_IMPLEMENTED, part[0]); + } + + String userId = part[1]; + + if (isBucket) { + // Build ACL on Bucket + EnumSet aclsOnBucket = + S3Acl.getOzoneAclOnBucketFromS3Permission(permission); + ozoneAclList.add(OzoneAcl.of( + IAccessAuthorizer.ACLIdentityType.USER, + userId, + OzoneAcl.AclScope.DEFAULT, + aclsOnBucket + )); + ozoneAclList.add(OzoneAcl.of( + IAccessAuthorizer.ACLIdentityType.USER, + userId, + ACCESS, + aclsOnBucket + )); + } else { + // Build ACL on Volume + EnumSet aclsOnVolume = + S3Acl.getOzoneAclOnVolumeFromS3Permission(permission); + ozoneAclList.add(OzoneAcl.of( + IAccessAuthorizer.ACLIdentityType.USER, + userId, + ACCESS, + aclsOnVolume + )); + } + } + + return ozoneAclList; + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index c808f0cce761..31f7a361627d 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.s3.endpoint; -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static 
org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder; @@ -25,7 +24,6 @@ import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_LIST_KEYS_SHALLOW_ENABLED_DEFAULT; import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_LIST_MAX_KEYS_LIMIT; import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_LIST_MAX_KEYS_LIMIT_DEFAULT; -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; import static org.apache.hadoop.ozone.s3.util.S3Consts.ENCODING_TYPE; import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapInQuotes; @@ -34,7 +32,7 @@ import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; -import java.util.EnumSet; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -64,11 +62,9 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneKey; import org.apache.hadoop.ozone.client.OzoneMultipartUploadList; -import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.om.helpers.ErrorInfo; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.s3.commontypes.EncodingTypeObject; import org.apache.hadoop.ozone.s3.commontypes.KeyMetadata; import org.apache.hadoop.ozone.s3.endpoint.MultiDeleteRequest.DeleteObject; @@ -79,7 +75,6 @@ import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.hadoop.ozone.s3.util.ContinueToken; import org.apache.hadoop.ozone.s3.util.S3StorageType; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.util.Time; import org.apache.http.HttpStatus; import org.slf4j.Logger; @@ -94,15 +89,29 @@ public class BucketEndpoint extends EndpointBase { private static final Logger LOG = LoggerFactory.getLogger(BucketEndpoint.class); + private static final BucketOperationHandlerFactory HANDLER_FACTORY = + new BucketOperationHandlerFactory(); + @Context private HttpHeaders headers; private boolean listKeysShallowEnabled; private int maxKeysLimit = 1000; + private BucketEndpointContext context; + @Inject private OzoneConfiguration ozoneConfiguration; + public BucketEndpoint() { + super(); + this.context = new BucketEndpointContext(this); + } + + private BucketEndpointContext getContext() { + return context; + } + /** * Rest endpoint to list objects in a specific bucket. *
@@ -321,13 +330,25 @@ public Response put(@PathParam("bucket") String bucketName, S3GAction s3GAction = S3GAction.CREATE_BUCKET; try { - if (aclMarker != null) { - s3GAction = S3GAction.PUT_ACL; - Response response = putAcl(bucketName, body); + // Build map of query parameters + Map queryParams = new HashMap<>(); + queryParams.put("acl", aclMarker); + // Future handlers: queryParams.put("lifecycle", lifecycleMarker); + + // Check for subresource operations using handlers + String queryParam = HANDLER_FACTORY.findFirstSupportedQueryParam(queryParams); + + if (queryParam != null) { + BucketOperationHandler handler = HANDLER_FACTORY.getHandler(queryParam); + // Delegate to specific handler + s3GAction = getActionForQueryParam(queryParam); + Response response = handler.handlePutRequest( + bucketName, body, headers, getContext(), startNanos); AUDIT.logWriteSuccess( buildAuditMessageForSuccess(s3GAction, getAuditParameters())); return response; } + String location = createS3Bucket(bucketName); AUDIT.logWriteSuccess( buildAuditMessageForSuccess(s3GAction, getAuditParameters())); @@ -348,6 +369,18 @@ public Response put(@PathParam("bucket") String bucketName, } } + /** + * Map query parameter to corresponding S3GAction for audit logging. + */ + private S3GAction getActionForQueryParam(String queryParam) { + switch (queryParam) { + case "acl": + return S3GAction.PUT_ACL; + default: + return S3GAction.GET_BUCKET; + } + } + public Response listMultipartUploads( String bucketName, String prefix, @@ -589,174 +622,6 @@ public S3BucketAcl getAcl(String bucketName) } } - /** - * Implement acl put. - *
- * see: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAcl.html - */ - public Response putAcl(String bucketName, - InputStream body) throws IOException, OS3Exception { - long startNanos = Time.monotonicNowNanos(); - String grantReads = headers.getHeaderString(S3Acl.GRANT_READ); - String grantWrites = headers.getHeaderString(S3Acl.GRANT_WRITE); - String grantReadACP = headers.getHeaderString(S3Acl.GRANT_READ_CAP); - String grantWriteACP = headers.getHeaderString(S3Acl.GRANT_WRITE_CAP); - String grantFull = headers.getHeaderString(S3Acl.GRANT_FULL_CONTROL); - - try { - OzoneBucket bucket = getBucket(bucketName); - S3Owner.verifyBucketOwnerCondition(headers, bucketName, bucket.getOwner()); - OzoneVolume volume = getVolume(); - - List ozoneAclListOnBucket = new ArrayList<>(); - List ozoneAclListOnVolume = new ArrayList<>(); - - if (grantReads == null && grantWrites == null && grantReadACP == null - && grantWriteACP == null && grantFull == null) { - S3BucketAcl putBucketAclRequest = - new PutBucketAclRequestUnmarshaller().readFrom(body); - // Handle grants in body - ozoneAclListOnBucket.addAll( - S3Acl.s3AclToOzoneNativeAclOnBucket(putBucketAclRequest)); - ozoneAclListOnVolume.addAll( - S3Acl.s3AclToOzoneNativeAclOnVolume(putBucketAclRequest)); - } else { - - // Handle grants in headers - if (grantReads != null) { - ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantReads, - S3Acl.ACLType.READ.getValue())); - ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantReads, - S3Acl.ACLType.READ.getValue())); - } - if (grantWrites != null) { - ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantWrites, - S3Acl.ACLType.WRITE.getValue())); - ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantWrites, - S3Acl.ACLType.WRITE.getValue())); - } - if (grantReadACP != null) { - ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantReadACP, - S3Acl.ACLType.READ_ACP.getValue())); - ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantReadACP, - S3Acl.ACLType.READ_ACP.getValue())); - } - if (grantWriteACP != null) { - ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantWriteACP, - S3Acl.ACLType.WRITE_ACP.getValue())); - ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantWriteACP, - S3Acl.ACLType.WRITE_ACP.getValue())); - } - if (grantFull != null) { - ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantFull, - S3Acl.ACLType.FULL_CONTROL.getValue())); - ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantFull, - S3Acl.ACLType.FULL_CONTROL.getValue())); - } - } - // A put request will reset all previous ACLs on bucket - bucket.setAcl(ozoneAclListOnBucket); - // A put request will reset input user/group's permission on volume - List acls = bucket.getAcls(); - List aclsToRemoveOnVolume = new ArrayList<>(); - List currentAclsOnVolume = volume.getAcls(); - // Remove input user/group's permission from Volume first - if (!currentAclsOnVolume.isEmpty()) { - for (OzoneAcl acl : acls) { - if (acl.getAclScope() == ACCESS) { - aclsToRemoveOnVolume.addAll(OzoneAclUtil.filterAclList( - acl.getName(), acl.getType(), currentAclsOnVolume)); - } - } - for (OzoneAcl acl : aclsToRemoveOnVolume) { - volume.removeAcl(acl); - } - } - // Add new permission on Volume - for (OzoneAcl acl : ozoneAclListOnVolume) { - volume.addAcl(acl); - } - } catch (OMException exception) { - getMetrics().updatePutAclFailureStats(startNanos); - auditWriteFailure(S3GAction.PUT_ACL, exception); - if (exception.getResult() == ResultCodes.BUCKET_NOT_FOUND) { - throw 
newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, exception); - } else if (isAccessDenied(exception)) { - throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, exception); - } - throw exception; - } catch (OS3Exception ex) { - getMetrics().updatePutAclFailureStats(startNanos); - throw ex; - } - getMetrics().updatePutAclSuccessStats(startNanos); - return Response.status(HttpStatus.SC_OK).build(); - } - - /** - * Example: x-amz-grant-write: \ - * uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", \ - * id="555566667777". - */ - private List getAndConvertAclOnBucket(String value, - String permission) - throws OS3Exception { - List ozoneAclList = new ArrayList<>(); - if (StringUtils.isEmpty(value)) { - return ozoneAclList; - } - String[] subValues = value.split(","); - for (String acl : subValues) { - String[] part = acl.split("="); - if (part.length != 2) { - throw newError(S3ErrorTable.INVALID_ARGUMENT, acl); - } - S3Acl.ACLIdentityType type = - S3Acl.ACLIdentityType.getTypeFromHeaderType(part[0]); - if (type == null || !type.isSupported()) { - LOG.warn("S3 grantee {} is null or not supported", part[0]); - throw newError(NOT_IMPLEMENTED, part[0]); - } - // Build ACL on Bucket - EnumSet aclsOnBucket = S3Acl.getOzoneAclOnBucketFromS3Permission(permission); - OzoneAcl defaultOzoneAcl = OzoneAcl.of( - IAccessAuthorizer.ACLIdentityType.USER, part[1], OzoneAcl.AclScope.DEFAULT, aclsOnBucket - ); - OzoneAcl accessOzoneAcl = OzoneAcl.of(IAccessAuthorizer.ACLIdentityType.USER, part[1], ACCESS, aclsOnBucket); - ozoneAclList.add(defaultOzoneAcl); - ozoneAclList.add(accessOzoneAcl); - } - return ozoneAclList; - } - - private List getAndConvertAclOnVolume(String value, - String permission) - throws OS3Exception { - List ozoneAclList = new ArrayList<>(); - if (StringUtils.isEmpty(value)) { - return ozoneAclList; - } - String[] subValues = value.split(","); - for (String acl : subValues) { - String[] part = acl.split("="); - if (part.length != 2) { - throw newError(S3ErrorTable.INVALID_ARGUMENT, acl); - } - S3Acl.ACLIdentityType type = - S3Acl.ACLIdentityType.getTypeFromHeaderType(part[0]); - if (type == null || !type.isSupported()) { - LOG.warn("S3 grantee {} is null or not supported", part[0]); - throw newError(NOT_IMPLEMENTED, part[0]); - } - // Build ACL on Volume - EnumSet aclsOnVolume = - S3Acl.getOzoneAclOnVolumeFromS3Permission(permission); - OzoneAcl accessOzoneAcl = OzoneAcl.of(IAccessAuthorizer.ACLIdentityType.USER, part[1], ACCESS, aclsOnVolume); - ozoneAclList.add(accessOzoneAcl); - } - return ozoneAclList; - } - private void addKey(ListObjectResponse response, OzoneKey next) { KeyMetadata keyMetadata = new KeyMetadata(); keyMetadata.setKey(EncodingTypeObject.createNullable(next.getName(), diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java new file mode 100644 index 000000000000..0cefb0300b26 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import java.io.IOException; +import org.apache.hadoop.ozone.audit.AuditAction; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; + +/** + * Context object that provides access to BucketEndpoint resources. + * This allows handlers to access endpoint functionality without + * tight coupling to the BucketEndpoint class. + * + * Since BucketEndpoint extends EndpointBase, handlers can access: + * - Bucket and Volume operations + * - Methods inherited from EndpointBase + */ +public class BucketEndpointContext { + + private final BucketEndpoint endpoint; + + public BucketEndpointContext(BucketEndpoint endpoint) { + this.endpoint = endpoint; + } + + /** + * Get the bucket object. + * Delegates to BucketEndpoint's inherited getBucket() from EndpointBase. + * + * @param bucketName the bucket name + * @return OzoneBucket instance + * @throws IOException if bucket cannot be retrieved + * @throws OS3Exception if S3-specific error occurs + */ + public OzoneBucket getBucket(String bucketName) + throws IOException, OS3Exception { + return endpoint.getBucket(bucketName); + } + + /** + * Get the volume object. + * Delegates to BucketEndpoint's inherited getVolume() from EndpointBase. + * + * @return OzoneVolume instance + * @throws IOException if volume cannot be retrieved + * @throws OS3Exception if S3-specific error occurs + */ + public OzoneVolume getVolume() throws IOException, OS3Exception { + return endpoint.getVolume(); + } + + /** + * Check if an exception indicates access denied. + * This checks for OMException.ResultCodes that indicate permission issues. + * + * @param ex the exception to check + * @return true if access is denied + */ + public boolean isAccessDenied(Exception ex) { + // Check if it's an OMException with ACCESS_DENIED result code + if (ex instanceof OMException) { + OMException omEx = (OMException) ex; + return omEx.getResult() == OMException.ResultCodes.PERMISSION_DENIED || + omEx.getResult() == OMException.ResultCodes.ACCESS_DENIED; + } + return false; + } + + /** + * Audit a write operation failure. + * Delegates to BucketEndpoint's inherited auditWriteFailure() from EndpointBase. + * + * @param action the audit action being performed + * @param ex the exception that occurred + */ + public void auditWriteFailure(AuditAction action, Throwable ex) { + endpoint.auditWriteFailure(action, ex); + } + + /** + * Get reference to the endpoint for accessing other methods. + * Use with caution - prefer adding specific methods to this context + * rather than exposing the entire endpoint. 
+ * + * @return BucketEndpoint instance + */ + protected BucketEndpoint getEndpoint() { + return endpoint; + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java new file mode 100644 index 000000000000..b42c59b257c1 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import java.io.IOException; +import java.io.InputStream; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.Response; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; + +/** + * Interface for handling bucket operations based on query parameters. + * Each implementation handles a specific S3 bucket subresource operation + * (e.g., ?acl, ?lifecycle, ?notification). + */ +public interface BucketOperationHandler { + + /** + * Handle the bucket operation. + * + * @param bucketName the name of the bucket + * @param body the request body stream + * @param headers the HTTP headers + * @param context the endpoint context containing shared dependencies + * @param startNanos the start time in nanoseconds for metrics tracking + * @return HTTP response + * @throws IOException if an I/O error occurs + * @throws OS3Exception if an S3-specific error occurs + */ + Response handlePutRequest( + String bucketName, + InputStream body, + HttpHeaders headers, + BucketEndpointContext context, + long startNanos + ) throws IOException, OS3Exception; + + /** + * Get the query parameter name this handler is responsible for. + * For example: "acl", "lifecycle", "notification" + * + * @return the query parameter name + */ + String getQueryParamName(); +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java new file mode 100644 index 000000000000..1edb19165ded --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import com.google.common.annotations.VisibleForTesting; +import java.util.HashMap; +import java.util.Map; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Factory class that manages all bucket operation handlers. + * Provides a registry pattern for looking up handlers based on query parameters. + */ +public class BucketOperationHandlerFactory { + + private static final Logger LOG = + LoggerFactory.getLogger(BucketOperationHandlerFactory.class); + + private final Map handlers = new HashMap<>(); + + /** + * Register all available bucket operation handlers. + */ + public BucketOperationHandlerFactory() { + registerDefaultHandlers(); + } + + /** + * Register default handlers for S3 bucket operations. + */ + private void registerDefaultHandlers() { + register(new AclHandler()); + } + + /** + * Register a bucket operation handler. + * + * @param handler the handler to register + */ + @VisibleForTesting + public void register(BucketOperationHandler handler) { + String queryParam = handler.getQueryParamName(); + if (handlers.containsKey(queryParam)) { + LOG.warn("Overwriting existing handler for query parameter: {}", + queryParam); + } + handlers.put(queryParam, handler); + LOG.debug("Registered handler for query parameter: {}", queryParam); + } + + /** + * Get a handler for the specified query parameter. + * + * @param queryParam the query parameter name + * @return the corresponding handler, or null if not found + */ + public BucketOperationHandler getHandler(String queryParam) { + return handlers.get(queryParam); + } + + /** + * Check if a handler exists for the specified query parameter. + * + * @param queryParam the query parameter name + * @return true if a handler exists + */ + public boolean hasHandler(String queryParam) { + return handlers.containsKey(queryParam); + } + + /** + * Find the first supported query parameter that has a non-null value. + * + * This method iterates through all registered handlers and checks if the + * corresponding query parameter has a non-null value in the provided map. + * + * @param queryParams map of query parameter names to their values + * @return the name of the first query parameter that has both a non-null value + * and a registered handler, or null if none found + */ + public String findFirstSupportedQueryParam(Map queryParams) { + if (queryParams == null || queryParams.isEmpty()) { + return null; + } + + // Iterate through registered handlers and find the first one with a value + for (Map.Entry entry : handlers.entrySet()) { + String paramName = entry.getKey(); + String paramValue = queryParams.get(paramName); + + if (paramValue != null) { + return paramName; + } + } + + return null; + } + + /** + * Get all registered query parameter names. 
+ * + * @return set of query parameter names + */ + @VisibleForTesting + public java.util.Set getRegisteredQueryParams() { + return handlers.keySet(); + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java index ed1df4e3c671..60f2a33fe319 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java @@ -41,8 +41,8 @@ public final class S3Acl { // ACL put related headers public static final String GRANT_READ = "x-amz-grant-read"; public static final String GRANT_WRITE = "x-amz-grant-write"; - public static final String GRANT_READ_CAP = "x-amz-grant-read-acp"; - public static final String GRANT_WRITE_CAP = "x-amz-grant-write-acp"; + public static final String GRANT_READ_ACP = "x-amz-grant-read-acp"; + public static final String GRANT_WRITE_ACP = "x-amz-grant-write-acp"; public static final String GRANT_FULL_CONTROL = "x-amz-grant-full-control"; // Not supported headers at current stage, may support it in future diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java new file mode 100644 index 000000000000..9e7cc3f3280d --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java @@ -0,0 +1,343 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import static java.net.HttpURLConnection.HTTP_NOT_IMPLEMENTED; +import static java.net.HttpURLConnection.HTTP_OK; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.Response; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.audit.S3GAction; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +/** + * Test class for AclHandler. + */ +public class TestAclHandler { + + private static final String BUCKET_NAME = OzoneConsts.S3_BUCKET; + private OzoneClient client; + private BucketEndpointContext context; + private AclHandler aclHandler; + private HttpHeaders headers; + + @BeforeEach + public void setup() throws IOException { + client = new OzoneClientStub(); + client.getObjectStore().createS3Bucket(BUCKET_NAME); + + headers = mock(HttpHeaders.class); + + BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder() + .setClient(client) + .setHeaders(headers) + .build(); + + context = new BucketEndpointContext(bucketEndpoint); + aclHandler = new AclHandler(); + } + + @AfterEach + public void clean() throws IOException { + if (client != null) { + client.close(); + } + } + + @Test + public void testGetQueryParamName() { + assertEquals("acl", aclHandler.getQueryParamName(), + "Query param name should be 'acl'"); + } + + @Test + public void testHandlePutRequestWithReadHeader() throws Exception { + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("id=\"testuser\""); + + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); + + assertEquals(HTTP_OK, response.getStatus(), + "PUT ACL should return 200 OK"); + } + + @Test + public void testHandlePutRequestWithWriteHeader() throws Exception { + when(headers.getHeaderString(S3Acl.GRANT_WRITE)) + .thenReturn("id=\"testuser\""); + + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); + + assertEquals(HTTP_OK, response.getStatus(), + "PUT ACL should return 200 OK"); + } + + @Test + public void testHandlePutRequestWithReadAcpHeader() throws Exception { + when(headers.getHeaderString(S3Acl.GRANT_READ_ACP)) + .thenReturn("id=\"testuser\""); + + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); + + assertEquals(HTTP_OK, response.getStatus(), + "PUT ACL should return 200 OK"); + } + + @Test + public void testHandlePutRequestWithWriteAcpHeader() throws Exception { + when(headers.getHeaderString(S3Acl.GRANT_WRITE_ACP)) + .thenReturn("id=\"testuser\""); + + long startNanos = System.nanoTime(); + Response response = 
aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); + + assertEquals(HTTP_OK, response.getStatus(), + "PUT ACL should return 200 OK"); + } + + @Test + public void testHandlePutRequestWithFullControlHeader() throws Exception { + when(headers.getHeaderString(S3Acl.GRANT_FULL_CONTROL)) + .thenReturn("id=\"testuser\""); + + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); + + assertEquals(HTTP_OK, response.getStatus(), + "PUT ACL should return 200 OK"); + } + + @Test + public void testHandlePutRequestWithMultipleHeaders() throws Exception { + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("id=\"testuser1\""); + when(headers.getHeaderString(S3Acl.GRANT_WRITE)) + .thenReturn("id=\"testuser2\""); + + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); + + assertEquals(HTTP_OK, response.getStatus(), + "PUT ACL with multiple headers should return 200 OK"); + } + + @Test + public void testHandlePutRequestWithUnsupportedGranteeType() { + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("uri=\"http://example.com\""); + + long startNanos = System.nanoTime(); + OS3Exception exception = assertThrows(OS3Exception.class, () -> { + aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, + startNanos); + }, "Should throw OS3Exception for unsupported grantee type"); + + assertEquals(HTTP_NOT_IMPLEMENTED, exception.getHttpCode(), + "Should return NOT_IMPLEMENTED for unsupported grantee type"); + } + + @Test + public void testHandlePutRequestWithEmailAddressType() { + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("emailAddress=\"test@example.com\""); + + long startNanos = System.nanoTime(); + OS3Exception exception = assertThrows(OS3Exception.class, () -> { + aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, + startNanos); + }, "Should throw OS3Exception for email address grantee type"); + + assertEquals(HTTP_NOT_IMPLEMENTED, exception.getHttpCode(), + "Should return NOT_IMPLEMENTED for email address grantee type"); + } + + @Test + public void testHandlePutRequestBucketNotFound() { + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("id=\"testuser\""); + + long startNanos = System.nanoTime(); + assertThrows(OS3Exception.class, () -> { + aclHandler.handlePutRequest("nonexistent-bucket", null, headers, + context, startNanos); + }, "Should throw OS3Exception for non-existent bucket"); + } + + @Test + public void testHandlePutRequestWithBody() throws Exception { + String aclXml = "\n" + + "\n" + + " \n" + + " testowner\n" + + " Test Owner\n" + + " \n" + + " \n" + + " \n" + + " \n" + + " testuser\n" + + " \n" + + " READ\n" + + " \n" + + " \n" + + ""; + + InputStream body = new ByteArrayInputStream( + aclXml.getBytes(StandardCharsets.UTF_8)); + + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, body, headers, context, startNanos); + + assertEquals(HTTP_OK, response.getStatus(), + "PUT ACL with body should return 200 OK"); + } + + @Test + public void testHandlePutRequestWithInvalidHeaderFormat() { + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("invalid-format"); + + long startNanos = System.nanoTime(); + assertThrows(OS3Exception.class, () -> { + aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, + startNanos); + }, "Should throw OS3Exception for 
invalid header format"); + } + + @Test + public void testHandlePutRequestWithMultipleGrantees() throws Exception { + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("id=\"user1\",id=\"user2\""); + + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); + + assertEquals(HTTP_OK, response.getStatus(), + "PUT ACL with multiple grantees should return 200 OK"); + } + + @Test + public void testPutAclReplacesExistingAcls() throws Exception { + // Set initial ACL + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("id=\"user1\""); + when(headers.getHeaderString(S3Acl.GRANT_WRITE)) + .thenReturn(null); + + long startNanos = System.nanoTime(); + aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, + startNanos); + + // Replace with new ACL + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn(null); + when(headers.getHeaderString(S3Acl.GRANT_WRITE)) + .thenReturn("id=\"user2\""); + + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); + + assertEquals(HTTP_OK, response.getStatus(), + "PUT ACL should replace existing ACLs"); + } + + @Test + public void testAuditLoggingOnBucketNotFound() throws Exception { + // Create a spy of BucketEndpoint to verify audit logging + BucketEndpoint spyEndpoint = spy(EndpointBuilder.newBucketEndpointBuilder() + .setClient(client) + .setHeaders(headers) + .build()); + + BucketEndpointContext spyContext = new BucketEndpointContext(spyEndpoint); + + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("id=\"testuser\""); + + long startNanos = System.nanoTime(); + + // This should throw exception for non-existent bucket + assertThrows(OS3Exception.class, () -> { + aclHandler.handlePutRequest("nonexistent-bucket", null, headers, + spyContext, startNanos); + }); + + // Verify that auditWriteFailure was called with PUT_ACL action + // Note: getBucket() wraps OMException as OS3Exception, so we catch OS3Exception + verify(spyEndpoint, times(1)).auditWriteFailure( + eq(S3GAction.PUT_ACL), + any(OS3Exception.class)); + } + + @Test + public void testAuditLoggingOnInvalidArgument() throws Exception { + // Create a spy of BucketEndpoint to verify audit logging + BucketEndpoint spyEndpoint = spy(EndpointBuilder.newBucketEndpointBuilder() + .setClient(client) + .setHeaders(headers) + .build()); + + BucketEndpointContext spyContext = new BucketEndpointContext(spyEndpoint); + + // Invalid format will trigger OS3Exception + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("invalid-format"); + + long startNanos = System.nanoTime(); + + assertThrows(OS3Exception.class, () -> { + aclHandler.handlePutRequest(BUCKET_NAME, null, headers, + spyContext, startNanos); + }); + + // Verify that auditWriteFailure was called with PUT_ACL action + verify(spyEndpoint, times(1)).auditWriteFailure( + eq(S3GAction.PUT_ACL), + any(OS3Exception.class)); + } +} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java index ffba1be40359..3b6e58bd6067 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java @@ -128,7 +128,7 @@ public void testWrite() throws Exception { @Test public void testReadACP() throws Exception { 
when(parameterMap.containsKey(ACL_MARKER)).thenReturn(true); - when(headers.getHeaderString(S3Acl.GRANT_READ_CAP)) + when(headers.getHeaderString(S3Acl.GRANT_READ_ACP)) .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); Response response = bucketEndpoint.put(BUCKET_NAME, ACL_MARKER, null); @@ -143,7 +143,7 @@ public void testReadACP() throws Exception { @Test public void testWriteACP() throws Exception { when(parameterMap.containsKey(ACL_MARKER)).thenReturn(true); - when(headers.getHeaderString(S3Acl.GRANT_WRITE_CAP)) + when(headers.getHeaderString(S3Acl.GRANT_WRITE_ACP)) .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); Response response = bucketEndpoint.put(BUCKET_NAME, ACL_MARKER, null); @@ -175,9 +175,9 @@ public void testCombination() throws Exception { .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); when(headers.getHeaderString(S3Acl.GRANT_WRITE)) .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); - when(headers.getHeaderString(S3Acl.GRANT_READ_CAP)) + when(headers.getHeaderString(S3Acl.GRANT_READ_ACP)) .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); - when(headers.getHeaderString(S3Acl.GRANT_WRITE_CAP)) + when(headers.getHeaderString(S3Acl.GRANT_WRITE_ACP)) .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); when(headers.getHeaderString(S3Acl.GRANT_FULL_CONTROL)) .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java new file mode 100644 index 000000000000..7d27ae23d664 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java @@ -0,0 +1,211 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; + +import java.io.IOException; +import javax.ws.rs.core.HttpHeaders; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +/** + * Test class for BucketEndpointContext. + */ +public class TestBucketEndpointContext { + + private static final String BUCKET_NAME = OzoneConsts.S3_BUCKET; + private OzoneClient client; + private BucketEndpointContext context; + + @BeforeEach + public void setup() throws IOException { + client = new OzoneClientStub(); + client.getObjectStore().createS3Bucket(BUCKET_NAME); + + HttpHeaders headers = mock(HttpHeaders.class); + + BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder() + .setClient(client) + .setHeaders(headers) + .build(); + + context = new BucketEndpointContext(bucketEndpoint); + } + + @AfterEach + public void clean() throws IOException { + if (client != null) { + client.close(); + } + } + + @Test + public void testGetBucket() throws IOException, OS3Exception { + OzoneBucket bucket = context.getBucket(BUCKET_NAME); + assertNotNull(bucket, "Bucket should not be null"); + assertEquals(BUCKET_NAME, bucket.getName(), + "Bucket name should match"); + } + + @Test + public void testGetBucketNotFound() { + assertThrows(OS3Exception.class, () -> { + context.getBucket("nonexistent-bucket"); + }, "Should throw OS3Exception for non-existent bucket"); + } + + @Test + public void testGetVolume() throws IOException, OS3Exception { + OzoneVolume volume = context.getVolume(); + assertNotNull(volume, "Volume should not be null"); + } + + @Test + public void testIsAccessDeniedWithPermissionDenied() { + OMException exception = new OMException("Access denied", + OMException.ResultCodes.PERMISSION_DENIED); + + assertTrue(context.isAccessDenied(exception), + "Should return true for PERMISSION_DENIED"); + } + + @Test + public void testIsAccessDeniedWithAccessDenied() { + OMException exception = new OMException("Access denied", + OMException.ResultCodes.ACCESS_DENIED); + + assertTrue(context.isAccessDenied(exception), + "Should return true for ACCESS_DENIED"); + } + + @Test + public void testIsAccessDeniedWithBucketNotFound() { + OMException exception = new OMException("Bucket not found", + OMException.ResultCodes.BUCKET_NOT_FOUND); + + assertFalse(context.isAccessDenied(exception), + "Should return false for BUCKET_NOT_FOUND"); + } + + @Test + public void testIsAccessDeniedWithKeyNotFound() { + OMException exception = new OMException("Key not found", + OMException.ResultCodes.KEY_NOT_FOUND); + + assertFalse(context.isAccessDenied(exception), + "Should return false for KEY_NOT_FOUND"); + } + + @Test + public void testIsAccessDeniedWithIOException() { + IOException exception = new IOException("I/O error"); + + 
assertFalse(context.isAccessDenied(exception), + "Should return false for non-OMException"); + } + + @Test + public void testIsAccessDeniedWithNullException() { + assertFalse(context.isAccessDenied(null), + "Should return false for null exception"); + } + + @Test + public void testIsAccessDeniedWithRuntimeException() { + RuntimeException exception = new RuntimeException("Runtime error"); + + assertFalse(context.isAccessDenied(exception), + "Should return false for RuntimeException"); + } + + @Test + public void testGetEndpoint() { + BucketEndpoint endpoint = context.getEndpoint(); + assertNotNull(endpoint, "Endpoint should not be null"); + } + + @Test + public void testContextDelegatesCorrectly() throws IOException, OS3Exception { + // Test that context properly delegates to endpoint methods + OzoneBucket bucket = context.getBucket(BUCKET_NAME); + OzoneVolume volume = context.getVolume(); + + assertNotNull(bucket, "Delegated getBucket should work"); + assertNotNull(volume, "Delegated getVolume should work"); + } + + @Test + public void testIsAccessDeniedWithMultipleResultCodes() { + // Test all OMException result codes to ensure only access-related ones + // return true + + OMException[] accessDeniedExceptions = { + new OMException("", OMException.ResultCodes.PERMISSION_DENIED), + new OMException("", OMException.ResultCodes.ACCESS_DENIED) + }; + + for (OMException ex : accessDeniedExceptions) { + assertTrue(context.isAccessDenied(ex), + "Should return true for " + ex.getResult()); + } + + OMException[] otherExceptions = { + new OMException("", OMException.ResultCodes.BUCKET_NOT_FOUND), + new OMException("", OMException.ResultCodes.KEY_NOT_FOUND), + new OMException("", OMException.ResultCodes.VOLUME_NOT_FOUND), + new OMException("", OMException.ResultCodes.INTERNAL_ERROR) + }; + + for (OMException ex : otherExceptions) { + assertFalse(context.isAccessDenied(ex), + "Should return false for " + ex.getResult()); + } + } + + @Test + public void testBucketOperationsWithContext() throws Exception { + // Create a second bucket to test multiple operations + String secondBucket = "test-bucket-2"; + client.getObjectStore().createS3Bucket(secondBucket); + + // Test getting different buckets through context + OzoneBucket bucket1 = context.getBucket(BUCKET_NAME); + OzoneBucket bucket2 = context.getBucket(secondBucket); + + assertNotNull(bucket1, "First bucket should not be null"); + assertNotNull(bucket2, "Second bucket should not be null"); + assertEquals(BUCKET_NAME, bucket1.getName(), + "First bucket name should match"); + assertEquals(secondBucket, bucket2.getName(), + "Second bucket name should match"); + } +} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java new file mode 100644 index 000000000000..1aeb8dc85fb4 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.io.IOException; +import java.io.InputStream; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.Response; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +/** + * Test class for BucketOperationHandlerFactory. + */ +public class TestBucketOperationHandlerFactory { + + private BucketOperationHandlerFactory factory; + + @BeforeEach + public void setup() { + factory = new BucketOperationHandlerFactory(); + } + + @Test + public void testDefaultHandlersRegistered() { + // Verify that the default ACL handler is registered + assertTrue(factory.hasHandler("acl"), + "ACL handler should be registered by default"); + assertNotNull(factory.getHandler("acl"), + "ACL handler should not be null"); + } + + @Test + public void testGetHandlerForAcl() { + BucketOperationHandler handler = factory.getHandler("acl"); + assertNotNull(handler, "ACL handler should exist"); + assertTrue(handler instanceof AclHandler, + "Handler should be an instance of AclHandler"); + assertEquals("acl", handler.getQueryParamName(), + "Handler query param name should be 'acl'"); + } + + @Test + public void testGetHandlerForNonExistentParam() { + BucketOperationHandler handler = factory.getHandler("nonexistent"); + assertNull(handler, "Handler for non-existent param should be null"); + } + + @Test + public void testHasHandlerReturnsTrueForExisting() { + assertTrue(factory.hasHandler("acl"), + "Should return true for existing handler"); + } + + @Test + public void testHasHandlerReturnsFalseForNonExisting() { + assertFalse(factory.hasHandler("nonexistent"), + "Should return false for non-existing handler"); + } + + @Test + public void testRegisterNewHandler() { + // Create a mock handler + BucketOperationHandler mockHandler = new MockBucketOperationHandler("test"); + + // Register the handler + factory.register(mockHandler); + + // Verify registration + assertTrue(factory.hasHandler("test"), + "Newly registered handler should exist"); + assertEquals(mockHandler, factory.getHandler("test"), + "Retrieved handler should be the same instance"); + } + + @Test + public void testRegisterOverwritesExistingHandler() { + // Register a new handler with the same query param as ACL + BucketOperationHandler mockHandler = new MockBucketOperationHandler("acl"); + + factory.register(mockHandler); + + // Verify the handler was overwritten + BucketOperationHandler handler = factory.getHandler("acl"); + assertEquals(mockHandler, handler, + "Handler should be the newly registered one"); + assertTrue(handler instanceof MockBucketOperationHandler, + "Handler should be an instance of MockBucketOperationHandler"); + } + + @Test + public void testGetRegisteredQueryParams() { + 
// Default should have at least "acl" + assertTrue(factory.getRegisteredQueryParams().contains("acl"), + "Registered query params should contain 'acl'"); + + // Register additional handlers + factory.register(new MockBucketOperationHandler("lifecycle")); + factory.register(new MockBucketOperationHandler("notification")); + + // Verify all are present + assertEquals(3, factory.getRegisteredQueryParams().size(), + "Should have 3 registered handlers"); + assertTrue(factory.getRegisteredQueryParams().contains("lifecycle"), + "Should contain 'lifecycle'"); + assertTrue(factory.getRegisteredQueryParams().contains("notification"), + "Should contain 'notification'"); + } + + @Test + public void testMultipleHandlerRegistration() { + BucketOperationHandler handler1 = new MockBucketOperationHandler("test1"); + BucketOperationHandler handler2 = new MockBucketOperationHandler("test2"); + BucketOperationHandler handler3 = new MockBucketOperationHandler("test3"); + + factory.register(handler1); + factory.register(handler2); + factory.register(handler3); + + assertTrue(factory.hasHandler("test1"), "Handler test1 should exist"); + assertTrue(factory.hasHandler("test2"), "Handler test2 should exist"); + assertTrue(factory.hasHandler("test3"), "Handler test3 should exist"); + + assertEquals(handler1, factory.getHandler("test1")); + assertEquals(handler2, factory.getHandler("test2")); + assertEquals(handler3, factory.getHandler("test3")); + } + + /** + * Mock implementation of BucketOperationHandler for testing. + */ + private static class MockBucketOperationHandler implements BucketOperationHandler { + private final String queryParamName; + + MockBucketOperationHandler(String queryParamName) { + this.queryParamName = queryParamName; + } + + @Override + public Response handlePutRequest(String bucketName, InputStream body, + HttpHeaders headers, + BucketEndpointContext context, + long startNanos) + throws IOException, OS3Exception { + return Response.ok().build(); + } + + @Override + public String getQueryParamName() { + return queryParamName; + } + } +} From 210aa3e9a3e33996fcf702ac89fa58e3eb52375e Mon Sep 17 00:00:00 2001 From: echonesis Date: Mon, 15 Dec 2025 01:04:33 +0800 Subject: [PATCH 02/36] HDDS-14123. 
Refactor BucketEndpoint#Put method --- .../hadoop/ozone/s3/endpoint/AclHandler.java | 261 +++++++++++++ .../ozone/s3/endpoint/BucketEndpoint.java | 35 +- .../s3/endpoint/BucketEndpointContext.java | 108 ++++++ .../s3/endpoint/BucketOperationHandler.java | 60 +++ .../BucketOperationHandlerFactory.java | 124 +++++++ .../hadoop/ozone/s3/endpoint/S3Acl.java | 4 +- .../ozone/s3/endpoint/TestAclHandler.java | 343 ++++++++++++++++++ .../ozone/s3/endpoint/TestBucketAcl.java | 8 +- .../endpoint/TestBucketEndpointContext.java | 211 +++++++++++ .../TestBucketOperationHandlerFactory.java | 175 +++++++++ 10 files changed, 1321 insertions(+), 8 deletions(-) create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java create mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java create mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java create mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java new file mode 100644 index 000000000000..9d1e05eaa6b9 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java @@ -0,0 +1,261 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.List; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.Response; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.audit.S3GAction; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.http.HttpStatus; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Handler for bucket ACL operations (?acl query parameter). + * Implements PUT operations for bucket Access Control Lists. + */ +public class AclHandler implements BucketOperationHandler { + + private static final Logger LOG = LoggerFactory.getLogger(AclHandler.class); + + @Override + public String getQueryParamName() { + return "acl"; + } + + /** + * Implement acl put. + *
<p>
+ * see: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAcl.html + */ + @Override + public Response handlePutRequest( + String bucketName, + InputStream body, + HttpHeaders headers, + BucketEndpointContext context, + long startNanos) throws IOException, OS3Exception { + + String grantReads = headers.getHeaderString(S3Acl.GRANT_READ); + String grantWrites = headers.getHeaderString(S3Acl.GRANT_WRITE); + String grantReadACP = headers.getHeaderString(S3Acl.GRANT_READ_ACP); + String grantWriteACP = headers.getHeaderString(S3Acl.GRANT_WRITE_ACP); + String grantFull = headers.getHeaderString(S3Acl.GRANT_FULL_CONTROL); + + try { + OzoneBucket bucket = context.getBucket(bucketName); + S3Owner.verifyBucketOwnerCondition(headers, bucketName, bucket.getOwner()); + OzoneVolume volume = context.getVolume(); + + List ozoneAclListOnBucket = new ArrayList<>(); + List ozoneAclListOnVolume = new ArrayList<>(); + + if (grantReads == null && grantWrites == null && grantReadACP == null + && grantWriteACP == null && grantFull == null) { + // Handle grants in body + S3BucketAcl putBucketAclRequest = + new PutBucketAclRequestUnmarshaller().readFrom(body); + ozoneAclListOnBucket.addAll( + S3Acl.s3AclToOzoneNativeAclOnBucket(putBucketAclRequest)); + ozoneAclListOnVolume.addAll( + S3Acl.s3AclToOzoneNativeAclOnVolume(putBucketAclRequest)); + } else { + // Handle grants in headers + if (grantReads != null) { + ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantReads, + S3Acl.ACLType.READ.getValue())); + ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantReads, + S3Acl.ACLType.READ.getValue())); + } + if (grantWrites != null) { + ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantWrites, + S3Acl.ACLType.WRITE.getValue())); + ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantWrites, + S3Acl.ACLType.WRITE.getValue())); + } + if (grantReadACP != null) { + ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantReadACP, + S3Acl.ACLType.READ_ACP.getValue())); + ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantReadACP, + S3Acl.ACLType.READ_ACP.getValue())); + } + if (grantWriteACP != null) { + ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantWriteACP, + S3Acl.ACLType.WRITE_ACP.getValue())); + ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantWriteACP, + S3Acl.ACLType.WRITE_ACP.getValue())); + } + if (grantFull != null) { + ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantFull, + S3Acl.ACLType.FULL_CONTROL.getValue())); + ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantFull, + S3Acl.ACLType.FULL_CONTROL.getValue())); + } + } + + // A put request will reset all previous ACLs on bucket + bucket.setAcl(ozoneAclListOnBucket); + + // A put request will reset input user/group's permission on volume + List acls = bucket.getAcls(); + List aclsToRemoveOnVolume = new ArrayList<>(); + List currentAclsOnVolume = volume.getAcls(); + + // Remove input user/group's permission from Volume first + if (!currentAclsOnVolume.isEmpty()) { + for (OzoneAcl acl : acls) { + if (acl.getAclScope() == ACCESS) { + aclsToRemoveOnVolume.addAll(OzoneAclUtil.filterAclList( + acl.getName(), acl.getType(), currentAclsOnVolume)); + } + } + for (OzoneAcl acl : aclsToRemoveOnVolume) { + volume.removeAcl(acl); + } + } + + // Add new permission on Volume + for (OzoneAcl acl : ozoneAclListOnVolume) { + volume.addAcl(acl); + } + + context.getEndpoint().getMetrics().updatePutAclSuccessStats(startNanos); + return Response.status(HttpStatus.SC_OK).build(); + + } catch (OMException 
exception) { + context.getEndpoint().getMetrics().updatePutAclFailureStats(startNanos); + context.auditWriteFailure(S3GAction.PUT_ACL, exception); + if (exception.getResult() == ResultCodes.BUCKET_NOT_FOUND) { + throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, exception); + } else if (context.isAccessDenied(exception)) { + throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, exception); + } + throw exception; + } catch (OS3Exception ex) { + context.getEndpoint().getMetrics().updatePutAclFailureStats(startNanos); + context.auditWriteFailure(S3GAction.PUT_ACL, ex); + throw ex; + } + } + + /** + * Convert ACL string to Ozone ACL on bucket. + * + * Example: x-amz-grant-write: id="111122223333", id="555566667777" + */ + private List getAndConvertAclOnBucket(String value, + String permission) + throws OS3Exception { + return parseAndConvertAcl(value, permission, true); + } + + /** + * Convert ACL string to Ozone ACL on volume. + */ + private List getAndConvertAclOnVolume(String value, + String permission) + throws OS3Exception { + return parseAndConvertAcl(value, permission, false); + } + + /** + * Parse ACL string and convert to Ozone ACLs. + * + * This is a common method extracted from getAndConvertAclOnBucket and + * getAndConvertAclOnVolume to reduce code duplication. + * + * @param value the ACL header value (e.g., "id=\"user1\",id=\"user2\"") + * @param permission the S3 permission type (READ, WRITE, etc.) + * @param isBucket true for bucket ACL, false for volume ACL + * @return list of OzoneAcl objects + * @throws OS3Exception if parsing fails or grantee type is not supported + */ + private List parseAndConvertAcl(String value, String permission, + boolean isBucket) + throws OS3Exception { + List ozoneAclList = new ArrayList<>(); + if (StringUtils.isEmpty(value)) { + return ozoneAclList; + } + + String[] subValues = value.split(","); + for (String acl : subValues) { + String[] part = acl.split("="); + if (part.length != 2) { + throw newError(S3ErrorTable.INVALID_ARGUMENT, acl); + } + + S3Acl.ACLIdentityType type = + S3Acl.ACLIdentityType.getTypeFromHeaderType(part[0]); + if (type == null || !type.isSupported()) { + LOG.warn("S3 grantee {} is null or not supported", part[0]); + throw newError(NOT_IMPLEMENTED, part[0]); + } + + String userId = part[1]; + + if (isBucket) { + // Build ACL on Bucket + EnumSet aclsOnBucket = + S3Acl.getOzoneAclOnBucketFromS3Permission(permission); + ozoneAclList.add(OzoneAcl.of( + IAccessAuthorizer.ACLIdentityType.USER, + userId, + OzoneAcl.AclScope.DEFAULT, + aclsOnBucket + )); + ozoneAclList.add(OzoneAcl.of( + IAccessAuthorizer.ACLIdentityType.USER, + userId, + ACCESS, + aclsOnBucket + )); + } else { + // Build ACL on Volume + EnumSet aclsOnVolume = + S3Acl.getOzoneAclOnVolumeFromS3Permission(permission); + ozoneAclList.add(OzoneAcl.of( + IAccessAuthorizer.ACLIdentityType.USER, + userId, + ACCESS, + aclsOnVolume + )); + } + } + + return ozoneAclList; + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index 6b888ce12fcb..3c53562420c4 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -34,6 +34,7 @@ import java.io.InputStream; import java.util.ArrayList; import java.util.EnumSet; +import java.util.HashMap; import java.util.HashSet; import 
java.util.Iterator; import java.util.List; @@ -96,6 +97,23 @@ public class BucketEndpoint extends EndpointBase { private boolean listKeysShallowEnabled; private int maxKeysLimit = 1000; + private static final BucketOperationHandlerFactory HANDLER_FACTORY = + new BucketOperationHandlerFactory(); + + @Context + private HttpHeaders headers; + + private BucketEndpointContext context; + + public BucketEndpoint() { + super(); + this.context = new BucketEndpointContext(this); + } + + private BucketEndpointContext getContext() { + return context; + } + /** * Rest endpoint to list objects in a specific bucket. *
<p>
@@ -320,6 +338,7 @@ public Response put( auditWriteSuccess(s3GAction); return response; } + String location = createS3Bucket(bucketName); auditWriteSuccess(s3GAction); getMetrics().updateCreateBucketSuccessStats(startNanos); @@ -338,6 +357,18 @@ public Response put( } } + /** + * Map query parameter to corresponding S3GAction for audit logging. + */ + private S3GAction getActionForQueryParam(String queryParam) { + switch (queryParam) { + case "acl": + return S3GAction.PUT_ACL; + default: + return S3GAction.GET_BUCKET; + } + } + public Response listMultipartUploads( String bucketName, String prefix, @@ -582,8 +613,8 @@ public Response putAcl(String bucketName, long startNanos = Time.monotonicNowNanos(); String grantReads = getHeaders().getHeaderString(S3Acl.GRANT_READ); String grantWrites = getHeaders().getHeaderString(S3Acl.GRANT_WRITE); - String grantReadACP = getHeaders().getHeaderString(S3Acl.GRANT_READ_CAP); - String grantWriteACP = getHeaders().getHeaderString(S3Acl.GRANT_WRITE_CAP); + String grantReadACP = getHeaders().getHeaderString(S3Acl.GRANT_READ_ACP); + String grantWriteACP = getHeaders().getHeaderString(S3Acl.GRANT_WRITE_ACP); String grantFull = getHeaders().getHeaderString(S3Acl.GRANT_FULL_CONTROL); try { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java new file mode 100644 index 000000000000..0cefb0300b26 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import java.io.IOException; +import org.apache.hadoop.ozone.audit.AuditAction; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; + +/** + * Context object that provides access to BucketEndpoint resources. + * This allows handlers to access endpoint functionality without + * tight coupling to the BucketEndpoint class. + * + * Since BucketEndpoint extends EndpointBase, handlers can access: + * - Bucket and Volume operations + * - Methods inherited from EndpointBase + */ +public class BucketEndpointContext { + + private final BucketEndpoint endpoint; + + public BucketEndpointContext(BucketEndpoint endpoint) { + this.endpoint = endpoint; + } + + /** + * Get the bucket object. + * Delegates to BucketEndpoint's inherited getBucket() from EndpointBase. 
+ * + * @param bucketName the bucket name + * @return OzoneBucket instance + * @throws IOException if bucket cannot be retrieved + * @throws OS3Exception if S3-specific error occurs + */ + public OzoneBucket getBucket(String bucketName) + throws IOException, OS3Exception { + return endpoint.getBucket(bucketName); + } + + /** + * Get the volume object. + * Delegates to BucketEndpoint's inherited getVolume() from EndpointBase. + * + * @return OzoneVolume instance + * @throws IOException if volume cannot be retrieved + * @throws OS3Exception if S3-specific error occurs + */ + public OzoneVolume getVolume() throws IOException, OS3Exception { + return endpoint.getVolume(); + } + + /** + * Check if an exception indicates access denied. + * This checks for OMException.ResultCodes that indicate permission issues. + * + * @param ex the exception to check + * @return true if access is denied + */ + public boolean isAccessDenied(Exception ex) { + // Check if it's an OMException with ACCESS_DENIED result code + if (ex instanceof OMException) { + OMException omEx = (OMException) ex; + return omEx.getResult() == OMException.ResultCodes.PERMISSION_DENIED || + omEx.getResult() == OMException.ResultCodes.ACCESS_DENIED; + } + return false; + } + + /** + * Audit a write operation failure. + * Delegates to BucketEndpoint's inherited auditWriteFailure() from EndpointBase. + * + * @param action the audit action being performed + * @param ex the exception that occurred + */ + public void auditWriteFailure(AuditAction action, Throwable ex) { + endpoint.auditWriteFailure(action, ex); + } + + /** + * Get reference to the endpoint for accessing other methods. + * Use with caution - prefer adding specific methods to this context + * rather than exposing the entire endpoint. + * + * @return BucketEndpoint instance + */ + protected BucketEndpoint getEndpoint() { + return endpoint; + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java new file mode 100644 index 000000000000..b42c59b257c1 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import java.io.IOException; +import java.io.InputStream; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.Response; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; + +/** + * Interface for handling bucket operations based on query parameters. + * Each implementation handles a specific S3 bucket subresource operation + * (e.g., ?acl, ?lifecycle, ?notification). 
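+ *
+ * A minimal, illustrative dispatch sketch (caller-side names such as
+ * {@code factory}, {@code context} and {@code startNanos} are assumed here,
+ * not defined by this interface):
+ * <pre>{@code
+ *   BucketOperationHandler handler = factory.getHandler("acl");
+ *   if (handler != null) {
+ *     return handler.handlePutRequest(bucketName, body, headers, context, startNanos);
+ *   }
+ * }</pre>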
+ */ +public interface BucketOperationHandler { + + /** + * Handle the bucket operation. + * + * @param bucketName the name of the bucket + * @param body the request body stream + * @param headers the HTTP headers + * @param context the endpoint context containing shared dependencies + * @param startNanos the start time in nanoseconds for metrics tracking + * @return HTTP response + * @throws IOException if an I/O error occurs + * @throws OS3Exception if an S3-specific error occurs + */ + Response handlePutRequest( + String bucketName, + InputStream body, + HttpHeaders headers, + BucketEndpointContext context, + long startNanos + ) throws IOException, OS3Exception; + + /** + * Get the query parameter name this handler is responsible for. + * For example: "acl", "lifecycle", "notification" + * + * @return the query parameter name + */ + String getQueryParamName(); +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java new file mode 100644 index 000000000000..1edb19165ded --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import com.google.common.annotations.VisibleForTesting; +import java.util.HashMap; +import java.util.Map; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Factory class that manages all bucket operation handlers. + * Provides a registry pattern for looking up handlers based on query parameters. + */ +public class BucketOperationHandlerFactory { + + private static final Logger LOG = + LoggerFactory.getLogger(BucketOperationHandlerFactory.class); + + private final Map handlers = new HashMap<>(); + + /** + * Register all available bucket operation handlers. + */ + public BucketOperationHandlerFactory() { + registerDefaultHandlers(); + } + + /** + * Register default handlers for S3 bucket operations. + */ + private void registerDefaultHandlers() { + register(new AclHandler()); + } + + /** + * Register a bucket operation handler. + * + * @param handler the handler to register + */ + @VisibleForTesting + public void register(BucketOperationHandler handler) { + String queryParam = handler.getQueryParamName(); + if (handlers.containsKey(queryParam)) { + LOG.warn("Overwriting existing handler for query parameter: {}", + queryParam); + } + handlers.put(queryParam, handler); + LOG.debug("Registered handler for query parameter: {}", queryParam); + } + + /** + * Get a handler for the specified query parameter. 
+ * + * @param queryParam the query parameter name + * @return the corresponding handler, or null if not found + */ + public BucketOperationHandler getHandler(String queryParam) { + return handlers.get(queryParam); + } + + /** + * Check if a handler exists for the specified query parameter. + * + * @param queryParam the query parameter name + * @return true if a handler exists + */ + public boolean hasHandler(String queryParam) { + return handlers.containsKey(queryParam); + } + + /** + * Find the first supported query parameter that has a non-null value. + * + * This method iterates through all registered handlers and checks if the + * corresponding query parameter has a non-null value in the provided map. + * + * @param queryParams map of query parameter names to their values + * @return the name of the first query parameter that has both a non-null value + * and a registered handler, or null if none found + */ + public String findFirstSupportedQueryParam(Map queryParams) { + if (queryParams == null || queryParams.isEmpty()) { + return null; + } + + // Iterate through registered handlers and find the first one with a value + for (Map.Entry entry : handlers.entrySet()) { + String paramName = entry.getKey(); + String paramValue = queryParams.get(paramName); + + if (paramValue != null) { + return paramName; + } + } + + return null; + } + + /** + * Get all registered query parameter names. + * + * @return set of query parameter names + */ + @VisibleForTesting + public java.util.Set getRegisteredQueryParams() { + return handlers.keySet(); + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java index ed1df4e3c671..60f2a33fe319 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java @@ -41,8 +41,8 @@ public final class S3Acl { // ACL put related headers public static final String GRANT_READ = "x-amz-grant-read"; public static final String GRANT_WRITE = "x-amz-grant-write"; - public static final String GRANT_READ_CAP = "x-amz-grant-read-acp"; - public static final String GRANT_WRITE_CAP = "x-amz-grant-write-acp"; + public static final String GRANT_READ_ACP = "x-amz-grant-read-acp"; + public static final String GRANT_WRITE_ACP = "x-amz-grant-write-acp"; public static final String GRANT_FULL_CONTROL = "x-amz-grant-full-control"; // Not supported headers at current stage, may support it in future diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java new file mode 100644 index 000000000000..9e7cc3f3280d --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java @@ -0,0 +1,343 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import static java.net.HttpURLConnection.HTTP_NOT_IMPLEMENTED; +import static java.net.HttpURLConnection.HTTP_OK; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.Response; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.audit.S3GAction; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +/** + * Test class for AclHandler. + */ +public class TestAclHandler { + + private static final String BUCKET_NAME = OzoneConsts.S3_BUCKET; + private OzoneClient client; + private BucketEndpointContext context; + private AclHandler aclHandler; + private HttpHeaders headers; + + @BeforeEach + public void setup() throws IOException { + client = new OzoneClientStub(); + client.getObjectStore().createS3Bucket(BUCKET_NAME); + + headers = mock(HttpHeaders.class); + + BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder() + .setClient(client) + .setHeaders(headers) + .build(); + + context = new BucketEndpointContext(bucketEndpoint); + aclHandler = new AclHandler(); + } + + @AfterEach + public void clean() throws IOException { + if (client != null) { + client.close(); + } + } + + @Test + public void testGetQueryParamName() { + assertEquals("acl", aclHandler.getQueryParamName(), + "Query param name should be 'acl'"); + } + + @Test + public void testHandlePutRequestWithReadHeader() throws Exception { + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("id=\"testuser\""); + + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); + + assertEquals(HTTP_OK, response.getStatus(), + "PUT ACL should return 200 OK"); + } + + @Test + public void testHandlePutRequestWithWriteHeader() throws Exception { + when(headers.getHeaderString(S3Acl.GRANT_WRITE)) + .thenReturn("id=\"testuser\""); + + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); + + assertEquals(HTTP_OK, response.getStatus(), + "PUT ACL should return 200 OK"); + } + + @Test + public void testHandlePutRequestWithReadAcpHeader() throws Exception { + when(headers.getHeaderString(S3Acl.GRANT_READ_ACP)) + .thenReturn("id=\"testuser\""); + + long startNanos = System.nanoTime(); + 
Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); + + assertEquals(HTTP_OK, response.getStatus(), + "PUT ACL should return 200 OK"); + } + + @Test + public void testHandlePutRequestWithWriteAcpHeader() throws Exception { + when(headers.getHeaderString(S3Acl.GRANT_WRITE_ACP)) + .thenReturn("id=\"testuser\""); + + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); + + assertEquals(HTTP_OK, response.getStatus(), + "PUT ACL should return 200 OK"); + } + + @Test + public void testHandlePutRequestWithFullControlHeader() throws Exception { + when(headers.getHeaderString(S3Acl.GRANT_FULL_CONTROL)) + .thenReturn("id=\"testuser\""); + + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); + + assertEquals(HTTP_OK, response.getStatus(), + "PUT ACL should return 200 OK"); + } + + @Test + public void testHandlePutRequestWithMultipleHeaders() throws Exception { + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("id=\"testuser1\""); + when(headers.getHeaderString(S3Acl.GRANT_WRITE)) + .thenReturn("id=\"testuser2\""); + + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); + + assertEquals(HTTP_OK, response.getStatus(), + "PUT ACL with multiple headers should return 200 OK"); + } + + @Test + public void testHandlePutRequestWithUnsupportedGranteeType() { + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("uri=\"http://example.com\""); + + long startNanos = System.nanoTime(); + OS3Exception exception = assertThrows(OS3Exception.class, () -> { + aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, + startNanos); + }, "Should throw OS3Exception for unsupported grantee type"); + + assertEquals(HTTP_NOT_IMPLEMENTED, exception.getHttpCode(), + "Should return NOT_IMPLEMENTED for unsupported grantee type"); + } + + @Test + public void testHandlePutRequestWithEmailAddressType() { + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("emailAddress=\"test@example.com\""); + + long startNanos = System.nanoTime(); + OS3Exception exception = assertThrows(OS3Exception.class, () -> { + aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, + startNanos); + }, "Should throw OS3Exception for email address grantee type"); + + assertEquals(HTTP_NOT_IMPLEMENTED, exception.getHttpCode(), + "Should return NOT_IMPLEMENTED for email address grantee type"); + } + + @Test + public void testHandlePutRequestBucketNotFound() { + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("id=\"testuser\""); + + long startNanos = System.nanoTime(); + assertThrows(OS3Exception.class, () -> { + aclHandler.handlePutRequest("nonexistent-bucket", null, headers, + context, startNanos); + }, "Should throw OS3Exception for non-existent bucket"); + } + + @Test + public void testHandlePutRequestWithBody() throws Exception { + String aclXml = "\n" + + "\n" + + " \n" + + " testowner\n" + + " Test Owner\n" + + " \n" + + " \n" + + " \n" + + " \n" + + " testuser\n" + + " \n" + + " READ\n" + + " \n" + + " \n" + + ""; + + InputStream body = new ByteArrayInputStream( + aclXml.getBytes(StandardCharsets.UTF_8)); + + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, body, headers, context, startNanos); + + assertEquals(HTTP_OK, 
response.getStatus(), + "PUT ACL with body should return 200 OK"); + } + + @Test + public void testHandlePutRequestWithInvalidHeaderFormat() { + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("invalid-format"); + + long startNanos = System.nanoTime(); + assertThrows(OS3Exception.class, () -> { + aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, + startNanos); + }, "Should throw OS3Exception for invalid header format"); + } + + @Test + public void testHandlePutRequestWithMultipleGrantees() throws Exception { + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("id=\"user1\",id=\"user2\""); + + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); + + assertEquals(HTTP_OK, response.getStatus(), + "PUT ACL with multiple grantees should return 200 OK"); + } + + @Test + public void testPutAclReplacesExistingAcls() throws Exception { + // Set initial ACL + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("id=\"user1\""); + when(headers.getHeaderString(S3Acl.GRANT_WRITE)) + .thenReturn(null); + + long startNanos = System.nanoTime(); + aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, + startNanos); + + // Replace with new ACL + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn(null); + when(headers.getHeaderString(S3Acl.GRANT_WRITE)) + .thenReturn("id=\"user2\""); + + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); + + assertEquals(HTTP_OK, response.getStatus(), + "PUT ACL should replace existing ACLs"); + } + + @Test + public void testAuditLoggingOnBucketNotFound() throws Exception { + // Create a spy of BucketEndpoint to verify audit logging + BucketEndpoint spyEndpoint = spy(EndpointBuilder.newBucketEndpointBuilder() + .setClient(client) + .setHeaders(headers) + .build()); + + BucketEndpointContext spyContext = new BucketEndpointContext(spyEndpoint); + + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("id=\"testuser\""); + + long startNanos = System.nanoTime(); + + // This should throw exception for non-existent bucket + assertThrows(OS3Exception.class, () -> { + aclHandler.handlePutRequest("nonexistent-bucket", null, headers, + spyContext, startNanos); + }); + + // Verify that auditWriteFailure was called with PUT_ACL action + // Note: getBucket() wraps OMException as OS3Exception, so we catch OS3Exception + verify(spyEndpoint, times(1)).auditWriteFailure( + eq(S3GAction.PUT_ACL), + any(OS3Exception.class)); + } + + @Test + public void testAuditLoggingOnInvalidArgument() throws Exception { + // Create a spy of BucketEndpoint to verify audit logging + BucketEndpoint spyEndpoint = spy(EndpointBuilder.newBucketEndpointBuilder() + .setClient(client) + .setHeaders(headers) + .build()); + + BucketEndpointContext spyContext = new BucketEndpointContext(spyEndpoint); + + // Invalid format will trigger OS3Exception + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("invalid-format"); + + long startNanos = System.nanoTime(); + + assertThrows(OS3Exception.class, () -> { + aclHandler.handlePutRequest(BUCKET_NAME, null, headers, + spyContext, startNanos); + }); + + // Verify that auditWriteFailure was called with PUT_ACL action + verify(spyEndpoint, times(1)).auditWriteFailure( + eq(S3GAction.PUT_ACL), + any(OS3Exception.class)); + } +} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java index 1598a48a9892..e53267111009 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java @@ -128,7 +128,7 @@ public void testWrite() throws Exception { @Test public void testReadACP() throws Exception { when(parameterMap.containsKey(ACL_MARKER)).thenReturn(true); - when(headers.getHeaderString(S3Acl.GRANT_READ_CAP)) + when(headers.getHeaderString(S3Acl.GRANT_READ_ACP)) .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); Response response = bucketEndpoint.put(BUCKET_NAME, null); @@ -143,7 +143,7 @@ public void testReadACP() throws Exception { @Test public void testWriteACP() throws Exception { when(parameterMap.containsKey(ACL_MARKER)).thenReturn(true); - when(headers.getHeaderString(S3Acl.GRANT_WRITE_CAP)) + when(headers.getHeaderString(S3Acl.GRANT_WRITE_ACP)) .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); Response response = bucketEndpoint.put(BUCKET_NAME, null); @@ -175,9 +175,9 @@ public void testCombination() throws Exception { .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); when(headers.getHeaderString(S3Acl.GRANT_WRITE)) .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); - when(headers.getHeaderString(S3Acl.GRANT_READ_CAP)) + when(headers.getHeaderString(S3Acl.GRANT_READ_ACP)) .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); - when(headers.getHeaderString(S3Acl.GRANT_WRITE_CAP)) + when(headers.getHeaderString(S3Acl.GRANT_WRITE_ACP)) .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); when(headers.getHeaderString(S3Acl.GRANT_FULL_CONTROL)) .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java new file mode 100644 index 000000000000..7d27ae23d664 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java @@ -0,0 +1,211 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; + +import java.io.IOException; +import javax.ws.rs.core.HttpHeaders; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +/** + * Test class for BucketEndpointContext. + */ +public class TestBucketEndpointContext { + + private static final String BUCKET_NAME = OzoneConsts.S3_BUCKET; + private OzoneClient client; + private BucketEndpointContext context; + + @BeforeEach + public void setup() throws IOException { + client = new OzoneClientStub(); + client.getObjectStore().createS3Bucket(BUCKET_NAME); + + HttpHeaders headers = mock(HttpHeaders.class); + + BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder() + .setClient(client) + .setHeaders(headers) + .build(); + + context = new BucketEndpointContext(bucketEndpoint); + } + + @AfterEach + public void clean() throws IOException { + if (client != null) { + client.close(); + } + } + + @Test + public void testGetBucket() throws IOException, OS3Exception { + OzoneBucket bucket = context.getBucket(BUCKET_NAME); + assertNotNull(bucket, "Bucket should not be null"); + assertEquals(BUCKET_NAME, bucket.getName(), + "Bucket name should match"); + } + + @Test + public void testGetBucketNotFound() { + assertThrows(OS3Exception.class, () -> { + context.getBucket("nonexistent-bucket"); + }, "Should throw OS3Exception for non-existent bucket"); + } + + @Test + public void testGetVolume() throws IOException, OS3Exception { + OzoneVolume volume = context.getVolume(); + assertNotNull(volume, "Volume should not be null"); + } + + @Test + public void testIsAccessDeniedWithPermissionDenied() { + OMException exception = new OMException("Access denied", + OMException.ResultCodes.PERMISSION_DENIED); + + assertTrue(context.isAccessDenied(exception), + "Should return true for PERMISSION_DENIED"); + } + + @Test + public void testIsAccessDeniedWithAccessDenied() { + OMException exception = new OMException("Access denied", + OMException.ResultCodes.ACCESS_DENIED); + + assertTrue(context.isAccessDenied(exception), + "Should return true for ACCESS_DENIED"); + } + + @Test + public void testIsAccessDeniedWithBucketNotFound() { + OMException exception = new OMException("Bucket not found", + OMException.ResultCodes.BUCKET_NOT_FOUND); + + assertFalse(context.isAccessDenied(exception), + "Should return false for BUCKET_NOT_FOUND"); + } + + @Test + public void testIsAccessDeniedWithKeyNotFound() { + OMException exception = new OMException("Key not found", + OMException.ResultCodes.KEY_NOT_FOUND); + + assertFalse(context.isAccessDenied(exception), + "Should return false for KEY_NOT_FOUND"); + } + + @Test + public void testIsAccessDeniedWithIOException() { + IOException exception = new IOException("I/O error"); + + 
assertFalse(context.isAccessDenied(exception), + "Should return false for non-OMException"); + } + + @Test + public void testIsAccessDeniedWithNullException() { + assertFalse(context.isAccessDenied(null), + "Should return false for null exception"); + } + + @Test + public void testIsAccessDeniedWithRuntimeException() { + RuntimeException exception = new RuntimeException("Runtime error"); + + assertFalse(context.isAccessDenied(exception), + "Should return false for RuntimeException"); + } + + @Test + public void testGetEndpoint() { + BucketEndpoint endpoint = context.getEndpoint(); + assertNotNull(endpoint, "Endpoint should not be null"); + } + + @Test + public void testContextDelegatesCorrectly() throws IOException, OS3Exception { + // Test that context properly delegates to endpoint methods + OzoneBucket bucket = context.getBucket(BUCKET_NAME); + OzoneVolume volume = context.getVolume(); + + assertNotNull(bucket, "Delegated getBucket should work"); + assertNotNull(volume, "Delegated getVolume should work"); + } + + @Test + public void testIsAccessDeniedWithMultipleResultCodes() { + // Test all OMException result codes to ensure only access-related ones + // return true + + OMException[] accessDeniedExceptions = { + new OMException("", OMException.ResultCodes.PERMISSION_DENIED), + new OMException("", OMException.ResultCodes.ACCESS_DENIED) + }; + + for (OMException ex : accessDeniedExceptions) { + assertTrue(context.isAccessDenied(ex), + "Should return true for " + ex.getResult()); + } + + OMException[] otherExceptions = { + new OMException("", OMException.ResultCodes.BUCKET_NOT_FOUND), + new OMException("", OMException.ResultCodes.KEY_NOT_FOUND), + new OMException("", OMException.ResultCodes.VOLUME_NOT_FOUND), + new OMException("", OMException.ResultCodes.INTERNAL_ERROR) + }; + + for (OMException ex : otherExceptions) { + assertFalse(context.isAccessDenied(ex), + "Should return false for " + ex.getResult()); + } + } + + @Test + public void testBucketOperationsWithContext() throws Exception { + // Create a second bucket to test multiple operations + String secondBucket = "test-bucket-2"; + client.getObjectStore().createS3Bucket(secondBucket); + + // Test getting different buckets through context + OzoneBucket bucket1 = context.getBucket(BUCKET_NAME); + OzoneBucket bucket2 = context.getBucket(secondBucket); + + assertNotNull(bucket1, "First bucket should not be null"); + assertNotNull(bucket2, "Second bucket should not be null"); + assertEquals(BUCKET_NAME, bucket1.getName(), + "First bucket name should match"); + assertEquals(secondBucket, bucket2.getName(), + "Second bucket name should match"); + } +} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java new file mode 100644 index 000000000000..1aeb8dc85fb4 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.io.IOException; +import java.io.InputStream; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.Response; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +/** + * Test class for BucketOperationHandlerFactory. + */ +public class TestBucketOperationHandlerFactory { + + private BucketOperationHandlerFactory factory; + + @BeforeEach + public void setup() { + factory = new BucketOperationHandlerFactory(); + } + + @Test + public void testDefaultHandlersRegistered() { + // Verify that the default ACL handler is registered + assertTrue(factory.hasHandler("acl"), + "ACL handler should be registered by default"); + assertNotNull(factory.getHandler("acl"), + "ACL handler should not be null"); + } + + @Test + public void testGetHandlerForAcl() { + BucketOperationHandler handler = factory.getHandler("acl"); + assertNotNull(handler, "ACL handler should exist"); + assertTrue(handler instanceof AclHandler, + "Handler should be an instance of AclHandler"); + assertEquals("acl", handler.getQueryParamName(), + "Handler query param name should be 'acl'"); + } + + @Test + public void testGetHandlerForNonExistentParam() { + BucketOperationHandler handler = factory.getHandler("nonexistent"); + assertNull(handler, "Handler for non-existent param should be null"); + } + + @Test + public void testHasHandlerReturnsTrueForExisting() { + assertTrue(factory.hasHandler("acl"), + "Should return true for existing handler"); + } + + @Test + public void testHasHandlerReturnsFalseForNonExisting() { + assertFalse(factory.hasHandler("nonexistent"), + "Should return false for non-existing handler"); + } + + @Test + public void testRegisterNewHandler() { + // Create a mock handler + BucketOperationHandler mockHandler = new MockBucketOperationHandler("test"); + + // Register the handler + factory.register(mockHandler); + + // Verify registration + assertTrue(factory.hasHandler("test"), + "Newly registered handler should exist"); + assertEquals(mockHandler, factory.getHandler("test"), + "Retrieved handler should be the same instance"); + } + + @Test + public void testRegisterOverwritesExistingHandler() { + // Register a new handler with the same query param as ACL + BucketOperationHandler mockHandler = new MockBucketOperationHandler("acl"); + + factory.register(mockHandler); + + // Verify the handler was overwritten + BucketOperationHandler handler = factory.getHandler("acl"); + assertEquals(mockHandler, handler, + "Handler should be the newly registered one"); + assertTrue(handler instanceof MockBucketOperationHandler, + "Handler should be an instance of MockBucketOperationHandler"); + } + + @Test + public void testGetRegisteredQueryParams() { + 
// Default should have at least "acl" + assertTrue(factory.getRegisteredQueryParams().contains("acl"), + "Registered query params should contain 'acl'"); + + // Register additional handlers + factory.register(new MockBucketOperationHandler("lifecycle")); + factory.register(new MockBucketOperationHandler("notification")); + + // Verify all are present + assertEquals(3, factory.getRegisteredQueryParams().size(), + "Should have 3 registered handlers"); + assertTrue(factory.getRegisteredQueryParams().contains("lifecycle"), + "Should contain 'lifecycle'"); + assertTrue(factory.getRegisteredQueryParams().contains("notification"), + "Should contain 'notification'"); + } + + @Test + public void testMultipleHandlerRegistration() { + BucketOperationHandler handler1 = new MockBucketOperationHandler("test1"); + BucketOperationHandler handler2 = new MockBucketOperationHandler("test2"); + BucketOperationHandler handler3 = new MockBucketOperationHandler("test3"); + + factory.register(handler1); + factory.register(handler2); + factory.register(handler3); + + assertTrue(factory.hasHandler("test1"), "Handler test1 should exist"); + assertTrue(factory.hasHandler("test2"), "Handler test2 should exist"); + assertTrue(factory.hasHandler("test3"), "Handler test3 should exist"); + + assertEquals(handler1, factory.getHandler("test1")); + assertEquals(handler2, factory.getHandler("test2")); + assertEquals(handler3, factory.getHandler("test3")); + } + + /** + * Mock implementation of BucketOperationHandler for testing. + */ + private static class MockBucketOperationHandler implements BucketOperationHandler { + private final String queryParamName; + + MockBucketOperationHandler(String queryParamName) { + this.queryParamName = queryParamName; + } + + @Override + public Response handlePutRequest(String bucketName, InputStream body, + HttpHeaders headers, + BucketEndpointContext context, + long startNanos) + throws IOException, OS3Exception { + return Response.ok().build(); + } + + @Override + public String getQueryParamName() { + return queryParamName; + } + } +} From 90bc2592c2dd1f176c88e0c142c34ecaa667667c Mon Sep 17 00:00:00 2001 From: echonesis Date: Wed, 24 Dec 2025 14:21:07 +0800 Subject: [PATCH 03/36] fix: update CR --- .../hadoop/ozone/s3/endpoint/AclHandler.java | 76 +++--- .../ozone/s3/endpoint/BucketEndpoint.java | 235 ++---------------- .../s3/endpoint/BucketEndpointContext.java | 108 -------- .../s3/endpoint/BucketOperationHandler.java | 32 +-- .../BucketOperationHandlerFactory.java | 124 --------- .../ozone/s3/endpoint/EndpointBuilder.java | 4 + .../ozone/s3/endpoint/TestAclHandler.java | 131 +++++----- .../endpoint/TestBucketEndpointContext.java | 211 ---------------- .../TestBucketOperationHandlerFactory.java | 175 ------------- 9 files changed, 149 insertions(+), 947 deletions(-) delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java index 9d1e05eaa6b9..499d355e831f 100644 --- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java @@ -26,7 +26,6 @@ import java.util.ArrayList; import java.util.EnumSet; import java.util.List; -import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.ozone.OzoneAcl; @@ -38,47 +37,59 @@ import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.util.Time; import org.apache.http.HttpStatus; +import javax.annotation.PostConstruct; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Handler for bucket ACL operations (?acl query parameter). * Implements PUT operations for bucket Access Control Lists. + * + * This handler extends EndpointBase to inherit all required functionality + * (configuration, headers, request context, audit logging, metrics, etc.). */ -public class AclHandler implements BucketOperationHandler { - +public class AclHandler extends EndpointBase implements BucketOperationHandler { + private static final Logger LOG = LoggerFactory.getLogger(AclHandler.class); - - @Override - public String getQueryParamName() { - return "acl"; + + /** + * Determine if this handler should handle the current request. + * @return true if the request has the "acl" query parameter + */ + private boolean shouldHandle() { + return queryParams().get(QueryParams.ACL) != null; } - + /** * Implement acl put. *

* see: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAcl.html */ @Override - public Response handlePutRequest( - String bucketName, - InputStream body, - HttpHeaders headers, - BucketEndpointContext context, - long startNanos) throws IOException, OS3Exception { + public Response handlePutRequest(String bucketName, InputStream body) + throws IOException, OS3Exception { - String grantReads = headers.getHeaderString(S3Acl.GRANT_READ); - String grantWrites = headers.getHeaderString(S3Acl.GRANT_WRITE); - String grantReadACP = headers.getHeaderString(S3Acl.GRANT_READ_ACP); - String grantWriteACP = headers.getHeaderString(S3Acl.GRANT_WRITE_ACP); - String grantFull = headers.getHeaderString(S3Acl.GRANT_FULL_CONTROL); + if (!shouldHandle()) { + return null; // Not responsible for this request + } + + long startNanos = Time.monotonicNowNanos(); + S3GAction s3GAction = S3GAction.PUT_ACL; + + String grantReads = getHeaders().getHeaderString(S3Acl.GRANT_READ); + String grantWrites = getHeaders().getHeaderString(S3Acl.GRANT_WRITE); + String grantReadACP = getHeaders().getHeaderString(S3Acl.GRANT_READ_ACP); + String grantWriteACP = getHeaders().getHeaderString(S3Acl.GRANT_WRITE_ACP); + String grantFull = getHeaders().getHeaderString(S3Acl.GRANT_FULL_CONTROL); try { - OzoneBucket bucket = context.getBucket(bucketName); - S3Owner.verifyBucketOwnerCondition(headers, bucketName, bucket.getOwner()); - OzoneVolume volume = context.getVolume(); + OzoneBucket bucket = getBucket(bucketName); + S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner()); + OzoneVolume volume = getVolume(); List ozoneAclListOnBucket = new ArrayList<>(); List ozoneAclListOnVolume = new ArrayList<>(); @@ -152,25 +163,26 @@ public Response handlePutRequest( volume.addAcl(acl); } - context.getEndpoint().getMetrics().updatePutAclSuccessStats(startNanos); + getMetrics().updatePutAclSuccessStats(startNanos); + auditWriteSuccess(s3GAction); return Response.status(HttpStatus.SC_OK).build(); } catch (OMException exception) { - context.getEndpoint().getMetrics().updatePutAclFailureStats(startNanos); - context.auditWriteFailure(S3GAction.PUT_ACL, exception); + getMetrics().updatePutAclFailureStats(startNanos); + auditWriteFailure(s3GAction, exception); if (exception.getResult() == ResultCodes.BUCKET_NOT_FOUND) { throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, exception); - } else if (context.isAccessDenied(exception)) { + } else if (isAccessDenied(exception)) { throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, exception); } throw exception; } catch (OS3Exception ex) { - context.getEndpoint().getMetrics().updatePutAclFailureStats(startNanos); - context.auditWriteFailure(S3GAction.PUT_ACL, ex); + getMetrics().updatePutAclFailureStats(startNanos); + auditWriteFailure(s3GAction, ex); throw ex; } } - + /** * Convert ACL string to Ozone ACL on bucket. 
* @@ -258,4 +270,10 @@ private List parseAndConvertAcl(String value, String permission, return ozoneAclList; } + + @Override + @PostConstruct + public void init() { + // No initialization needed for AclHandler + } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index 3c53562420c4..c2ddb126e7f6 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.s3.endpoint; -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder; @@ -25,7 +24,6 @@ import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_LIST_KEYS_SHALLOW_ENABLED_DEFAULT; import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_LIST_MAX_KEYS_LIMIT; import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_LIST_MAX_KEYS_LIMIT_DEFAULT; -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; import static org.apache.hadoop.ozone.s3.util.S3Consts.ENCODING_TYPE; import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapInQuotes; @@ -33,8 +31,8 @@ import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; -import java.util.EnumSet; -import java.util.HashMap; +import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -61,11 +59,9 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneKey; import org.apache.hadoop.ozone.client.OzoneMultipartUploadList; -import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.om.helpers.ErrorInfo; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.s3.commontypes.EncodingTypeObject; import org.apache.hadoop.ozone.s3.commontypes.KeyMetadata; import org.apache.hadoop.ozone.s3.endpoint.MultiDeleteRequest.DeleteObject; @@ -77,7 +73,6 @@ import org.apache.hadoop.ozone.s3.util.ContinueToken; import org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams; import org.apache.hadoop.ozone.s3.util.S3StorageType; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.util.Time; import org.apache.http.HttpStatus; import org.slf4j.Logger; @@ -97,23 +92,11 @@ public class BucketEndpoint extends EndpointBase { private boolean listKeysShallowEnabled; private int maxKeysLimit = 1000; - private static final BucketOperationHandlerFactory HANDLER_FACTORY = - new BucketOperationHandlerFactory(); + private static final List PUT_HANDLERS = + Collections.unmodifiableList(Arrays.asList( + new AclHandler() + )); - @Context - private HttpHeaders headers; - - private BucketEndpointContext context; - - public BucketEndpoint() { - super(); - this.context = new BucketEndpointContext(this); - } - - private BucketEndpointContext getContext() { - return context; - } - /** * Rest endpoint to 
list objects in a specific bucket.
   *
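
As a side note to the refactor in the next hunk: BucketEndpoint#put now walks a chain of BucketOperationHandler instances, each of which returns null when it is not responsible for the request, before falling back to the default create-bucket path. Below is a minimal, self-contained sketch of that dispatch shape; the names (PutDispatchSketch, PutHandler, dispatchPut) are hypothetical stand-ins for illustration only, not the actual Ozone classes.

import java.util.Arrays;
import java.util.List;

// Illustrative stand-ins for the handler chain: a handler returns null when it
// is not responsible for the request, so the dispatcher takes the first
// non-null response and otherwise runs the default create-bucket operation.
final class PutDispatchSketch {

  interface PutHandler {
    // Returns a response string if this handler claims the request, else null.
    String handlePut(String bucketName, String subresource);
  }

  static final class AclPutHandler implements PutHandler {
    @Override
    public String handlePut(String bucketName, String subresource) {
      // Only responsible when the request carries the ?acl subresource.
      if (!"acl".equals(subresource)) {
        return null;
      }
      return "200 OK (PUT ACL on " + bucketName + ")";
    }
  }

  private static final List<PutHandler> PUT_HANDLERS =
      Arrays.<PutHandler>asList(new AclPutHandler());

  static String dispatchPut(String bucketName, String subresource) {
    for (PutHandler handler : PUT_HANDLERS) {
      String response = handler.handlePut(bucketName, subresource);
      if (response != null) {
        return response;                               // a handler claimed the request
      }
    }
    return "200 OK (created bucket " + bucketName + ")"; // default operation
  }

  public static void main(String[] args) {
    System.out.println(dispatchPut("demo-bucket", "acl")); // handled by AclPutHandler
    System.out.println(dispatchPut("demo-bucket", null));  // falls through to create bucket
  }
}

Returning null rather than throwing keeps additional subresource handlers (lifecycle, notification, and so on) pluggable without changing the dispatch loop itself.
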

@@ -327,18 +310,28 @@ public Response put( @PathParam(BUCKET) String bucketName, InputStream body ) throws IOException, OS3Exception { + + // Chain of responsibility: let each handler try to handle the request + for (BucketOperationHandler handler : PUT_HANDLERS) { + Response response = handler.handlePutRequest(bucketName, body); + if (response != null) { + return response; // Handler handled the request + } + } + + // No handler handled the request, execute default operation: create bucket + return handleCreateBucket(bucketName); + } + + /** + * Default PUT bucket operation (create bucket). + */ + private Response handleCreateBucket(String bucketName) + throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.CREATE_BUCKET; try { - final String aclMarker = queryParams().get(QueryParams.ACL); - if (aclMarker != null) { - s3GAction = S3GAction.PUT_ACL; - Response response = putAcl(bucketName, body); - auditWriteSuccess(s3GAction); - return response; - } - String location = createS3Bucket(bucketName); auditWriteSuccess(s3GAction); getMetrics().updateCreateBucketSuccessStats(startNanos); @@ -357,18 +350,6 @@ public Response put( } } - /** - * Map query parameter to corresponding S3GAction for audit logging. - */ - private S3GAction getActionForQueryParam(String queryParam) { - switch (queryParam) { - case "acl": - return S3GAction.PUT_ACL; - default: - return S3GAction.GET_BUCKET; - } - } - public Response listMultipartUploads( String bucketName, String prefix, @@ -603,174 +584,6 @@ public S3BucketAcl getAcl(String bucketName) } } - /** - * Implement acl put. - *

- * see: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAcl.html - */ - public Response putAcl(String bucketName, - InputStream body) throws IOException, OS3Exception { - long startNanos = Time.monotonicNowNanos(); - String grantReads = getHeaders().getHeaderString(S3Acl.GRANT_READ); - String grantWrites = getHeaders().getHeaderString(S3Acl.GRANT_WRITE); - String grantReadACP = getHeaders().getHeaderString(S3Acl.GRANT_READ_ACP); - String grantWriteACP = getHeaders().getHeaderString(S3Acl.GRANT_WRITE_ACP); - String grantFull = getHeaders().getHeaderString(S3Acl.GRANT_FULL_CONTROL); - - try { - OzoneBucket bucket = getBucket(bucketName); - S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner()); - OzoneVolume volume = getVolume(); - - List ozoneAclListOnBucket = new ArrayList<>(); - List ozoneAclListOnVolume = new ArrayList<>(); - - if (grantReads == null && grantWrites == null && grantReadACP == null - && grantWriteACP == null && grantFull == null) { - S3BucketAcl putBucketAclRequest = - new PutBucketAclRequestUnmarshaller().readFrom(body); - // Handle grants in body - ozoneAclListOnBucket.addAll( - S3Acl.s3AclToOzoneNativeAclOnBucket(putBucketAclRequest)); - ozoneAclListOnVolume.addAll( - S3Acl.s3AclToOzoneNativeAclOnVolume(putBucketAclRequest)); - } else { - - // Handle grants in headers - if (grantReads != null) { - ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantReads, - S3Acl.ACLType.READ.getValue())); - ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantReads, - S3Acl.ACLType.READ.getValue())); - } - if (grantWrites != null) { - ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantWrites, - S3Acl.ACLType.WRITE.getValue())); - ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantWrites, - S3Acl.ACLType.WRITE.getValue())); - } - if (grantReadACP != null) { - ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantReadACP, - S3Acl.ACLType.READ_ACP.getValue())); - ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantReadACP, - S3Acl.ACLType.READ_ACP.getValue())); - } - if (grantWriteACP != null) { - ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantWriteACP, - S3Acl.ACLType.WRITE_ACP.getValue())); - ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantWriteACP, - S3Acl.ACLType.WRITE_ACP.getValue())); - } - if (grantFull != null) { - ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantFull, - S3Acl.ACLType.FULL_CONTROL.getValue())); - ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantFull, - S3Acl.ACLType.FULL_CONTROL.getValue())); - } - } - // A put request will reset all previous ACLs on bucket - bucket.setAcl(ozoneAclListOnBucket); - // A put request will reset input user/group's permission on volume - List acls = bucket.getAcls(); - List aclsToRemoveOnVolume = new ArrayList<>(); - List currentAclsOnVolume = volume.getAcls(); - // Remove input user/group's permission from Volume first - if (!currentAclsOnVolume.isEmpty()) { - for (OzoneAcl acl : acls) { - if (acl.getAclScope() == ACCESS) { - aclsToRemoveOnVolume.addAll(OzoneAclUtil.filterAclList( - acl.getName(), acl.getType(), currentAclsOnVolume)); - } - } - for (OzoneAcl acl : aclsToRemoveOnVolume) { - volume.removeAcl(acl); - } - } - // Add new permission on Volume - for (OzoneAcl acl : ozoneAclListOnVolume) { - volume.addAcl(acl); - } - } catch (OMException exception) { - getMetrics().updatePutAclFailureStats(startNanos); - auditWriteFailure(S3GAction.PUT_ACL, exception); - if (exception.getResult() == ResultCodes.BUCKET_NOT_FOUND) { 
- throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, exception); - } else if (isAccessDenied(exception)) { - throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, exception); - } - throw exception; - } catch (OS3Exception ex) { - getMetrics().updatePutAclFailureStats(startNanos); - throw ex; - } - getMetrics().updatePutAclSuccessStats(startNanos); - return Response.status(HttpStatus.SC_OK).build(); - } - - /** - * Example: x-amz-grant-write: \ - * uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", \ - * id="555566667777". - */ - private List getAndConvertAclOnBucket(String value, - String permission) - throws OS3Exception { - List ozoneAclList = new ArrayList<>(); - if (StringUtils.isEmpty(value)) { - return ozoneAclList; - } - String[] subValues = value.split(","); - for (String acl : subValues) { - String[] part = acl.split("="); - if (part.length != 2) { - throw newError(S3ErrorTable.INVALID_ARGUMENT, acl); - } - S3Acl.ACLIdentityType type = - S3Acl.ACLIdentityType.getTypeFromHeaderType(part[0]); - if (type == null || !type.isSupported()) { - LOG.warn("S3 grantee {} is null or not supported", part[0]); - throw newError(NOT_IMPLEMENTED, part[0]); - } - // Build ACL on Bucket - EnumSet aclsOnBucket = S3Acl.getOzoneAclOnBucketFromS3Permission(permission); - OzoneAcl defaultOzoneAcl = OzoneAcl.of( - IAccessAuthorizer.ACLIdentityType.USER, part[1], OzoneAcl.AclScope.DEFAULT, aclsOnBucket - ); - OzoneAcl accessOzoneAcl = OzoneAcl.of(IAccessAuthorizer.ACLIdentityType.USER, part[1], ACCESS, aclsOnBucket); - ozoneAclList.add(defaultOzoneAcl); - ozoneAclList.add(accessOzoneAcl); - } - return ozoneAclList; - } - - private List getAndConvertAclOnVolume(String value, - String permission) - throws OS3Exception { - List ozoneAclList = new ArrayList<>(); - if (StringUtils.isEmpty(value)) { - return ozoneAclList; - } - String[] subValues = value.split(","); - for (String acl : subValues) { - String[] part = acl.split("="); - if (part.length != 2) { - throw newError(S3ErrorTable.INVALID_ARGUMENT, acl); - } - S3Acl.ACLIdentityType type = - S3Acl.ACLIdentityType.getTypeFromHeaderType(part[0]); - if (type == null || !type.isSupported()) { - LOG.warn("S3 grantee {} is null or not supported", part[0]); - throw newError(NOT_IMPLEMENTED, part[0]); - } - // Build ACL on Volume - EnumSet aclsOnVolume = - S3Acl.getOzoneAclOnVolumeFromS3Permission(permission); - OzoneAcl accessOzoneAcl = OzoneAcl.of(IAccessAuthorizer.ACLIdentityType.USER, part[1], ACCESS, aclsOnVolume); - ozoneAclList.add(accessOzoneAcl); - } - return ozoneAclList; - } - private void addKey(ListObjectResponse response, OzoneKey next) { KeyMetadata keyMetadata = new KeyMetadata(); keyMetadata.setKey(EncodingTypeObject.createNullable(next.getName(), diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java deleted file mode 100644 index 0cefb0300b26..000000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import java.io.IOException; -import org.apache.hadoop.ozone.audit.AuditAction; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; - -/** - * Context object that provides access to BucketEndpoint resources. - * This allows handlers to access endpoint functionality without - * tight coupling to the BucketEndpoint class. - * - * Since BucketEndpoint extends EndpointBase, handlers can access: - * - Bucket and Volume operations - * - Methods inherited from EndpointBase - */ -public class BucketEndpointContext { - - private final BucketEndpoint endpoint; - - public BucketEndpointContext(BucketEndpoint endpoint) { - this.endpoint = endpoint; - } - - /** - * Get the bucket object. - * Delegates to BucketEndpoint's inherited getBucket() from EndpointBase. - * - * @param bucketName the bucket name - * @return OzoneBucket instance - * @throws IOException if bucket cannot be retrieved - * @throws OS3Exception if S3-specific error occurs - */ - public OzoneBucket getBucket(String bucketName) - throws IOException, OS3Exception { - return endpoint.getBucket(bucketName); - } - - /** - * Get the volume object. - * Delegates to BucketEndpoint's inherited getVolume() from EndpointBase. - * - * @return OzoneVolume instance - * @throws IOException if volume cannot be retrieved - * @throws OS3Exception if S3-specific error occurs - */ - public OzoneVolume getVolume() throws IOException, OS3Exception { - return endpoint.getVolume(); - } - - /** - * Check if an exception indicates access denied. - * This checks for OMException.ResultCodes that indicate permission issues. - * - * @param ex the exception to check - * @return true if access is denied - */ - public boolean isAccessDenied(Exception ex) { - // Check if it's an OMException with ACCESS_DENIED result code - if (ex instanceof OMException) { - OMException omEx = (OMException) ex; - return omEx.getResult() == OMException.ResultCodes.PERMISSION_DENIED || - omEx.getResult() == OMException.ResultCodes.ACCESS_DENIED; - } - return false; - } - - /** - * Audit a write operation failure. - * Delegates to BucketEndpoint's inherited auditWriteFailure() from EndpointBase. - * - * @param action the audit action being performed - * @param ex the exception that occurred - */ - public void auditWriteFailure(AuditAction action, Throwable ex) { - endpoint.auditWriteFailure(action, ex); - } - - /** - * Get reference to the endpoint for accessing other methods. - * Use with caution - prefer adding specific methods to this context - * rather than exposing the entire endpoint. 
- * - * @return BucketEndpoint instance - */ - protected BucketEndpoint getEndpoint() { - return endpoint; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java index b42c59b257c1..31d771750bae 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java @@ -19,42 +19,30 @@ import java.io.IOException; import java.io.InputStream; -import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import org.apache.hadoop.ozone.s3.exception.OS3Exception; /** - * Interface for handling bucket operations based on query parameters. + * Interface for handling bucket operations using chain of responsibility pattern. * Each implementation handles a specific S3 bucket subresource operation * (e.g., ?acl, ?lifecycle, ?notification). + * + * Implementations should extend EndpointBase to inherit all required functionality + * (configuration, headers, request context, audit logging, metrics, etc.). */ public interface BucketOperationHandler { /** - * Handle the bucket operation. + * Handle the bucket PUT operation if this handler is responsible for it. + * The handler inspects the request (query parameters, headers, etc.) to determine + * if it should handle the request. * * @param bucketName the name of the bucket * @param body the request body stream - * @param headers the HTTP headers - * @param context the endpoint context containing shared dependencies - * @param startNanos the start time in nanoseconds for metrics tracking - * @return HTTP response + * @return Response if this handler handles the request, null otherwise * @throws IOException if an I/O error occurs * @throws OS3Exception if an S3-specific error occurs */ - Response handlePutRequest( - String bucketName, - InputStream body, - HttpHeaders headers, - BucketEndpointContext context, - long startNanos - ) throws IOException, OS3Exception; - - /** - * Get the query parameter name this handler is responsible for. - * For example: "acl", "lifecycle", "notification" - * - * @return the query parameter name - */ - String getQueryParamName(); + Response handlePutRequest(String bucketName, InputStream body) + throws IOException, OS3Exception; } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java deleted file mode 100644 index 1edb19165ded..000000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import com.google.common.annotations.VisibleForTesting; -import java.util.HashMap; -import java.util.Map; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Factory class that manages all bucket operation handlers. - * Provides a registry pattern for looking up handlers based on query parameters. - */ -public class BucketOperationHandlerFactory { - - private static final Logger LOG = - LoggerFactory.getLogger(BucketOperationHandlerFactory.class); - - private final Map handlers = new HashMap<>(); - - /** - * Register all available bucket operation handlers. - */ - public BucketOperationHandlerFactory() { - registerDefaultHandlers(); - } - - /** - * Register default handlers for S3 bucket operations. - */ - private void registerDefaultHandlers() { - register(new AclHandler()); - } - - /** - * Register a bucket operation handler. - * - * @param handler the handler to register - */ - @VisibleForTesting - public void register(BucketOperationHandler handler) { - String queryParam = handler.getQueryParamName(); - if (handlers.containsKey(queryParam)) { - LOG.warn("Overwriting existing handler for query parameter: {}", - queryParam); - } - handlers.put(queryParam, handler); - LOG.debug("Registered handler for query parameter: {}", queryParam); - } - - /** - * Get a handler for the specified query parameter. - * - * @param queryParam the query parameter name - * @return the corresponding handler, or null if not found - */ - public BucketOperationHandler getHandler(String queryParam) { - return handlers.get(queryParam); - } - - /** - * Check if a handler exists for the specified query parameter. - * - * @param queryParam the query parameter name - * @return true if a handler exists - */ - public boolean hasHandler(String queryParam) { - return handlers.containsKey(queryParam); - } - - /** - * Find the first supported query parameter that has a non-null value. - * - * This method iterates through all registered handlers and checks if the - * corresponding query parameter has a non-null value in the provided map. - * - * @param queryParams map of query parameter names to their values - * @return the name of the first query parameter that has both a non-null value - * and a registered handler, or null if none found - */ - public String findFirstSupportedQueryParam(Map queryParams) { - if (queryParams == null || queryParams.isEmpty()) { - return null; - } - - // Iterate through registered handlers and find the first one with a value - for (Map.Entry entry : handlers.entrySet()) { - String paramName = entry.getKey(); - String paramValue = queryParams.get(paramName); - - if (paramValue != null) { - return paramName; - } - } - - return null; - } - - /** - * Get all registered query parameter names. 
- * - * @return set of query parameter names - */ - @VisibleForTesting - public java.util.Set getRegisteredQueryParams() { - return handlers.keySet(); - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java index 13db3962a89b..07132839a6fb 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java @@ -150,6 +150,10 @@ public static EndpointBuilder newBucketEndpointBuilder() { return new EndpointBuilder<>(BucketEndpoint::new); } + public static EndpointBuilder newAclHandlerBuilder() { + return new EndpointBuilder<>(AclHandler::new); + } + public static EndpointBuilder newObjectEndpointBuilder() { return new EndpointBuilder<>(ObjectEndpoint::new); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java index 9e7cc3f3280d..f59c2d80e3f5 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java @@ -20,6 +20,8 @@ import static java.net.HttpURLConnection.HTTP_NOT_IMPLEMENTED; import static java.net.HttpURLConnection.HTTP_OK; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.Mockito.any; import static org.mockito.Mockito.eq; @@ -51,7 +53,6 @@ public class TestAclHandler { private static final String BUCKET_NAME = OzoneConsts.S3_BUCKET; private OzoneClient client; - private BucketEndpointContext context; private AclHandler aclHandler; private HttpHeaders headers; @@ -62,13 +63,11 @@ public void setup() throws IOException { headers = mock(HttpHeaders.class); - BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder() + // Build AclHandler using EndpointBuilder since it extends EndpointBase + aclHandler = EndpointBuilder.newAclHandlerBuilder() .setClient(client) .setHeaders(headers) .build(); - - context = new BucketEndpointContext(bucketEndpoint); - aclHandler = new AclHandler(); } @AfterEach @@ -79,19 +78,37 @@ public void clean() throws IOException { } @Test - public void testGetQueryParamName() { - assertEquals("acl", aclHandler.getQueryParamName(), - "Query param name should be 'acl'"); + public void testHandlePutRequestWithAclQueryParam() throws Exception { + // Set up query parameter to indicate ACL operation + aclHandler.queryParamsForTest().set("acl", ""); + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("id=\"testuser\""); + + Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); + + assertNotNull(response, "Handler should handle request with ?acl param"); + assertEquals(HTTP_OK, response.getStatus(), + "PUT ACL should return 200 OK"); + } + + @Test + public void testHandlePutRequestWithoutAclQueryParam() throws Exception { + // No "acl" query parameter - handler should not handle request + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("id=\"testuser\""); + + Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); + 
+ assertNull(response, "Handler should return null without ?acl param"); } @Test public void testHandlePutRequestWithReadHeader() throws Exception { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"testuser\""); - long startNanos = System.nanoTime(); - Response response = aclHandler.handlePutRequest( - BUCKET_NAME, null, headers, context, startNanos); + Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL should return 200 OK"); @@ -99,12 +116,11 @@ public void testHandlePutRequestWithReadHeader() throws Exception { @Test public void testHandlePutRequestWithWriteHeader() throws Exception { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_WRITE)) .thenReturn("id=\"testuser\""); - long startNanos = System.nanoTime(); - Response response = aclHandler.handlePutRequest( - BUCKET_NAME, null, headers, context, startNanos); + Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL should return 200 OK"); @@ -112,12 +128,11 @@ public void testHandlePutRequestWithWriteHeader() throws Exception { @Test public void testHandlePutRequestWithReadAcpHeader() throws Exception { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ_ACP)) .thenReturn("id=\"testuser\""); - long startNanos = System.nanoTime(); - Response response = aclHandler.handlePutRequest( - BUCKET_NAME, null, headers, context, startNanos); + Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL should return 200 OK"); @@ -125,12 +140,11 @@ public void testHandlePutRequestWithReadAcpHeader() throws Exception { @Test public void testHandlePutRequestWithWriteAcpHeader() throws Exception { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_WRITE_ACP)) .thenReturn("id=\"testuser\""); - long startNanos = System.nanoTime(); - Response response = aclHandler.handlePutRequest( - BUCKET_NAME, null, headers, context, startNanos); + Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL should return 200 OK"); @@ -138,12 +152,11 @@ public void testHandlePutRequestWithWriteAcpHeader() throws Exception { @Test public void testHandlePutRequestWithFullControlHeader() throws Exception { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_FULL_CONTROL)) .thenReturn("id=\"testuser\""); - long startNanos = System.nanoTime(); - Response response = aclHandler.handlePutRequest( - BUCKET_NAME, null, headers, context, startNanos); + Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL should return 200 OK"); @@ -151,14 +164,13 @@ public void testHandlePutRequestWithFullControlHeader() throws Exception { @Test public void testHandlePutRequestWithMultipleHeaders() throws Exception { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"testuser1\""); when(headers.getHeaderString(S3Acl.GRANT_WRITE)) .thenReturn("id=\"testuser2\""); - long startNanos = System.nanoTime(); - Response response = aclHandler.handlePutRequest( - BUCKET_NAME, null, headers, context, startNanos); + Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); 
assertEquals(HTTP_OK, response.getStatus(), "PUT ACL with multiple headers should return 200 OK"); @@ -166,13 +178,12 @@ public void testHandlePutRequestWithMultipleHeaders() throws Exception { @Test public void testHandlePutRequestWithUnsupportedGranteeType() { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("uri=\"http://example.com\""); - long startNanos = System.nanoTime(); OS3Exception exception = assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, - startNanos); + aclHandler.handlePutRequest(BUCKET_NAME, null); }, "Should throw OS3Exception for unsupported grantee type"); assertEquals(HTTP_NOT_IMPLEMENTED, exception.getHttpCode(), @@ -181,13 +192,12 @@ public void testHandlePutRequestWithUnsupportedGranteeType() { @Test public void testHandlePutRequestWithEmailAddressType() { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("emailAddress=\"test@example.com\""); - long startNanos = System.nanoTime(); OS3Exception exception = assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, - startNanos); + aclHandler.handlePutRequest(BUCKET_NAME, null); }, "Should throw OS3Exception for email address grantee type"); assertEquals(HTTP_NOT_IMPLEMENTED, exception.getHttpCode(), @@ -196,18 +206,18 @@ public void testHandlePutRequestWithEmailAddressType() { @Test public void testHandlePutRequestBucketNotFound() { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"testuser\""); - long startNanos = System.nanoTime(); assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest("nonexistent-bucket", null, headers, - context, startNanos); + aclHandler.handlePutRequest("nonexistent-bucket", null); }, "Should throw OS3Exception for non-existent bucket"); } @Test public void testHandlePutRequestWithBody() throws Exception { + aclHandler.queryParamsForTest().set("acl", ""); String aclXml = "\n" + "\n" + " \n" + @@ -228,9 +238,7 @@ public void testHandlePutRequestWithBody() throws Exception { InputStream body = new ByteArrayInputStream( aclXml.getBytes(StandardCharsets.UTF_8)); - long startNanos = System.nanoTime(); - Response response = aclHandler.handlePutRequest( - BUCKET_NAME, body, headers, context, startNanos); + Response response = aclHandler.handlePutRequest(BUCKET_NAME, body); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL with body should return 200 OK"); @@ -238,24 +246,22 @@ public void testHandlePutRequestWithBody() throws Exception { @Test public void testHandlePutRequestWithInvalidHeaderFormat() { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("invalid-format"); - long startNanos = System.nanoTime(); assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, - startNanos); + aclHandler.handlePutRequest(BUCKET_NAME, null); }, "Should throw OS3Exception for invalid header format"); } @Test public void testHandlePutRequestWithMultipleGrantees() throws Exception { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"user1\",id=\"user2\""); - long startNanos = System.nanoTime(); - Response response = aclHandler.handlePutRequest( - BUCKET_NAME, null, headers, context, startNanos); + Response response = 
aclHandler.handlePutRequest(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL with multiple grantees should return 200 OK"); @@ -263,15 +269,15 @@ public void testHandlePutRequestWithMultipleGrantees() throws Exception { @Test public void testPutAclReplacesExistingAcls() throws Exception { + aclHandler.queryParamsForTest().set("acl", ""); + // Set initial ACL when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"user1\""); when(headers.getHeaderString(S3Acl.GRANT_WRITE)) .thenReturn(null); - long startNanos = System.nanoTime(); - aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, - startNanos); + aclHandler.handlePutRequest(BUCKET_NAME, null); // Replace with new ACL when(headers.getHeaderString(S3Acl.GRANT_READ)) @@ -279,8 +285,7 @@ public void testPutAclReplacesExistingAcls() throws Exception { when(headers.getHeaderString(S3Acl.GRANT_WRITE)) .thenReturn("id=\"user2\""); - Response response = aclHandler.handlePutRequest( - BUCKET_NAME, null, headers, context, startNanos); + Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL should replace existing ACLs"); @@ -288,55 +293,47 @@ public void testPutAclReplacesExistingAcls() throws Exception { @Test public void testAuditLoggingOnBucketNotFound() throws Exception { - // Create a spy of BucketEndpoint to verify audit logging - BucketEndpoint spyEndpoint = spy(EndpointBuilder.newBucketEndpointBuilder() + // Create a spy of AclHandler to verify audit logging + AclHandler spyHandler = spy(EndpointBuilder.newAclHandlerBuilder() .setClient(client) .setHeaders(headers) .build()); - BucketEndpointContext spyContext = new BucketEndpointContext(spyEndpoint); - + spyHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"testuser\""); - long startNanos = System.nanoTime(); - // This should throw exception for non-existent bucket assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest("nonexistent-bucket", null, headers, - spyContext, startNanos); + spyHandler.handlePutRequest("nonexistent-bucket", null); }); // Verify that auditWriteFailure was called with PUT_ACL action - // Note: getBucket() wraps OMException as OS3Exception, so we catch OS3Exception - verify(spyEndpoint, times(1)).auditWriteFailure( + verify(spyHandler, times(1)).auditWriteFailure( eq(S3GAction.PUT_ACL), any(OS3Exception.class)); } @Test public void testAuditLoggingOnInvalidArgument() throws Exception { - // Create a spy of BucketEndpoint to verify audit logging - BucketEndpoint spyEndpoint = spy(EndpointBuilder.newBucketEndpointBuilder() + // Create a spy of AclHandler to verify audit logging + AclHandler spyHandler = spy(EndpointBuilder.newAclHandlerBuilder() .setClient(client) .setHeaders(headers) .build()); - BucketEndpointContext spyContext = new BucketEndpointContext(spyEndpoint); + spyHandler.queryParamsForTest().set("acl", ""); // Invalid format will trigger OS3Exception when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("invalid-format"); - long startNanos = System.nanoTime(); - assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest(BUCKET_NAME, null, headers, - spyContext, startNanos); + spyHandler.handlePutRequest(BUCKET_NAME, null); }); // Verify that auditWriteFailure was called with PUT_ACL action - verify(spyEndpoint, times(1)).auditWriteFailure( + verify(spyHandler, times(1)).auditWriteFailure( eq(S3GAction.PUT_ACL), any(OS3Exception.class)); } diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java deleted file mode 100644 index 7d27ae23d664..000000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.mock; - -import java.io.IOException; -import javax.ws.rs.core.HttpHeaders; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; - -/** - * Test class for BucketEndpointContext. 
- */ -public class TestBucketEndpointContext { - - private static final String BUCKET_NAME = OzoneConsts.S3_BUCKET; - private OzoneClient client; - private BucketEndpointContext context; - - @BeforeEach - public void setup() throws IOException { - client = new OzoneClientStub(); - client.getObjectStore().createS3Bucket(BUCKET_NAME); - - HttpHeaders headers = mock(HttpHeaders.class); - - BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder() - .setClient(client) - .setHeaders(headers) - .build(); - - context = new BucketEndpointContext(bucketEndpoint); - } - - @AfterEach - public void clean() throws IOException { - if (client != null) { - client.close(); - } - } - - @Test - public void testGetBucket() throws IOException, OS3Exception { - OzoneBucket bucket = context.getBucket(BUCKET_NAME); - assertNotNull(bucket, "Bucket should not be null"); - assertEquals(BUCKET_NAME, bucket.getName(), - "Bucket name should match"); - } - - @Test - public void testGetBucketNotFound() { - assertThrows(OS3Exception.class, () -> { - context.getBucket("nonexistent-bucket"); - }, "Should throw OS3Exception for non-existent bucket"); - } - - @Test - public void testGetVolume() throws IOException, OS3Exception { - OzoneVolume volume = context.getVolume(); - assertNotNull(volume, "Volume should not be null"); - } - - @Test - public void testIsAccessDeniedWithPermissionDenied() { - OMException exception = new OMException("Access denied", - OMException.ResultCodes.PERMISSION_DENIED); - - assertTrue(context.isAccessDenied(exception), - "Should return true for PERMISSION_DENIED"); - } - - @Test - public void testIsAccessDeniedWithAccessDenied() { - OMException exception = new OMException("Access denied", - OMException.ResultCodes.ACCESS_DENIED); - - assertTrue(context.isAccessDenied(exception), - "Should return true for ACCESS_DENIED"); - } - - @Test - public void testIsAccessDeniedWithBucketNotFound() { - OMException exception = new OMException("Bucket not found", - OMException.ResultCodes.BUCKET_NOT_FOUND); - - assertFalse(context.isAccessDenied(exception), - "Should return false for BUCKET_NOT_FOUND"); - } - - @Test - public void testIsAccessDeniedWithKeyNotFound() { - OMException exception = new OMException("Key not found", - OMException.ResultCodes.KEY_NOT_FOUND); - - assertFalse(context.isAccessDenied(exception), - "Should return false for KEY_NOT_FOUND"); - } - - @Test - public void testIsAccessDeniedWithIOException() { - IOException exception = new IOException("I/O error"); - - assertFalse(context.isAccessDenied(exception), - "Should return false for non-OMException"); - } - - @Test - public void testIsAccessDeniedWithNullException() { - assertFalse(context.isAccessDenied(null), - "Should return false for null exception"); - } - - @Test - public void testIsAccessDeniedWithRuntimeException() { - RuntimeException exception = new RuntimeException("Runtime error"); - - assertFalse(context.isAccessDenied(exception), - "Should return false for RuntimeException"); - } - - @Test - public void testGetEndpoint() { - BucketEndpoint endpoint = context.getEndpoint(); - assertNotNull(endpoint, "Endpoint should not be null"); - } - - @Test - public void testContextDelegatesCorrectly() throws IOException, OS3Exception { - // Test that context properly delegates to endpoint methods - OzoneBucket bucket = context.getBucket(BUCKET_NAME); - OzoneVolume volume = context.getVolume(); - - assertNotNull(bucket, "Delegated getBucket should work"); - assertNotNull(volume, "Delegated getVolume should work"); - } - - 
@Test - public void testIsAccessDeniedWithMultipleResultCodes() { - // Test all OMException result codes to ensure only access-related ones - // return true - - OMException[] accessDeniedExceptions = { - new OMException("", OMException.ResultCodes.PERMISSION_DENIED), - new OMException("", OMException.ResultCodes.ACCESS_DENIED) - }; - - for (OMException ex : accessDeniedExceptions) { - assertTrue(context.isAccessDenied(ex), - "Should return true for " + ex.getResult()); - } - - OMException[] otherExceptions = { - new OMException("", OMException.ResultCodes.BUCKET_NOT_FOUND), - new OMException("", OMException.ResultCodes.KEY_NOT_FOUND), - new OMException("", OMException.ResultCodes.VOLUME_NOT_FOUND), - new OMException("", OMException.ResultCodes.INTERNAL_ERROR) - }; - - for (OMException ex : otherExceptions) { - assertFalse(context.isAccessDenied(ex), - "Should return false for " + ex.getResult()); - } - } - - @Test - public void testBucketOperationsWithContext() throws Exception { - // Create a second bucket to test multiple operations - String secondBucket = "test-bucket-2"; - client.getObjectStore().createS3Bucket(secondBucket); - - // Test getting different buckets through context - OzoneBucket bucket1 = context.getBucket(BUCKET_NAME); - OzoneBucket bucket2 = context.getBucket(secondBucket); - - assertNotNull(bucket1, "First bucket should not be null"); - assertNotNull(bucket2, "Second bucket should not be null"); - assertEquals(BUCKET_NAME, bucket1.getName(), - "First bucket name should match"); - assertEquals(secondBucket, bucket2.getName(), - "Second bucket name should match"); - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java deleted file mode 100644 index 1aeb8dc85fb4..000000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertTrue; - -import java.io.IOException; -import java.io.InputStream; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Response; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; - -/** - * Test class for BucketOperationHandlerFactory. - */ -public class TestBucketOperationHandlerFactory { - - private BucketOperationHandlerFactory factory; - - @BeforeEach - public void setup() { - factory = new BucketOperationHandlerFactory(); - } - - @Test - public void testDefaultHandlersRegistered() { - // Verify that the default ACL handler is registered - assertTrue(factory.hasHandler("acl"), - "ACL handler should be registered by default"); - assertNotNull(factory.getHandler("acl"), - "ACL handler should not be null"); - } - - @Test - public void testGetHandlerForAcl() { - BucketOperationHandler handler = factory.getHandler("acl"); - assertNotNull(handler, "ACL handler should exist"); - assertTrue(handler instanceof AclHandler, - "Handler should be an instance of AclHandler"); - assertEquals("acl", handler.getQueryParamName(), - "Handler query param name should be 'acl'"); - } - - @Test - public void testGetHandlerForNonExistentParam() { - BucketOperationHandler handler = factory.getHandler("nonexistent"); - assertNull(handler, "Handler for non-existent param should be null"); - } - - @Test - public void testHasHandlerReturnsTrueForExisting() { - assertTrue(factory.hasHandler("acl"), - "Should return true for existing handler"); - } - - @Test - public void testHasHandlerReturnsFalseForNonExisting() { - assertFalse(factory.hasHandler("nonexistent"), - "Should return false for non-existing handler"); - } - - @Test - public void testRegisterNewHandler() { - // Create a mock handler - BucketOperationHandler mockHandler = new MockBucketOperationHandler("test"); - - // Register the handler - factory.register(mockHandler); - - // Verify registration - assertTrue(factory.hasHandler("test"), - "Newly registered handler should exist"); - assertEquals(mockHandler, factory.getHandler("test"), - "Retrieved handler should be the same instance"); - } - - @Test - public void testRegisterOverwritesExistingHandler() { - // Register a new handler with the same query param as ACL - BucketOperationHandler mockHandler = new MockBucketOperationHandler("acl"); - - factory.register(mockHandler); - - // Verify the handler was overwritten - BucketOperationHandler handler = factory.getHandler("acl"); - assertEquals(mockHandler, handler, - "Handler should be the newly registered one"); - assertTrue(handler instanceof MockBucketOperationHandler, - "Handler should be an instance of MockBucketOperationHandler"); - } - - @Test - public void testGetRegisteredQueryParams() { - // Default should have at least "acl" - assertTrue(factory.getRegisteredQueryParams().contains("acl"), - "Registered query params should contain 'acl'"); - - // Register additional handlers - factory.register(new MockBucketOperationHandler("lifecycle")); - factory.register(new MockBucketOperationHandler("notification")); - - // Verify all are present - assertEquals(3, factory.getRegisteredQueryParams().size(), - 
"Should have 3 registered handlers"); - assertTrue(factory.getRegisteredQueryParams().contains("lifecycle"), - "Should contain 'lifecycle'"); - assertTrue(factory.getRegisteredQueryParams().contains("notification"), - "Should contain 'notification'"); - } - - @Test - public void testMultipleHandlerRegistration() { - BucketOperationHandler handler1 = new MockBucketOperationHandler("test1"); - BucketOperationHandler handler2 = new MockBucketOperationHandler("test2"); - BucketOperationHandler handler3 = new MockBucketOperationHandler("test3"); - - factory.register(handler1); - factory.register(handler2); - factory.register(handler3); - - assertTrue(factory.hasHandler("test1"), "Handler test1 should exist"); - assertTrue(factory.hasHandler("test2"), "Handler test2 should exist"); - assertTrue(factory.hasHandler("test3"), "Handler test3 should exist"); - - assertEquals(handler1, factory.getHandler("test1")); - assertEquals(handler2, factory.getHandler("test2")); - assertEquals(handler3, factory.getHandler("test3")); - } - - /** - * Mock implementation of BucketOperationHandler for testing. - */ - private static class MockBucketOperationHandler implements BucketOperationHandler { - private final String queryParamName; - - MockBucketOperationHandler(String queryParamName) { - this.queryParamName = queryParamName; - } - - @Override - public Response handlePutRequest(String bucketName, InputStream body, - HttpHeaders headers, - BucketEndpointContext context, - long startNanos) - throws IOException, OS3Exception { - return Response.ok().build(); - } - - @Override - public String getQueryParamName() { - return queryParamName; - } - } -} From caa72cfb7f6edf74d8cccc0e5427aa2681d08739 Mon Sep 17 00:00:00 2001 From: echonesis Date: Wed, 24 Dec 2025 14:45:28 +0800 Subject: [PATCH 04/36] fix: checkstyle fix --- .../java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java index 499d355e831f..1252df4db5e8 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java @@ -26,6 +26,7 @@ import java.util.ArrayList; import java.util.EnumSet; import java.util.List; +import javax.annotation.PostConstruct; import javax.ws.rs.core.Response; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.ozone.OzoneAcl; @@ -41,7 +42,6 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.util.Time; import org.apache.http.HttpStatus; -import javax.annotation.PostConstruct; import org.slf4j.Logger; import org.slf4j.LoggerFactory; From 2cedbbfbb25b2a61daa50237e50b39e4f36a81b6 Mon Sep 17 00:00:00 2001 From: echonesis Date: Wed, 24 Dec 2025 16:36:38 +0800 Subject: [PATCH 05/36] fix: test update --- .../ozone/s3/endpoint/BucketEndpoint.java | 17 ++++++++++------- .../hadoop/ozone/s3/endpoint/EndpointBase.java | 13 +++++++++++++ 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index c2ddb126e7f6..d7a583323987 100644 --- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -31,8 +31,6 @@ import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -92,10 +90,7 @@ public class BucketEndpoint extends EndpointBase { private boolean listKeysShallowEnabled; private int maxKeysLimit = 1000; - private static final List PUT_HANDLERS = - Collections.unmodifiableList(Arrays.asList( - new AclHandler() - )); + private List putHandlers; /** * Rest endpoint to list objects in a specific bucket. @@ -312,7 +307,7 @@ public Response put( ) throws IOException, OS3Exception { // Chain of responsibility: let each handler try to handle the request - for (BucketOperationHandler handler : PUT_HANDLERS) { + for (BucketOperationHandler handler : putHandlers) { Response response = handler.handlePutRequest(bucketName, body); if (response != null) { return response; // Handler handled the request @@ -610,5 +605,13 @@ public void init() { maxKeysLimit = getOzoneConfiguration().getInt( OZONE_S3G_LIST_MAX_KEYS_LIMIT, OZONE_S3G_LIST_MAX_KEYS_LIMIT_DEFAULT); + + // Initialize PUT handlers + AclHandler aclHandler = new AclHandler(); + copyDependenciesTo(aclHandler); + aclHandler.initialization(); + + putHandlers = new ArrayList<>(); + putHandlers.add(aclHandler); } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java index dbc91c1e55e7..ce13f7545443 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -543,6 +543,19 @@ void setOzoneConfiguration(OzoneConfiguration conf) { ozoneConfiguration = conf; } + /** + * Copy dependencies from this endpoint to another endpoint. + * Used for initializing handler instances. + */ + protected void copyDependenciesTo(EndpointBase target) { + target.setClient(this.client); + target.setOzoneConfiguration(this.ozoneConfiguration); + target.setContext(this.context); + target.setHeaders(this.headers); + target.setRequestIdentifier(this.requestIdentifier); + target.setSignatureInfo(this.signatureInfo); + } + protected OzoneConfiguration getOzoneConfiguration() { return ozoneConfiguration; } From 18f94a51dd36c7e4ab5d0eee2388b33a70f42ef2 Mon Sep 17 00:00:00 2001 From: Priyesh Karatha <35779060+priyeshkaratha@users.noreply.github.com> Date: Thu, 18 Dec 2025 18:39:58 +0530 Subject: [PATCH 06/36] HDDS-14010. [Recon] Endpoint to retrieve pending deletion metrics from DataNodes, SCM, and OM. 
(#9413) --- .../src/main/resources/ozone-default.xml | 18 + .../dist/src/main/compose/ozone/docker-config | 2 +- .../TestStorageDistributionEndpoint.java | 190 ++++++++-- .../recon/MetricsServiceProviderFactory.java | 30 +- .../ozone/recon/ReconServerConfigKeys.java | 12 +- .../apache/hadoop/ozone/recon/ReconUtils.java | 30 ++ .../recon/api/DataNodeMetricsService.java | 353 ++++++++++++++++++ .../recon/api/PendingDeletionEndpoint.java | 110 ++++++ .../recon/api/ReconGlobalMetricsService.java | 31 +- .../types/DataNodeMetricsServiceResponse.java | 130 +++++++ .../types/DatanodePendingDeletionMetrics.java | 59 +++ .../recon/api/types/ScmPendingDeletion.java | 58 +++ .../recon/spi/MetricsServiceProvider.java | 5 +- .../spi/impl/JmxServiceProviderImpl.java | 117 ++++++ .../impl/PrometheusServiceProviderImpl.java | 39 +- .../tasks/DataNodeMetricsCollectionTask.java | 87 +++++ 16 files changed, 1194 insertions(+), 77 deletions(-) create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/DataNodeMetricsService.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PendingDeletionEndpoint.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DataNodeMetricsServiceResponse.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodePendingDeletionMetrics.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ScmPendingDeletion.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/JmxServiceProviderImpl.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DataNodeMetricsCollectionTask.java diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index aed31414abfd..7a67f27cc49e 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -3504,6 +3504,24 @@ If the buffer overflows, task reinitialization will be triggered. + + ozone.recon.dn.metrics.collection.minimum.api.delay + 30s + OZONE, RECON, DN + + Minimum delay in API to start a new task for Jmx collection. + It behaves like a rate limiter to avoid unnecessary task creation. + + + + ozone.recon.dn.metrics.collection.timeout + 10m + OZONE, RECON, DN + + Maximum time taken for the api to complete. + If it exceeds pending tasks will be cancelled. 
+ + ozone.scm.datanode.admin.monitor.interval 30s diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-config b/hadoop-ozone/dist/src/main/compose/ozone/docker-config index 738495873bfa..0631cba616d4 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-config @@ -58,7 +58,7 @@ OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http OZONE-SITE.XML_hdds.container.ratis.datastream.enabled=true OZONE-SITE.XML_ozone.fs.hsync.enabled=true - +OZONE-SITE.XML_ozone.recon.dn.metrics.collection.minimum.api.delay=5s OZONE_CONF_DIR=/etc/hadoop OZONE_LOG_DIR=/var/log/hadoop diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestStorageDistributionEndpoint.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestStorageDistributionEndpoint.java index c43958fe1549..5ac779d6596b 100644 --- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestStorageDistributionEndpoint.java +++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestStorageDistributionEndpoint.java @@ -22,19 +22,23 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT; import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_DBTRANSACTIONBUFFER_FLUSH_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_GAP; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.recon.TestReconEndpointUtil.getReconWebAddress; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import com.fasterxml.jackson.databind.ObjectMapper; import java.time.Duration; import java.util.Collections; import java.util.List; +import java.util.Map; +import java.util.Objects; import java.util.concurrent.TimeUnit; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataOutputStream; @@ -46,6 +50,8 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfig; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.HddsDatanodeService; @@ -66,6 +72,9 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.recon.api.DataNodeMetricsService; +import org.apache.hadoop.ozone.recon.api.types.DataNodeMetricsServiceResponse; +import org.apache.hadoop.ozone.recon.api.types.ScmPendingDeletion; import 
org.apache.hadoop.ozone.recon.api.types.StorageCapacityDistributionResponse; import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.ozone.test.GenericTestUtils; @@ -100,6 +109,7 @@ public class TestStorageDistributionEndpoint { private static final ObjectMapper MAPPER = new ObjectMapper(); private static final String STORAGE_DIST_ENDPOINT = "/api/v1/storageDistribution"; + private static final String PENDING_DELETION_ENDPOINT = "/api/v1/pendingDeletion"; static List replicationConfigs() { return Collections.singletonList( @@ -110,17 +120,14 @@ static List replicationConfigs() { @BeforeAll public static void setup() throws Exception { conf = new OzoneConfiguration(); - conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_TIMEOUT, 100, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - 100, TimeUnit.MILLISECONDS); + conf.setTimeDuration(OZONE_DIR_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); + conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, TimeUnit.MILLISECONDS); conf.setLong(OZONE_SCM_HA_RATIS_SNAPSHOT_GAP, 1L); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 50, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, - TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 50, TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); + conf.setTimeDuration(OZONE_SCM_HA_DBTRANSACTIONBUFFER_FLUSH_INTERVAL, 500, TimeUnit.MILLISECONDS); + conf.set(ReconServerConfigKeys.OZONE_RECON_DN_METRICS_COLLECTION_MINIMUM_API_DELAY, "5s"); // Enhanced SCM configuration for faster block deletion processing ScmConfig scmConfig = conf.getObject(ScmConfig.class); @@ -129,18 +136,9 @@ public static void setup() throws Exception { conf.set(HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, "0s"); // Enhanced DataNode configuration to move pending deletion from SCM to DN faster - DatanodeConfiguration dnConf = - conf.getObject(DatanodeConfiguration.class); - dnConf.setBlockDeletionInterval(Duration.ofMillis(100)); - // Increase block delete queue limit to allow more queued commands on DN - dnConf.setBlockDeleteQueueLimit(50); - // Reduce the interval for delete command worker processing - dnConf.setBlockDeleteCommandWorkerInterval(Duration.ofMillis(100)); - // Increase blocks deleted per interval to speed up deletion - dnConf.setBlockDeletionLimit(5000); + DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); + dnConf.setBlockDeletionInterval(Duration.ofMillis(30000)); conf.setFromObject(dnConf); - // Increase DN delete threads for faster parallel processing - conf.setInt("ozone.datanode.block.delete.threads.max", 10); recon = new ReconService(conf); cluster = MiniOzoneCluster.newBuilder(conf) @@ -190,19 +188,133 @@ public void testStorageDistributionEndpoint(ReplicationConfig replicationConfig) } } waitForKeysCreated(replicationConfig); - Thread.sleep(10000); - StringBuilder urlBuilder = new StringBuilder(); - urlBuilder.append(getReconWebAddress(conf)) - .append(STORAGE_DIST_ENDPOINT); - String response = TestReconEndpointUtil.makeHttpCall(conf, urlBuilder); - StorageCapacityDistributionResponse storageResponse = - MAPPER.readValue(response, StorageCapacityDistributionResponse.class); - - assertEquals(20, 
storageResponse.getGlobalNamespace().getTotalKeys()); - assertEquals(60, storageResponse.getGlobalNamespace().getTotalUsedSpace()); - assertEquals(0, storageResponse.getUsedSpaceBreakDown().getOpenKeyBytes()); - assertEquals(60, storageResponse.getUsedSpaceBreakDown().getCommittedKeyBytes()); - assertEquals(3, storageResponse.getDataNodeUsage().size()); + GenericTestUtils.waitFor(this::verifyStorageDistributionAfterKeyCreation, 1000, 30000); + closeAllContainers(); + fs.delete(dir1, true); + GenericTestUtils.waitFor(this::verifyPendingDeletionAfterKeyDeletionOm, 1000, 30000); + GenericTestUtils.waitFor(this::verifyPendingDeletionAfterKeyDeletionScm, 2000, 30000); + GenericTestUtils.waitFor(() -> + Objects.requireNonNull(scm.getClientProtocolServer().getDeletedBlockSummary()).getTotalBlockCount() == 0, + 1000, 30000); + GenericTestUtils.waitFor(this::verifyPendingDeletionAfterKeyDeletionDn, 2000, 60000); + GenericTestUtils.waitFor(this::verifyPendingDeletionClearsAtDn, 2000, 60000); + cluster.getHddsDatanodes().get(0).stop(); + GenericTestUtils.waitFor(this::verifyPendingDeletionAfterKeyDeletionOnDnFailure, 2000, 60000); + } + + private boolean verifyStorageDistributionAfterKeyCreation() { + try { + StringBuilder urlBuilder = new StringBuilder(); + urlBuilder.append(getReconWebAddress(conf)).append(STORAGE_DIST_ENDPOINT); + String response = TestReconEndpointUtil.makeHttpCall(conf, urlBuilder); + StorageCapacityDistributionResponse storageResponse = + MAPPER.readValue(response, StorageCapacityDistributionResponse.class); + + assertEquals(20, storageResponse.getGlobalNamespace().getTotalKeys()); + assertEquals(60, storageResponse.getGlobalNamespace().getTotalUsedSpace()); + assertEquals(0, storageResponse.getUsedSpaceBreakDown().getOpenKeyBytes()); + assertEquals(60, storageResponse.getUsedSpaceBreakDown().getCommittedKeyBytes()); + assertEquals(3, storageResponse.getDataNodeUsage().size()); + + return true; + } catch (Exception e) { + LOG.debug("Waiting for storage distribution assertions to pass", e); + return false; + } + } + + private boolean verifyPendingDeletionAfterKeyDeletionOm() { + try { + syncDataFromOM(); + StringBuilder urlBuilder = new StringBuilder(); + urlBuilder.append(getReconWebAddress(conf)).append(PENDING_DELETION_ENDPOINT).append("?component=om"); + String response = TestReconEndpointUtil.makeHttpCall(conf, urlBuilder); + Map pendingDeletionMap = MAPPER.readValue(response, Map.class); + assertEquals(30L, pendingDeletionMap.get("totalSize").longValue()); + assertEquals(30L, pendingDeletionMap.get("pendingDirectorySize").longValue() + + pendingDeletionMap.get("pendingKeySize").longValue()); + return true; + } catch (Exception e) { + LOG.debug("Waiting for storage distribution assertions to pass", e); + return false; + } + } + + private boolean verifyPendingDeletionAfterKeyDeletionScm() { + try { + StringBuilder urlBuilder = new StringBuilder(); + urlBuilder.append(getReconWebAddress(conf)).append(PENDING_DELETION_ENDPOINT).append("?component=scm"); + String response = TestReconEndpointUtil.makeHttpCall(conf, urlBuilder); + ScmPendingDeletion pendingDeletion = MAPPER.readValue(response, ScmPendingDeletion.class); + assertEquals(30, pendingDeletion.getTotalReplicatedBlockSize()); + assertEquals(10, pendingDeletion.getTotalBlocksize()); + assertEquals(10, pendingDeletion.getTotalBlocksCount()); + return true; + } catch (Throwable e) { + LOG.debug("Waiting for storage distribution assertions to pass", e); + return false; + } + } + + private boolean 
verifyPendingDeletionAfterKeyDeletionDn() { + try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); + StringBuilder urlBuilder = new StringBuilder(); + urlBuilder.append(getReconWebAddress(conf)).append(PENDING_DELETION_ENDPOINT).append("?component=dn"); + String response = TestReconEndpointUtil.makeHttpCall(conf, urlBuilder); + DataNodeMetricsServiceResponse pendingDeletion = MAPPER.readValue(response, DataNodeMetricsServiceResponse.class); + assertNotNull(pendingDeletion); + assertEquals(30, pendingDeletion.getTotalPendingDeletionSize()); + assertEquals(DataNodeMetricsService.MetricCollectionStatus.FINISHED, pendingDeletion.getStatus()); + assertEquals(pendingDeletion.getTotalNodesQueried(), pendingDeletion.getPendingDeletionPerDataNode().size()); + assertEquals(0, pendingDeletion.getTotalNodeQueryFailures()); + pendingDeletion.getPendingDeletionPerDataNode().forEach(dn -> { + assertEquals(10, dn.getPendingBlockSize()); + }); + return true; + } catch (Throwable e) { + LOG.debug("Waiting for storage distribution assertions to pass", e); + return false; + } + } + + private boolean verifyPendingDeletionClearsAtDn() { + try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); + StringBuilder urlBuilder = new StringBuilder(); + urlBuilder.append(getReconWebAddress(conf)).append(PENDING_DELETION_ENDPOINT).append("?component=dn"); + String response = TestReconEndpointUtil.makeHttpCall(conf, urlBuilder); + DataNodeMetricsServiceResponse pendingDeletion = MAPPER.readValue(response, DataNodeMetricsServiceResponse.class); + assertNotNull(pendingDeletion); + assertEquals(0, pendingDeletion.getTotalPendingDeletionSize()); + assertEquals(DataNodeMetricsService.MetricCollectionStatus.FINISHED, pendingDeletion.getStatus()); + assertEquals(pendingDeletion.getTotalNodesQueried(), pendingDeletion.getPendingDeletionPerDataNode().size()); + assertEquals(0, pendingDeletion.getTotalNodeQueryFailures()); + pendingDeletion.getPendingDeletionPerDataNode().forEach(dn -> { + assertEquals(0, dn.getPendingBlockSize()); + }); + return true; + } catch (Throwable e) { + LOG.debug("Waiting for storage distribution assertions to pass", e); + return false; + } + } + + private boolean verifyPendingDeletionAfterKeyDeletionOnDnFailure() { + try { + StringBuilder urlBuilder = new StringBuilder(); + urlBuilder.append(getReconWebAddress(conf)).append(PENDING_DELETION_ENDPOINT).append("?component=dn"); + String response = TestReconEndpointUtil.makeHttpCall(conf, urlBuilder); + DataNodeMetricsServiceResponse pendingDeletion = MAPPER.readValue(response, DataNodeMetricsServiceResponse.class); + assertNotNull(pendingDeletion); + assertEquals(1, pendingDeletion.getTotalNodeQueryFailures()); + assertTrue(pendingDeletion.getPendingDeletionPerDataNode() + .stream() + .anyMatch(dn -> dn.getPendingBlockSize() == -1)); + return true; + } catch (Throwable e) { + return false; + } } private void verifyBlocksCreated( @@ -286,4 +398,12 @@ public static void tear() { cluster.shutdown(); } } + + private static void closeAllContainers() { + for (ContainerInfo container : + scm.getContainerManager().getContainers()) { + scm.getEventQueue().fireEvent(SCMEvents.CLOSE_CONTAINER, + container.containerID()); + } + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/MetricsServiceProviderFactory.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/MetricsServiceProviderFactory.java index d09c01ea72f9..ee99ec9fa2e2 100644 --- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/MetricsServiceProviderFactory.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/MetricsServiceProviderFactory.java @@ -17,12 +17,20 @@ package org.apache.hadoop.ozone.recon; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_METRICS_HTTP_CONNECTION_REQUEST_TIMEOUT; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_METRICS_HTTP_CONNECTION_REQUEST_TIMEOUT_DEFAULT; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_METRICS_HTTP_CONNECTION_TIMEOUT; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_METRICS_HTTP_CONNECTION_TIMEOUT_DEFAULT; + +import java.util.concurrent.TimeUnit; import javax.inject.Inject; import javax.inject.Singleton; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.recon.ReconConfigKeys; +import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.ozone.recon.spi.MetricsServiceProvider; +import org.apache.hadoop.ozone.recon.spi.impl.JmxServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.PrometheusServiceProviderImpl; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,12 +47,23 @@ public class MetricsServiceProviderFactory { private OzoneConfiguration configuration; private ReconUtils reconUtils; + private URLConnectionFactory connectionFactory; @Inject public MetricsServiceProviderFactory(OzoneConfiguration configuration, ReconUtils reconUtils) { this.configuration = configuration; this.reconUtils = reconUtils; + int connectionTimeout = (int) configuration.getTimeDuration( + OZONE_RECON_METRICS_HTTP_CONNECTION_TIMEOUT, + OZONE_RECON_METRICS_HTTP_CONNECTION_TIMEOUT_DEFAULT, + TimeUnit.MILLISECONDS); + int connectionRequestTimeout = (int) configuration.getTimeDuration( + OZONE_RECON_METRICS_HTTP_CONNECTION_REQUEST_TIMEOUT, + OZONE_RECON_METRICS_HTTP_CONNECTION_REQUEST_TIMEOUT_DEFAULT, + TimeUnit.MILLISECONDS); + connectionFactory = URLConnectionFactory.newDefaultURLConnectionFactory(connectionTimeout, + connectionRequestTimeout, configuration); } /** @@ -62,11 +81,20 @@ public MetricsServiceProvider getMetricsServiceProvider() { String.format("Choosing Prometheus as Metrics service provider " + "with configured endpoint: %s", prometheusEndpoint)); } - return new PrometheusServiceProviderImpl(configuration, reconUtils); + return new PrometheusServiceProviderImpl(configuration, reconUtils, connectionFactory); } return null; } + /** + * Returns the configured MetricsServiceProvider implementation for Jmx. + * @param endpoint + * @return MetricsServiceProvider instance for Jmx + */ + public MetricsServiceProvider getJmxMetricsServiceProvider(String endpoint) { + return new JmxServiceProviderImpl(reconUtils, endpoint, connectionFactory); + } + /** * Returns the Prometheus endpoint if configured. Otherwise returns null. 
* diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java index ba357f4ba145..dc75200214ff 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java @@ -123,14 +123,14 @@ public final class ReconServerConfigKeys { public static final String OZONE_RECON_METRICS_HTTP_CONNECTION_TIMEOUT_DEFAULT = - "10s"; + "30s"; public static final String OZONE_RECON_METRICS_HTTP_CONNECTION_REQUEST_TIMEOUT = "ozone.recon.metrics.http.connection.request.timeout"; public static final String - OZONE_RECON_METRICS_HTTP_CONNECTION_REQUEST_TIMEOUT_DEFAULT = "10s"; + OZONE_RECON_METRICS_HTTP_CONNECTION_REQUEST_TIMEOUT_DEFAULT = "60s"; public static final String OZONE_RECON_SCM_CONTAINER_THRESHOLD = "ozone.recon.scm.container.threshold"; @@ -213,6 +213,14 @@ public final class ReconServerConfigKeys { public static final int OZONE_RECON_SCM_CLIENT_FAILOVER_MAX_RETRY_DEFAULT = 3; + public static final String OZONE_RECON_DN_METRICS_COLLECTION_MINIMUM_API_DELAY = + "ozone.recon.dn.metrics.collection.minimum.api.delay"; + public static final String OZONE_RECON_DN_METRICS_COLLECTION_MINIMUM_API_DELAY_DEFAULT = "30s"; + + public static final String OZONE_RECON_DN_METRICS_COLLECTION_TIMEOUT = + "ozone.recon.dn.metrics.collection.timeout"; + public static final String OZONE_RECON_DN_METRICS_COLLECTION_TIMEOUT_DEFAULT = "10m"; + /** * Private constructor for utility class. */ diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index ccc92648f117..0594169f03c4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -847,4 +847,34 @@ public static String constructObjectPathWithPrefix(long... ids) { } return pathBuilder.toString(); } + + public static Map getMetricsData(List> metrics, String beanName) { + if (metrics == null || StringUtils.isEmpty(beanName)) { + return null; + } + for (Map item :metrics) { + if (beanName.equals(item.get("name"))) { + return item; + } + } + return null; + } + + public static long extractLongMetricValue(Map metrics, String keyName) { + if (metrics == null || StringUtils.isEmpty(keyName)) { + return -1; + } + Object value = metrics.get(keyName); + if (value instanceof Number) { + return ((Number) value).longValue(); + } + if (value instanceof String) { + try { + return Long.parseLong((String) value); + } catch (NumberFormatException e) { + log.error("Failed to parse long value for key: {} with value: {}", keyName, value, e); + } + } + return -1; + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/DataNodeMetricsService.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/DataNodeMetricsService.java new file mode 100644 index 000000000000..c37e8e65ace7 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/DataNodeMetricsService.java @@ -0,0 +1,353 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.api; + +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DN_METRICS_COLLECTION_MINIMUM_API_DELAY; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DN_METRICS_COLLECTION_MINIMUM_API_DELAY_DEFAULT; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DN_METRICS_COLLECTION_TIMEOUT; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DN_METRICS_COLLECTION_TIMEOUT_DEFAULT; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import javax.annotation.PreDestroy; +import javax.inject.Inject; +import javax.inject.Singleton; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.hdds.server.http.HttpConfig; +import org.apache.hadoop.ozone.recon.MetricsServiceProviderFactory; +import org.apache.hadoop.ozone.recon.api.types.DataNodeMetricsServiceResponse; +import org.apache.hadoop.ozone.recon.api.types.DatanodePendingDeletionMetrics; +import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; +import org.apache.hadoop.ozone.recon.tasks.DataNodeMetricsCollectionTask; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Service for collecting and managing DataNode pending deletion metrics. + * Collects metrics asynchronously from all datanodes and provides aggregated results. 
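+ *
+ * Callers obtain results through {@code getCollectedMetrics()}, which triggers {@code startTask()}
+ * and returns immediately; while collection is {@code IN_PROGRESS} the response carries only the
+ * status, so callers poll until it becomes {@code FINISHED}. Successive collections are rate
+ * limited by {@code ozone.recon.dn.metrics.collection.minimum.api.delay}.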
+ */ +@Singleton +public class DataNodeMetricsService { + + private static final Logger LOG = LoggerFactory.getLogger(DataNodeMetricsService.class); + private static final int MAX_POOL_SIZE = 500; + private static final int KEEP_ALIVE_TIME = 5; + private static final int POLL_INTERVAL_MS = 200; + + private final ThreadPoolExecutor executorService; + private final ReconNodeManager reconNodeManager; + private final boolean httpsEnabled; + private final int minimumApiDelayMs; + private final MetricsServiceProviderFactory metricsServiceProviderFactory; + private final int maximumTaskTimeout; + private final AtomicBoolean isRunning = new AtomicBoolean(false); + + private MetricCollectionStatus currentStatus = MetricCollectionStatus.NOT_STARTED; + private List pendingDeletionList; + private Long totalPendingDeletion = 0L; + private int totalNodesQueried; + private int totalNodesFailed; + private AtomicLong lastCollectionEndTime = new AtomicLong(0L); + + @Inject + public DataNodeMetricsService( + OzoneStorageContainerManager reconSCM, + OzoneConfiguration config, + MetricsServiceProviderFactory metricsServiceProviderFactory) { + + this.reconNodeManager = (ReconNodeManager) reconSCM.getScmNodeManager(); + this.httpsEnabled = HttpConfig.getHttpPolicy(config).isHttpsEnabled(); + this.minimumApiDelayMs = (int) config.getTimeDuration( + OZONE_RECON_DN_METRICS_COLLECTION_MINIMUM_API_DELAY, + OZONE_RECON_DN_METRICS_COLLECTION_MINIMUM_API_DELAY_DEFAULT, + TimeUnit.MILLISECONDS); + this.maximumTaskTimeout = (int) config.getTimeDuration(OZONE_RECON_DN_METRICS_COLLECTION_TIMEOUT, + OZONE_RECON_DN_METRICS_COLLECTION_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); + this.metricsServiceProviderFactory = metricsServiceProviderFactory; + this.lastCollectionEndTime.set(-minimumApiDelayMs); + int corePoolSize = Runtime.getRuntime().availableProcessors() * 2; + this.executorService = new ThreadPoolExecutor( + corePoolSize, MAX_POOL_SIZE, + KEEP_ALIVE_TIME, TimeUnit.SECONDS, + new LinkedBlockingQueue<>(), + new ThreadFactoryBuilder() + .setNameFormat("DataNodeMetricsCollector-%d") + .build()); + } + + /** + * Starts the metrics collection task if not already running and rate limit allows. + */ + public void startTask() { + // Check if already running + if (!isRunning.compareAndSet(false, true)) { + LOG.warn("Metrics collection already in progress, skipping"); + return; + } + + // Check rate limit + if (System.currentTimeMillis() - lastCollectionEndTime.get() < minimumApiDelayMs) { + LOG.debug("Rate limit active, skipping collection (delay: {}ms)", minimumApiDelayMs); + isRunning.set(false); + return; + } + + Set nodes = reconNodeManager.getNodeStats().keySet(); + if (nodes.isEmpty()) { + LOG.warn("No datanodes found to query"); + resetState(); + currentStatus = MetricCollectionStatus.FINISHED; + isRunning.set(false); + return; + } + + // Set status immediately before starting async collection + currentStatus = MetricCollectionStatus.IN_PROGRESS; + LOG.debug("Starting metrics collection for {} datanodes", nodes.size()); + + // Run a collection asynchronously so status can be queried + CompletableFuture.runAsync(() -> collectMetrics(nodes), executorService) + .exceptionally(throwable -> { + LOG.error("Metrics collection failed", throwable); + synchronized (DataNodeMetricsService.this) { + currentStatus = MetricCollectionStatus.FINISHED; + isRunning.set(false); + } + return null; + }); + } + + /** + * Collects metrics from all datanodes. Processes completed tasks first, waits for all. 
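+ * One {@code DataNodeMetricsCollectionTask} is submitted per datanode; the futures are polled,
+ * tasks exceeding the configured collection timeout are cancelled and counted as failures
+ * (pending size -1), and the aggregated results are published to the shared state.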
+ */ + private void collectMetrics(Set nodes) { + try { + CollectionContext context = submitMetricsCollectionTasks(nodes); + processCollectionFutures(context); + updateFinalState(context); + } catch (Exception e) { + resetState(); + currentStatus = MetricCollectionStatus.FAILED; + isRunning.set(false); + } + } + + /** + * Submits metrics collection tasks for all given datanodes. + * @return A context object containing tracking structures for the submitted futures. + */ + private CollectionContext submitMetricsCollectionTasks(Set nodes) { + // Initialize state + List results = new ArrayList<>(nodes.size()); + // Submit all collection tasks + Map> futures = new HashMap<>(); + + long submissionTime = System.currentTimeMillis(); + for (DatanodeDetails node : nodes) { + DataNodeMetricsCollectionTask task = new DataNodeMetricsCollectionTask( + node, httpsEnabled, metricsServiceProviderFactory); + DatanodePendingDeletionMetrics key = new DatanodePendingDeletionMetrics( + node.getHostName(), node.getUuidString(), -1L); // -1 is used as placeholder/failed status + futures.put(key, executorService.submit(task)); + } + int totalQueried = futures.size(); + LOG.debug("Submitted {} collection tasks", totalQueried); + return new CollectionContext(totalQueried, futures, submissionTime, results); + } + + /** + * Polls the submitted futures, enforcing timeouts and aggregating results until all are complete. + */ + private void processCollectionFutures(CollectionContext context) { + // Poll with timeout enforcement + while (!context.futures.isEmpty()) { + long currentTime = System.currentTimeMillis(); + Iterator>> + iterator = context.futures.entrySet().iterator(); + boolean processedAny = false; + while (iterator.hasNext()) { + Map.Entry> entry = + iterator.next(); + DatanodePendingDeletionMetrics key = entry.getKey(); + Future future = entry.getValue(); + // Check for timeout + if (checkAndHandleTimeout(key, future, context, currentTime)) { + iterator.remove(); + processedAny = true; + continue; + } + // Check for completion + if (future.isDone()) { + handleCompletedFuture(key, future, context); + iterator.remove(); + processedAny = true; + } + } + // Sleep before the next poll only if there are remaining futures and nothing was processed + if (!context.futures.isEmpty() && !processedAny) { + try { + Thread.sleep(POLL_INTERVAL_MS); + } catch (InterruptedException e) { + LOG.warn("Collection polling interrupted"); + Thread.currentThread().interrupt(); + break; + } + } + } + } + + private boolean checkAndHandleTimeout( + DatanodePendingDeletionMetrics key, Future future, + CollectionContext context, long currentTime) { + long elapsedTime = currentTime - context.submissionTime; + if (elapsedTime > maximumTaskTimeout && !future.isDone()) { + LOG.warn("Task for datanode {} [{}] timed out after {}ms", + key.getHostName(), key.getDatanodeUuid(), elapsedTime); + future.cancel(true); // Interrupt the task + context.failed++; + context.results.add(key); // Add with -1 (failed) + return true; + } + return false; + } + + private void handleCompletedFuture( + DatanodePendingDeletionMetrics key, Future future, + CollectionContext context) { + try { + DatanodePendingDeletionMetrics result = future.get(); + if (result.getPendingBlockSize() < 0) { + context.failed++; + } else { + context.totalPending += result.getPendingBlockSize(); + } + context.results.add(result); + LOG.debug("Processed result from {}", key.getHostName()); + } catch (ExecutionException | InterruptedException e) { + String errorType = e instanceof 
InterruptedException ? "interrupted" : "execution failed"; + LOG.error("Task {} for datanode {} [{}] failed", + errorType, key.getHostName(), key.getDatanodeUuid(), e); + context.failed++; + context.results.add(key); + if (e instanceof InterruptedException) { + Thread.currentThread().interrupt(); + } + } + } + + /** + * Atomically updates the class's shared state with the results from the collection context. + */ + private void updateFinalState(CollectionContext context) { + // Update shared state atomically + synchronized (this) { + pendingDeletionList = context.results; + totalPendingDeletion = context.totalPending; + totalNodesQueried = context.totalQueried; + totalNodesFailed = context.failed; + currentStatus = MetricCollectionStatus.FINISHED; + isRunning.set(false); + lastCollectionEndTime.set(System.currentTimeMillis()); + } + + LOG.debug("Metrics collection completed. Queried: {}, Failed: {}", + context.totalQueried, context.failed); + } + + /** + * Resets the collection state. + */ + private void resetState() { + pendingDeletionList = new ArrayList<>(); + totalPendingDeletion = 0L; + totalNodesQueried = 0; + totalNodesFailed = 0; + } + + public DataNodeMetricsServiceResponse getCollectedMetrics() { + startTask(); + if (currentStatus == MetricCollectionStatus.FINISHED) { + return DataNodeMetricsServiceResponse.newBuilder() + .setStatus(currentStatus) + .setPendingDeletion(pendingDeletionList) + .setTotalPendingDeletionSize(totalPendingDeletion) + .setTotalNodesQueried(totalNodesQueried) + .setTotalNodeQueryFailures(totalNodesFailed) + .build(); + } + return DataNodeMetricsServiceResponse.newBuilder() + .setStatus(currentStatus) + .build(); + } + + @PreDestroy + public void shutdown() { + LOG.info("Shutting down DataNodeMetricsService"); + executorService.shutdown(); + try { + if (!executorService.awaitTermination(10, TimeUnit.SECONDS)) { + executorService.shutdownNow(); + } + } catch (InterruptedException e) { + executorService.shutdownNow(); + Thread.currentThread().interrupt(); + } + } + + /** + * Status of metric collection task. + */ + public enum MetricCollectionStatus { + NOT_STARTED, IN_PROGRESS, FINISHED, FAILED + } + + private static class CollectionContext { + private final int totalQueried; + private final Map> futures; + private final List results; + private final long submissionTime; + private long totalPending = 0L; + private int failed = 0; + + CollectionContext( + int totalQueried, + Map> futures, + long submissionTime, + List results) { + this.totalQueried = totalQueried; + this.futures = futures; + this.submissionTime = submissionTime; + this.results = results; + } + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PendingDeletionEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PendingDeletionEndpoint.java new file mode 100644 index 000000000000..2fbb9c6bb8d3 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PendingDeletionEndpoint.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.api; + +import java.util.Map; +import javax.inject.Inject; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.Response; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.ozone.recon.api.types.DataNodeMetricsServiceResponse; +import org.apache.hadoop.ozone.recon.api.types.ScmPendingDeletion; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * REST API endpoint that provides metrics and information related to + * pending deletions. It responds to requests on the "/pendingDeletion" path + * and produces application/json responses. + */ +@Path("/pendingDeletion") +@Produces("application/json") +@AdminOnly +public class PendingDeletionEndpoint { + private static final Logger LOG = LoggerFactory.getLogger(PendingDeletionEndpoint.class); + private final ReconGlobalMetricsService reconGlobalMetricsService; + private final DataNodeMetricsService dataNodeMetricsService; + private final StorageContainerLocationProtocol scmClient; + + @Inject + public PendingDeletionEndpoint( + ReconGlobalMetricsService reconGlobalMetricsService, + DataNodeMetricsService dataNodeMetricsService, + StorageContainerLocationProtocol scmClient) { + this.reconGlobalMetricsService = reconGlobalMetricsService; + this.dataNodeMetricsService = dataNodeMetricsService; + this.scmClient = scmClient; + } + + @GET + public Response getPendingDeletionByComponent(@QueryParam("component") String component) { + if (component == null || component.isEmpty()) { + return Response.status(Response.Status.BAD_REQUEST) + .entity("component query parameter is required").build(); + } + final String normalizedComponent = component.trim().toLowerCase(); + switch (normalizedComponent) { + case "dn": + return handleDataNodeMetrics(); + case "scm": + return handleScmPendingDeletion(); + case "om": + return handleOmPendingDeletion(); + default: + return Response.status(Response.Status.BAD_REQUEST) + .entity("component query parameter must be one of dn, scm, om").build(); + } + } + + private Response handleDataNodeMetrics() { + DataNodeMetricsServiceResponse response = dataNodeMetricsService.getCollectedMetrics(); + if (response.getStatus() == DataNodeMetricsService.MetricCollectionStatus.FINISHED) { + return Response.ok(response).build(); + } else { + return Response.accepted(response).build(); + } + } + + private Response handleScmPendingDeletion() { + try { + HddsProtos.DeletedBlocksTransactionSummary summary = scmClient.getDeletedBlockSummary(); + if (summary == null) { + return Response.noContent() + .build(); + } + ScmPendingDeletion pendingDeletion = new ScmPendingDeletion( + summary.getTotalBlockSize(), + summary.getTotalBlockReplicatedSize(), + summary.getTotalBlockCount()); + return Response.ok(pendingDeletion).build(); + } catch (Exception e) { + LOG.error("Failed to get pending deletion info from SCM", e); + ScmPendingDeletion pendingDeletion = new 
ScmPendingDeletion(-1L, -1L, -1L); + return Response.ok(pendingDeletion).build(); + } + } + + private Response handleOmPendingDeletion() { + Map pendingDeletion = reconGlobalMetricsService.calculatePendingSizes(); + return Response.ok(pendingDeletion).build(); + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ReconGlobalMetricsService.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ReconGlobalMetricsService.java index b08727aab40d..0796fafd90bc 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ReconGlobalMetricsService.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ReconGlobalMetricsService.java @@ -219,11 +219,32 @@ public KeyInsightInfoResponse getPendingForDeletionDirInfo(int limit, String pre public Map calculatePendingSizes() { Map result = new HashMap<>(); - KeyInsightInfoResponse response = getPendingForDeletionDirInfo(-1, ""); - Map pendingKeySize = getDeletedKeySummary(); - result.put("pendingDirectorySize", response.getReplicatedDataSize()); - result.put("pendingKeySize", pendingKeySize.getOrDefault("totalReplicatedDataSize", 0L)); - result.put("totalSize", result.get("pendingDirectorySize") + result.get("pendingKeySize")); + long pendingDirectorySize = -1L; + long pendingKeySizeValue = -1L; + + //Getting pending deletion directory size + try { + KeyInsightInfoResponse response = getPendingForDeletionDirInfo(-1, ""); + pendingDirectorySize = response.getReplicatedDataSize(); + } catch (Exception ex) { + LOG.error("Error calculating pending directory size", ex); + } + result.put("pendingDirectorySize", pendingDirectorySize); + + //Getting pending deletion key size + try { + Map pendingKeySizeMap = getDeletedKeySummary(); + pendingKeySizeValue = pendingKeySizeMap.getOrDefault("totalReplicatedDataSize", 0L); + } catch (Exception ex) { + LOG.error("Error calculating pending key size", ex); + } + result.put("pendingKeySize", pendingKeySizeValue); + + if (pendingDirectorySize < 0 || pendingKeySizeValue < 0) { + result.put("totalSize", -1L); + } else { + result.put("totalSize", pendingDirectorySize + pendingKeySizeValue); + } return result; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DataNodeMetricsServiceResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DataNodeMetricsServiceResponse.java new file mode 100644 index 000000000000..bd1284d60ee0 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DataNodeMetricsServiceResponse.java @@ -0,0 +1,130 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.recon.api.types; + +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.List; +import org.apache.hadoop.ozone.recon.api.DataNodeMetricsService; + +/** + * Represents a response from the DataNodeMetricsService. + * This class encapsulates the result of a metrics collection task, + * including the collection status, total pending deletions across all data nodes, + * and details about pending deletions for each data node. + * + * Instances of this class are created using the {@link Builder} class. + */ +public class DataNodeMetricsServiceResponse { + @JsonProperty("status") + private DataNodeMetricsService.MetricCollectionStatus status; + @JsonProperty("totalPendingDeletionSize") + private Long totalPendingDeletionSize; + @JsonProperty("pendingDeletionPerDataNode") + private List pendingDeletionPerDataNode; + @JsonProperty("totalNodesQueried") + private int totalNodesQueried; + @JsonProperty("totalNodeQueriesFailed") + private long totalNodeQueryFailures; + + public DataNodeMetricsServiceResponse(Builder builder) { + this.status = builder.status; + this.totalPendingDeletionSize = builder.totalPendingDeletionSize; + this.pendingDeletionPerDataNode = builder.pendingDeletion; + this.totalNodesQueried = builder.totalNodesQueried; + this.totalNodeQueryFailures = builder.totalNodeQueryFailures; + } + + public DataNodeMetricsServiceResponse() { + this.status = DataNodeMetricsService.MetricCollectionStatus.NOT_STARTED; + this.totalPendingDeletionSize = 0L; + this.pendingDeletionPerDataNode = null; + this.totalNodesQueried = 0; + this.totalNodeQueryFailures = 0; + } + + public DataNodeMetricsService.MetricCollectionStatus getStatus() { + return status; + } + + public Long getTotalPendingDeletionSize() { + return totalPendingDeletionSize; + } + + public List getPendingDeletionPerDataNode() { + return pendingDeletionPerDataNode; + } + + public int getTotalNodesQueried() { + return totalNodesQueried; + } + + public long getTotalNodeQueryFailures() { + return totalNodeQueryFailures; + } + + public static Builder newBuilder() { + return new Builder(); + } + + /** + * Builder class for constructing instances of {@link DataNodeMetricsServiceResponse}. + * This class provides a fluent interface for setting the various properties + * of a DataNodeMetricsServiceResponse object before creating a new immutable instance. + * The Builder is designed to be used in a staged and intuitive manner. + * The properties that can be configured include: + * - Status of the metric collection process. + * - Total number of blocks pending deletion across all data nodes. + * - Metrics related to pending deletions from individual data nodes. 
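+ *
+ * Illustrative usage (variable names are placeholders):
+ * {@code DataNodeMetricsServiceResponse.newBuilder().setStatus(status)
+ * .setPendingDeletion(perNodeMetrics).setTotalPendingDeletionSize(totalSize).build()}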
+ */ + public static final class Builder { + private DataNodeMetricsService.MetricCollectionStatus status; + private Long totalPendingDeletionSize; + private List pendingDeletion; + private int totalNodesQueried; + private long totalNodeQueryFailures; + + public Builder setStatus(DataNodeMetricsService.MetricCollectionStatus status) { + this.status = status; + return this; + } + + public Builder setTotalPendingDeletionSize(Long totalPendingDeletionSize) { + this.totalPendingDeletionSize = totalPendingDeletionSize; + return this; + } + + public Builder setPendingDeletion(List pendingDeletion) { + this.pendingDeletion = pendingDeletion; + return this; + } + + public Builder setTotalNodesQueried(int totalNodesQueried) { + this.totalNodesQueried = totalNodesQueried; + return this; + } + + public Builder setTotalNodeQueryFailures(long totalNodeQueryFailures) { + this.totalNodeQueryFailures = totalNodeQueryFailures; + return this; + } + + public DataNodeMetricsServiceResponse build() { + return new DataNodeMetricsServiceResponse(this); + } + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodePendingDeletionMetrics.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodePendingDeletionMetrics.java new file mode 100644 index 000000000000..964add573096 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodePendingDeletionMetrics.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.api.types; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * Represents pending deletion metrics for a datanode. + * This class encapsulates information about blocks pending deletion on a specific datanode. 
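+ *
+ * A {@code pendingBlockSize} of -1 is used as a sentinel for a datanode whose metrics could not
+ * be collected (query failure or timeout).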
+ */ +public class DatanodePendingDeletionMetrics { + + @JsonProperty("hostName") + private final String hostName; + + @JsonProperty("datanodeUuid") + private final String datanodeUuid; + + @JsonProperty("pendingBlockSize") + private final long pendingBlockSize; + + @JsonCreator + public DatanodePendingDeletionMetrics( + @JsonProperty("hostName") String hostName, + @JsonProperty("datanodeUuid") String datanodeUuid, + @JsonProperty("pendingBlockSize") long pendingBlockSize) { + this.hostName = hostName; + this.datanodeUuid = datanodeUuid; + this.pendingBlockSize = pendingBlockSize; + } + + public String getHostName() { + return hostName; + } + + public long getPendingBlockSize() { + return pendingBlockSize; + } + + public String getDatanodeUuid() { + return datanodeUuid; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ScmPendingDeletion.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ScmPendingDeletion.java new file mode 100644 index 000000000000..357189857a3a --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ScmPendingDeletion.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.api.types; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * Represents metadata related to pending deletions in the storage container manager (SCM). + * This class encapsulates information such as the total block size, the total size of replicated blocks, + * and the total number of blocks awaiting deletion. 
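+ *
+ * Values of -1 indicate that the deleted-block summary could not be retrieved from SCM.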
+ */ +public class ScmPendingDeletion { + @JsonProperty("totalBlocksize") + private final long totalBlocksize; + @JsonProperty("totalReplicatedBlockSize") + private final long totalReplicatedBlockSize; + @JsonProperty("totalBlocksCount") + private final long totalBlocksCount; + + public ScmPendingDeletion() { + this.totalBlocksize = 0; + this.totalReplicatedBlockSize = 0; + this.totalBlocksCount = 0; + } + + public ScmPendingDeletion(long size, long replicatedSize, long totalBlocks) { + this.totalBlocksize = size; + this.totalReplicatedBlockSize = replicatedSize; + this.totalBlocksCount = totalBlocks; + } + + public long getTotalBlocksize() { + return totalBlocksize; + } + + public long getTotalReplicatedBlockSize() { + return totalReplicatedBlockSize; + } + + public long getTotalBlocksCount() { + return totalBlocksCount; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/MetricsServiceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/MetricsServiceProvider.java index 3d0aa3140671..7ddba027bfec 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/MetricsServiceProvider.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/MetricsServiceProvider.java @@ -19,6 +19,7 @@ import java.net.HttpURLConnection; import java.util.List; +import java.util.Map; import org.apache.hadoop.ozone.recon.metrics.Metric; /** @@ -48,12 +49,12 @@ HttpURLConnection getMetricsResponse(String api, String queryString) List getMetricsInstant(String queryString) throws Exception; /** - * Returns a list of {@link Metric} for the given ranged query. + * Returns a list of {@link Map} for the given query. * * @param queryString query string with metric name, start time, end time, * step and other filters. * @return List of Json map of metrics response. * @throws Exception exception */ - List getMetricsRanged(String queryString) throws Exception; + List> getMetrics(String queryString) throws Exception; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/JmxServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/JmxServiceProviderImpl.java new file mode 100644 index 000000000000..4249d215e33e --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/JmxServiceProviderImpl.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.recon.spi.impl; + +import java.io.InputStream; +import java.net.HttpURLConnection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import javax.inject.Singleton; +import javax.ws.rs.core.Response; +import org.apache.hadoop.hdds.server.JsonUtils; +import org.apache.hadoop.hdfs.web.URLConnectionFactory; +import org.apache.hadoop.ozone.recon.ReconUtils; +import org.apache.hadoop.ozone.recon.metrics.Metric; +import org.apache.hadoop.ozone.recon.spi.MetricsServiceProvider; + +/** + * Implementation of the Jmx Metrics Service provider. + */ +@Singleton +public class JmxServiceProviderImpl implements MetricsServiceProvider { + + public static final String JMX_INSTANT_QUERY_API = "qry"; + private URLConnectionFactory connectionFactory; + private final String jmxEndpoint; + private ReconUtils reconUtils; + + public JmxServiceProviderImpl( + ReconUtils reconUtils, + String jmxEndpoint, + URLConnectionFactory connectionFactory) { + // Remove the trailing slash from endpoint url. + if (jmxEndpoint != null && jmxEndpoint.endsWith("/")) { + jmxEndpoint = jmxEndpoint.substring(0, jmxEndpoint.length() - 1); + } + this.jmxEndpoint = jmxEndpoint; + this.reconUtils = reconUtils; + this.connectionFactory = connectionFactory; + } + + /** + * Returns {@link HttpURLConnection} after querying Metrics endpoint for the + * given metric. + * + * @param api api. + * @param queryString query string with metric name and other filters. + * @return HttpURLConnection + * @throws Exception exception + */ + @Override + public HttpURLConnection getMetricsResponse(String api, String queryString) + throws Exception { + String url = String.format("%s?%s=%s", jmxEndpoint, api, + queryString); + return reconUtils.makeHttpCall(connectionFactory, + url, false); + } + + @Override + public List getMetricsInstant(String queryString) throws Exception { + return Collections.emptyList(); + } + + /** + * Returns a list of {@link Metric} for the given instant query. + * + * @param queryString query string with metric name and other filters. + * @return List of Json map of metrics response. + * @throws Exception exception + */ + @Override + public List> getMetrics(String queryString) + throws Exception { + return getMetrics(JMX_INSTANT_QUERY_API, queryString); + } + + /** + * Returns a list of {@link Metric} for the given api and query string. + * + * @param api api + * @param queryString query string with metric name and other filters. + * @return List of Json map of metrics response. 
+ * @throws Exception + */ + private List> getMetrics(String api, String queryString) + throws Exception { + HttpURLConnection urlConnection = + getMetricsResponse(api, queryString); + if (Response.Status.fromStatusCode(urlConnection.getResponseCode()) + .getFamily() == Response.Status.Family.SUCCESSFUL) { + try (InputStream inputStream = urlConnection.getInputStream()) { + Map jsonMap = JsonUtils.getDefaultMapper().readValue(inputStream, Map.class); + Object beansObj = jsonMap.get("beans"); + if (beansObj instanceof List) { + return (List>) beansObj; + } + } + } + return Collections.emptyList(); + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/PrometheusServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/PrometheusServiceProviderImpl.java index 64c00490613e..0a784c50c90f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/PrometheusServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/PrometheusServiceProviderImpl.java @@ -17,19 +17,14 @@ package org.apache.hadoop.ozone.recon.spi.impl; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_METRICS_HTTP_CONNECTION_REQUEST_TIMEOUT; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_METRICS_HTTP_CONNECTION_REQUEST_TIMEOUT_DEFAULT; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_METRICS_HTTP_CONNECTION_TIMEOUT; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_METRICS_HTTP_CONNECTION_TIMEOUT_DEFAULT; - import com.fasterxml.jackson.databind.ObjectMapper; import java.io.InputStream; import java.net.HttpURLConnection; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.TreeMap; -import java.util.concurrent.TimeUnit; import javax.inject.Singleton; import javax.ws.rs.core.Response; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -49,7 +44,6 @@ public class PrometheusServiceProviderImpl implements MetricsServiceProvider { public static final String PROMETHEUS_INSTANT_QUERY_API = "query"; - public static final String PROMETHEUS_RANGED_QUERY_API = "query_range"; private static final Logger LOG = LoggerFactory.getLogger(PrometheusServiceProviderImpl.class); @@ -58,21 +52,12 @@ public class PrometheusServiceProviderImpl private final String prometheusEndpoint; private ReconUtils reconUtils; - public PrometheusServiceProviderImpl(OzoneConfiguration configuration, - ReconUtils reconUtils) { - - int connectionTimeout = (int) configuration.getTimeDuration( - OZONE_RECON_METRICS_HTTP_CONNECTION_TIMEOUT, - OZONE_RECON_METRICS_HTTP_CONNECTION_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - int connectionRequestTimeout = (int) configuration.getTimeDuration( - OZONE_RECON_METRICS_HTTP_CONNECTION_REQUEST_TIMEOUT, - OZONE_RECON_METRICS_HTTP_CONNECTION_REQUEST_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); + public PrometheusServiceProviderImpl( + OzoneConfiguration configuration, + ReconUtils reconUtils, + URLConnectionFactory connectionFactory) { - connectionFactory = - URLConnectionFactory.newDefaultURLConnectionFactory(connectionTimeout, - connectionRequestTimeout, configuration); + this.connectionFactory = connectionFactory; String endpoint = configuration.getTrimmed(getEndpointConfigKey()); // Remove the trailing slash from endpoint url. 
@@ -123,17 +108,9 @@ public List getMetricsInstant(String queryString) return getMetrics(PROMETHEUS_INSTANT_QUERY_API, queryString); } - /** - * Returns a list of {@link Metric} for the given ranged query. - * - * @param queryString query string with metric name, start time, end time, - * step and other filters. - * @return List of Json map of metrics response. - * @throws Exception exception - */ @Override - public List getMetricsRanged(String queryString) throws Exception { - return getMetrics(PROMETHEUS_RANGED_QUERY_API, queryString); + public List> getMetrics(String queryString) throws Exception { + return Collections.emptyList(); } /** diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DataNodeMetricsCollectionTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DataNodeMetricsCollectionTask.java new file mode 100644 index 000000000000..f12627a202a7 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DataNodeMetricsCollectionTask.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.Callable; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; +import org.apache.hadoop.ozone.recon.MetricsServiceProviderFactory; +import org.apache.hadoop.ozone.recon.ReconUtils; +import org.apache.hadoop.ozone.recon.api.types.DatanodePendingDeletionMetrics; +import org.apache.hadoop.ozone.recon.spi.MetricsServiceProvider; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Task for collecting pending deletion metrics from a DataNode using JMX. + * This class implements the Callable interface and retrieves pending deletion + * information (e.g., total pending block bytes) from a DataNode by invoking its + * JMX endpoint. The metrics are parsed and encapsulated in the + * {@link DatanodePendingDeletionMetrics} object. 
+ */ +public class DataNodeMetricsCollectionTask implements Callable { + + private static final Logger LOG = LoggerFactory.getLogger(DataNodeMetricsCollectionTask.class); + private final DatanodeDetails nodeDetails; + private final boolean httpsEnabled; + private final MetricsServiceProvider metricsServiceProvider; + private static final String BEAN_NAME = "Hadoop:service=HddsDatanode,name=BlockDeletingService"; + private static final String METRICS_KEY = "TotalPendingBlockBytes"; + + public DataNodeMetricsCollectionTask( + DatanodeDetails nodeDetails, + boolean httpsEnabled, + MetricsServiceProviderFactory factory) { + this.nodeDetails = nodeDetails; + this.httpsEnabled = httpsEnabled; + this.metricsServiceProvider = factory.getJmxMetricsServiceProvider(getJmxMetricsUrl()); + } + + @Override + public DatanodePendingDeletionMetrics call() { + LOG.debug("Collecting pending deletion metrics from DataNode {}", nodeDetails.getHostName()); + try { + List> metrics = metricsServiceProvider.getMetrics(BEAN_NAME); + if (metrics == null || metrics.isEmpty()) { + return new DatanodePendingDeletionMetrics( + nodeDetails.getHostName(), nodeDetails.getUuidString(), -1L); + } + Map deletionMetrics = ReconUtils.getMetricsData(metrics, BEAN_NAME); + long pendingBlockSize = ReconUtils.extractLongMetricValue(deletionMetrics, METRICS_KEY); + + return new DatanodePendingDeletionMetrics( + nodeDetails.getHostName(), nodeDetails.getUuidString(), pendingBlockSize); + + } catch (Exception e) { + LOG.error("Failed to collect metrics from DataNode {}", nodeDetails.getHostName(), e); + return new DatanodePendingDeletionMetrics( + nodeDetails.getHostName(), nodeDetails.getUuidString(), -1L); + } + } + + private String getJmxMetricsUrl() { + String protocol = httpsEnabled ? "https" : "http"; + Name portName = httpsEnabled ? DatanodeDetails.Port.Name.HTTPS : DatanodeDetails.Port.Name.HTTP; + return String.format("%s://%s:%d/jmx", + protocol, + nodeDetails.getHostName(), + nodeDetails.getPort(portName).getValue()); + } +} From 9fbf64ca6c9cca70a029bfca72b289649406a2ac Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 18 Dec 2025 15:54:00 +0100 Subject: [PATCH 07/36] HDDS-14203. 
Use EndpointBuilder in all S3G unit tests (#9522) --- .../hadoop/ozone/client/ObjectStoreStub.java | 3 +- .../ozone/s3/endpoint/EndpointBuilder.java | 10 +- .../ozone/s3/endpoint/TestBucketList.java | 158 ++++++------------ .../endpoint/TestMultipartUploadWithCopy.java | 43 +++-- .../s3/endpoint/TestObjectTaggingDelete.java | 2 + .../s3/endpoint/TestObjectTaggingPut.java | 2 + .../s3/endpoint/TestPartUploadWithStream.java | 2 - .../s3/endpoint/TestPermissionCheck.java | 1 + .../s3/endpoint/TestUploadWithStream.java | 2 - .../s3/metrics/TestS3GatewayMetrics.java | 10 +- 10 files changed, 83 insertions(+), 150 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java index 42671e2c294a..7dad6d600f0b 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_EMPTY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; +import static org.mockito.Mockito.mock; import java.io.IOException; import java.util.HashMap; @@ -44,7 +45,7 @@ public class ObjectStoreStub extends ObjectStore { private Map bucketEmptyStatus = new HashMap<>(); public ObjectStoreStub() { - super(); + super(conf, mock(ClientProtocol.class)); } public ObjectStoreStub(ConfigurationSource conf, ClientProtocol proxy) { diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java index 9c4cade64079..13db3962a89b 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java @@ -27,6 +27,7 @@ import javax.ws.rs.core.UriInfo; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.signature.SignatureInfo; @@ -97,8 +98,8 @@ public EndpointBuilder setSignatureInfo(SignatureInfo newSignatureInfo) { public T build() { T endpoint = base != null ? 
base : constructor.get(); - if (ozoneClient != null) { - endpoint.setClient(ozoneClient); + if (endpoint.getClient() == null) { + endpoint.setClient(getClient()); } final OzoneConfiguration config = getConfig(); @@ -109,10 +110,15 @@ public T build() { endpoint.setRequestIdentifier(identifier); endpoint.setSignatureInfo(signatureInfo); + endpoint.initialization(); + return endpoint; } protected OzoneClient getClient() { + if (ozoneClient == null) { + ozoneClient = new OzoneClientStub(); + } return ozoneClient; } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java index f23f8f81b60d..0d2f087a75b5 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.s3.endpoint; import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_LIST_MAX_KEYS_LIMIT; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointBuilder.newBucketEndpointBuilder; import static org.apache.hadoop.ozone.s3.util.S3Consts.ENCODING_TYPE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -32,7 +33,6 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.commontypes.EncodingTypeObject; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; @@ -48,15 +48,13 @@ public class TestBucketList { @Test public void listRoot() throws OS3Exception, IOException { - OzoneClient client = createClientWithKeys("file1", "dir1/file2"); - - BucketEndpoint getBucket = EndpointBuilder.newBucketEndpointBuilder() + BucketEndpoint endpoint = newBucketEndpointBuilder() .setClient(client) .build(); ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket.get("b1", "/", null, null, 100, "", + (ListObjectResponse) endpoint.get("b1", "/", null, null, 100, "", null, null, null, null, null, null, 0) .getEntity(); @@ -67,21 +65,15 @@ public void listRoot() throws OS3Exception, IOException { assertEquals(1, getBucketResponse.getContents().size()); assertEquals("file1", getBucketResponse.getContents().get(0).getKey().getName()); - } @Test public void listDir() throws OS3Exception, IOException { - - BucketEndpoint getBucket = new BucketEndpoint(); - OzoneClient client = createClientWithKeys("dir1/file2", "dir1/dir2/file2"); - - getBucket.setClient(client); - getBucket.setRequestIdentifier(new RequestIdentifier()); + BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(client).build(); ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket.get("b1", "/", null, null, 100, + (ListObjectResponse) endpoint.get("b1", "/", null, null, 100, "dir1", null, null, null, null, null, null, 0).getEntity(); assertEquals(1, getBucketResponse.getCommonPrefixes().size()); @@ -89,22 +81,18 @@ public void listDir() throws OS3Exception, IOException { getBucketResponse.getCommonPrefixes().get(0).getPrefix().getName()); assertEquals(0, getBucketResponse.getContents().size()); - } @Test public void listSubDir() throws OS3Exception, IOException { - - BucketEndpoint 
getBucket = new BucketEndpoint(); - OzoneClient ozoneClient = createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file", "dir1bha/file2"); - getBucket.setClient(ozoneClient); - getBucket.setRequestIdentifier(new RequestIdentifier()); + BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); + ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket + (ListObjectResponse) endpoint .get("b1", "/", null, null, 100, "dir1/", null, null, null, null, null, null, 0) .getEntity(); @@ -116,18 +104,15 @@ public void listSubDir() throws OS3Exception, IOException { assertEquals(1, getBucketResponse.getContents().size()); assertEquals("dir1/file2", getBucketResponse.getContents().get(0).getKey().getName()); - } @Test public void listObjectOwner() throws OS3Exception, IOException { - UserGroupInformation user1 = UserGroupInformation .createUserForTesting("user1", new String[] {"user1"}); UserGroupInformation user2 = UserGroupInformation .createUserForTesting("user2", new String[] {"user2"}); - BucketEndpoint getBucket = new BucketEndpoint(); OzoneClient client = new OzoneClientStub(); client.getObjectStore().createS3Bucket("b1"); OzoneBucket bucket = client.getObjectStore().getS3Bucket("b1"); @@ -137,10 +122,10 @@ public void listObjectOwner() throws OS3Exception, IOException { UserGroupInformation.setLoginUser(user2); bucket.createKey("key2", 0).close(); - getBucket.setClient(client); - getBucket.setRequestIdentifier(new RequestIdentifier()); + BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(client).build(); + ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket.get("b1", "/", null, null, 100, + (ListObjectResponse) endpoint.get("b1", "/", null, null, 100, "key", null, null, null, null, null, null, 0).getEntity(); assertEquals(2, getBucketResponse.getContents().size()); @@ -148,84 +133,67 @@ public void listObjectOwner() throws OS3Exception, IOException { getBucketResponse.getContents().get(0).getOwner().getDisplayName()); assertEquals(user2.getShortUserName(), getBucketResponse.getContents().get(1).getOwner().getDisplayName()); - } @Test public void listWithPrefixAndDelimiter() throws OS3Exception, IOException { - - BucketEndpoint getBucket = new BucketEndpoint(); - OzoneClient ozoneClient = createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file", "dir1bha/file2", "file2"); - getBucket.setClient(ozoneClient); - getBucket.setRequestIdentifier(new RequestIdentifier()); + BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket.get("b1", "/", null, null, 100, + (ListObjectResponse) endpoint.get("b1", "/", null, null, 100, "dir1", null, null, null, null, null, null, 0).getEntity(); assertEquals(3, getBucketResponse.getCommonPrefixes().size()); - } @Test public void listWithPrefixAndDelimiter1() throws OS3Exception, IOException { - - BucketEndpoint getBucket = new BucketEndpoint(); - OzoneClient ozoneClient = createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file", "dir1bha/file2", "file2"); - getBucket.setClient(ozoneClient); - getBucket.setRequestIdentifier(new RequestIdentifier()); + BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket.get("b1", "/", null, null, 100, + (ListObjectResponse) endpoint.get("b1", "/", null, null, 100, "", null, null, null, null, null, null, 0).getEntity(); 
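// The EndpointBuilder changes above are what allow the conversions in this
// test class: build() now falls back to a fresh OzoneClientStub when no client
// was supplied, and it calls initialization() itself after setting the
// configuration, request identifier and signature info. A minimal sketch of
// the resulting setup (createClientWithKeys is the helper already defined in
// this test class; names used only for illustration):
BucketEndpoint example = newBucketEndpointBuilder()
    .setClient(createClientWithKeys("dir1/file2", "dir1/dir2/file2"))
    .build();
// With no explicit client at all, build() wires in an OzoneClientStub:
BucketEndpoint stubBacked = newBucketEndpointBuilder().build();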
assertEquals(3, getBucketResponse.getCommonPrefixes().size()); assertEquals("file2", getBucketResponse.getContents().get(0) .getKey().getName()); - } @Test public void listWithPrefixAndDelimiter2() throws OS3Exception, IOException { - - BucketEndpoint getBucket = new BucketEndpoint(); - OzoneClient ozoneClient = createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file", "dir1bha/file2", "file2"); - getBucket.setClient(ozoneClient); - getBucket.setRequestIdentifier(new RequestIdentifier()); + BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); + ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket.get("b1", "/", null, null, 100, "dir1bh", + (ListObjectResponse) endpoint.get("b1", "/", null, null, 100, "dir1bh", null, "dir1/dir2/file2", null, null, null, null, 0).getEntity(); assertEquals(2, getBucketResponse.getCommonPrefixes().size()); - } @Test public void listWithPrefixAndEmptyStrDelimiter() throws OS3Exception, IOException { - BucketEndpoint getBucket = new BucketEndpoint(); - OzoneClient ozoneClient = createClientWithKeys("dir1/", "dir1/dir2/", "dir1/dir2/file1", "dir1/dir2/file2"); - getBucket.setClient(ozoneClient); - getBucket.setRequestIdentifier(new RequestIdentifier()); + BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); + // Should behave the same if delimiter is null ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket.get("b1", "", null, null, 100, "dir1/", + (ListObjectResponse) endpoint.get("b1", "", null, null, 100, "dir1/", null, null, null, null, null, null, 0).getEntity(); assertEquals(0, getBucketResponse.getCommonPrefixes().size()); @@ -238,26 +206,22 @@ public void listWithPrefixAndEmptyStrDelimiter() getBucketResponse.getContents().get(2).getKey().getName()); assertEquals("dir1/dir2/file2", getBucketResponse.getContents().get(3).getKey().getName()); - } @Test public void listWithContinuationToken() throws OS3Exception, IOException { - - BucketEndpoint getBucket = new BucketEndpoint(); - OzoneClient ozoneClient = createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file", "dir1bha/file2", "file2"); - getBucket.setClient(ozoneClient); - getBucket.setRequestIdentifier(new RequestIdentifier()); + BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); + int maxKeys = 2; // As we have 5 keys, with max keys 2 we should call list 3 times. 
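// Spelled out, the continuation-token pattern the next three calls exercise:
// request a page, read getNextToken() from the response, and pass it back as
// the continuation-token argument until isTruncated() is false. A compact,
// hypothetical equivalent of those calls (same 13-argument get(...) signature
// used throughout this file):
String nextToken = null;
ListObjectResponse page;
int pages = 0;
do {
  page = (ListObjectResponse) endpoint.get("b1", null, null, null, maxKeys,
      "", nextToken, null, null, null, null, null, 0).getEntity();
  nextToken = page.getNextToken();
  pages++;
} while (page.isTruncated());
// With 5 keys and maxKeys = 2 this produces pages of 2, 2 and 1, i.e. pages == 3.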
// First time ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket.get("b1", null, null, null, maxKeys, + (ListObjectResponse) endpoint.get("b1", null, null, null, maxKeys, "", null, null, null, null, null, null, 0).getEntity(); assertTrue(getBucketResponse.isTruncated()); @@ -266,7 +230,7 @@ public void listWithContinuationToken() throws OS3Exception, IOException { // 2nd time String continueToken = getBucketResponse.getNextToken(); getBucketResponse = - (ListObjectResponse) getBucket.get("b1", null, null, null, maxKeys, + (ListObjectResponse) endpoint.get("b1", null, null, null, maxKeys, "", continueToken, null, null, null, null, null, 0).getEntity(); assertTrue(getBucketResponse.isTruncated()); assertEquals(2, getBucketResponse.getContents().size()); @@ -276,20 +240,16 @@ public void listWithContinuationToken() throws OS3Exception, IOException { //3rd time getBucketResponse = - (ListObjectResponse) getBucket.get("b1", null, null, null, maxKeys, + (ListObjectResponse) endpoint.get("b1", null, null, null, maxKeys, "", continueToken, null, null, null, null, null, 0).getEntity(); assertFalse(getBucketResponse.isTruncated()); assertEquals(1, getBucketResponse.getContents().size()); - } @Test public void listWithContinuationTokenDirBreak() throws OS3Exception, IOException { - - BucketEndpoint getBucket = new BucketEndpoint(); - OzoneClient ozoneClient = createClientWithKeys( "test/dir1/file1", @@ -301,15 +261,14 @@ public void listWithContinuationTokenDirBreak() "test/dir3/file7", "test/file8"); - getBucket.setClient(ozoneClient); - getBucket.setRequestIdentifier(new RequestIdentifier()); + BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); int maxKeys = 2; ListObjectResponse getBucketResponse; getBucketResponse = - (ListObjectResponse) getBucket.get("b1", "/", null, null, maxKeys, + (ListObjectResponse) endpoint.get("b1", "/", null, null, maxKeys, "test/", null, null, null, null, null, null, 0).getEntity(); assertEquals(0, getBucketResponse.getContents().size()); @@ -320,7 +279,7 @@ public void listWithContinuationTokenDirBreak() getBucketResponse.getCommonPrefixes().get(1).getPrefix().getName()); getBucketResponse = - (ListObjectResponse) getBucket.get("b1", "/", null, null, maxKeys, + (ListObjectResponse) endpoint.get("b1", "/", null, null, maxKeys, "test/", getBucketResponse.getNextToken(), null, null, null, null, null, 0).getEntity(); assertEquals(1, getBucketResponse.getContents().size()); @@ -329,7 +288,6 @@ public void listWithContinuationTokenDirBreak() getBucketResponse.getCommonPrefixes().get(0).getPrefix().getName()); assertEquals("test/file8", getBucketResponse.getContents().get(0).getKey().getName()); - } /** @@ -338,22 +296,18 @@ public void listWithContinuationTokenDirBreak() */ @Test public void listWithContinuationToken1() throws OS3Exception, IOException { - - BucketEndpoint getBucket = new BucketEndpoint(); - OzoneClient ozoneClient = createClientWithKeys("dir1/file1", "dir1bh/file1", "dir1bha/file1", "dir0/file1", "dir2/file1"); - getBucket.setClient(ozoneClient); - getBucket.setRequestIdentifier(new RequestIdentifier()); + BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); int maxKeys = 2; // As we have 5 keys, with max keys 2 we should call list 3 times. 
// First time ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket.get("b1", "/", null, null, maxKeys, + (ListObjectResponse) endpoint.get("b1", "/", null, null, maxKeys, "dir", null, null, null, null, null, null, 0).getEntity(); assertTrue(getBucketResponse.isTruncated()); @@ -362,7 +316,7 @@ public void listWithContinuationToken1() throws OS3Exception, IOException { // 2nd time String continueToken = getBucketResponse.getNextToken(); getBucketResponse = - (ListObjectResponse) getBucket.get("b1", "/", null, null, maxKeys, + (ListObjectResponse) endpoint.get("b1", "/", null, null, maxKeys, "dir", continueToken, null, null, null, null, null, 0).getEntity(); assertTrue(getBucketResponse.isTruncated()); assertEquals(2, getBucketResponse.getCommonPrefixes().size()); @@ -370,27 +324,22 @@ public void listWithContinuationToken1() throws OS3Exception, IOException { //3rd time continueToken = getBucketResponse.getNextToken(); getBucketResponse = - (ListObjectResponse) getBucket.get("b1", "/", null, null, maxKeys, + (ListObjectResponse) endpoint.get("b1", "/", null, null, maxKeys, "dir", continueToken, null, null, null, null, null, 0).getEntity(); assertFalse(getBucketResponse.isTruncated()); assertEquals(1, getBucketResponse.getCommonPrefixes().size()); - } @Test public void listWithContinuationTokenFail() throws IOException { - - BucketEndpoint getBucket = new BucketEndpoint(); - OzoneClient ozoneClient = createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file", "dir1bha/file2", "dir1", "dir2", "dir3"); - getBucket.setClient(ozoneClient); - getBucket.setRequestIdentifier(new RequestIdentifier()); + BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); - OS3Exception e = assertThrows(OS3Exception.class, () -> getBucket.get("b1", + OS3Exception e = assertThrows(OS3Exception.class, () -> endpoint.get("b1", "/", null, null, 2, "dir", "random", null, null, null, null, null, 1000) .getEntity(), "listWithContinuationTokenFail"); assertEquals("random", e.getResource()); @@ -399,17 +348,14 @@ public void listWithContinuationTokenFail() throws IOException { @Test public void testStartAfter() throws IOException, OS3Exception { - BucketEndpoint getBucket = new BucketEndpoint(); - OzoneClient ozoneClient = createClientWithKeys("dir1/file1", "dir1bh/file1", "dir1bha/file1", "dir0/file1", "dir2/file1"); - getBucket.setClient(ozoneClient); - getBucket.setRequestIdentifier(new RequestIdentifier()); + BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket.get("b1", null, null, null, 1000, + (ListObjectResponse) endpoint.get("b1", null, null, null, 1000, null, null, null, null, null, null, null, 0).getEntity(); assertFalse(getBucketResponse.isTruncated()); @@ -420,20 +366,18 @@ public void testStartAfter() throws IOException, OS3Exception { String startAfter = "dir0/file1"; getBucketResponse = - (ListObjectResponse) getBucket.get("b1", null, null, null, + (ListObjectResponse) endpoint.get("b1", null, null, null, 1000, null, null, startAfter, null, null, null, null, 0).getEntity(); assertFalse(getBucketResponse.isTruncated()); assertEquals(4, getBucketResponse.getContents().size()); getBucketResponse = - (ListObjectResponse) getBucket.get("b1", null, null, null, + (ListObjectResponse) endpoint.get("b1", null, null, null, 1000, null, null, "random", null, null, null, null, 0).getEntity(); assertFalse(getBucketResponse.isTruncated()); assertEquals(0, 
getBucketResponse.getContents().size()); - - } @Test @@ -462,18 +406,16 @@ public void testEncodingType() throws IOException, OS3Exception { if encodingType == null , the = will not be encoded to "%3D" * */ - BucketEndpoint getBucket = new BucketEndpoint(); OzoneClient ozoneClient = createClientWithKeys("data=1970", "data==1970"); - getBucket.setClient(ozoneClient); - getBucket.setRequestIdentifier(new RequestIdentifier()); + BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); String delimiter = "="; String prefix = "data="; String startAfter = "data="; String encodingType = ENCODING_TYPE; - ListObjectResponse response = (ListObjectResponse) getBucket.get( + ListObjectResponse response = (ListObjectResponse) endpoint.get( "b1", delimiter, encodingType, null, 1000, prefix, null, startAfter, null, null, null, null, 0).getEntity(); @@ -491,7 +433,7 @@ public void testEncodingType() throws IOException, OS3Exception { assertEquals(encodingType, response.getContents().get(0).getKey().getEncodingType()); - response = (ListObjectResponse) getBucket.get( + response = (ListObjectResponse) endpoint.get( "b1", delimiter, null, null, 1000, prefix, null, startAfter, null, null, null, null, 0).getEntity(); @@ -506,17 +448,14 @@ public void testEncodingType() throws IOException, OS3Exception { assertEncodingTypeObject(prefix + delimiter, null, response.getCommonPrefixes().get(0).getPrefix()); assertNull(response.getContents().get(0).getKey().getEncodingType()); - } @Test public void testEncodingTypeException() throws IOException { - BucketEndpoint getBucket = new BucketEndpoint(); OzoneClient client = new OzoneClientStub(); client.getObjectStore().createS3Bucket("b1"); - getBucket.setClient(client); - getBucket.setRequestIdentifier(new RequestIdentifier()); - OS3Exception e = assertThrows(OS3Exception.class, () -> getBucket.get( + BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(client).build(); + OS3Exception e = assertThrows(OS3Exception.class, () -> endpoint.get( "b1", null, "unSupportType", null, 1000, null, null, null, null, null, null, null, 0).getEntity()); assertEquals(S3ErrorTable.INVALID_ARGUMENT.getCode(), e.getCode()); @@ -526,7 +465,7 @@ public void testEncodingTypeException() throws IOException { public void testListObjectsWithNegativeMaxKeys() throws Exception { OzoneClient client = new OzoneClientStub(); client.getObjectStore().createS3Bucket("bucket"); - BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder() + BucketEndpoint bucketEndpoint = newBucketEndpointBuilder() .setClient(client) .build(); @@ -542,7 +481,7 @@ public void testListObjectsWithNegativeMaxKeys() throws Exception { public void testListObjectsWithZeroMaxKeys() throws Exception { OzoneClient client = new OzoneClientStub(); client.getObjectStore().createS3Bucket("bucket"); - BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder() + BucketEndpoint bucketEndpoint = newBucketEndpointBuilder() .setClient(client) .build(); @@ -558,7 +497,7 @@ public void testListObjectsWithZeroMaxKeys() throws Exception { @Test public void testListObjectsWithZeroMaxKeysInNonEmptyBucket() throws Exception { OzoneClient client = createClientWithKeys("file1", "file2", "file3", "file4", "file5"); - BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder() + BucketEndpoint bucketEndpoint = newBucketEndpointBuilder() .setClient(client) .build(); @@ -588,11 +527,10 @@ public void testListObjectsRespectsConfiguredMaxKeysLimit() throws Exception { 
config.set(OZONE_S3G_LIST_MAX_KEYS_LIMIT, configuredMaxKeysLimit); // Arrange: Build and initialize the BucketEndpoint with the config - BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder() + BucketEndpoint bucketEndpoint = newBucketEndpointBuilder() .setClient(client) .setConfig(config) .build(); - bucketEndpoint.init(); // Assert: Ensure the config value is correctly set in the endpoint assertEquals(configuredMaxKeysLimit, diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java index fd83523214ca..702c32d1abab 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java @@ -45,17 +45,14 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; -import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; -import org.apache.hadoop.ozone.s3.signature.SignatureInfo; import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -66,12 +63,9 @@ public class TestMultipartUploadWithCopy { - private static final ObjectEndpoint REST = new ObjectEndpoint(); - private static final String KEY = "key2"; private static final String EXISTING_KEY = "key1"; private static final String EXISTING_KEY_CONTENT = "testkey"; - private static final OzoneClient CLIENT = new OzoneClientStub(); private static final long DELAY_MS = 2000; private static String beforeSourceKeyModificationTimeStr; private static String afterSourceKeyModificationTimeStr; @@ -80,12 +74,16 @@ public class TestMultipartUploadWithCopy { private static final String ERROR_CODE = S3ErrorTable.PRECOND_FAILED.getCode(); + private static ObjectEndpoint endpoint; + private static OzoneClient client; + @BeforeAll public static void setUp() throws Exception { - CLIENT.getObjectStore().createS3Bucket(OzoneConsts.S3_BUCKET); + client = new OzoneClientStub(); + client.getObjectStore().createS3Bucket(OzoneConsts.S3_BUCKET); OzoneBucket bucket = - CLIENT.getObjectStore().getS3Bucket(OzoneConsts.S3_BUCKET); + client.getObjectStore().getS3Bucket(OzoneConsts.S3_BUCKET); byte[] keyContent = EXISTING_KEY_CONTENT.getBytes(UTF_8); try (OutputStream stream = bucket @@ -99,7 +97,7 @@ public static void setUp() throws Exception { stream.write(keyContent); } - long sourceKeyLastModificationTime = CLIENT.getObjectStore() + long sourceKeyLastModificationTime = client.getObjectStore() .getS3Bucket(OzoneConsts.S3_BUCKET) .getKey(EXISTING_KEY) .getModificationTime().toEpochMilli(); @@ -125,13 +123,10 @@ public static void setUp() throws Exception { when(headers.getHeaderString(X_AMZ_CONTENT_SHA256)) .thenReturn("mockSignature"); 
- REST.setHeaders(headers); - REST.setClient(CLIENT); - REST.setOzoneConfiguration(new OzoneConfiguration()); - REST.setRequestIdentifier(new RequestIdentifier()); - SignatureInfo signatureInfo = mock(SignatureInfo.class); - when(signatureInfo.isSignPayload()).thenReturn(true); - REST.setSignatureInfo(signatureInfo); + endpoint = EndpointBuilder.newObjectEndpointBuilder() + .setHeaders(headers) + .setClient(client) + .build(); } @Test @@ -174,7 +169,7 @@ public void testMultipart() throws Exception { uploadID); OzoneBucket bucket = - CLIENT.getObjectStore().getS3Bucket(OzoneConsts.S3_BUCKET); + client.getObjectStore().getS3Bucket(OzoneConsts.S3_BUCKET); try (InputStream is = bucket.readKey(KEY)) { String keyContent = new Scanner(is, UTF_8.name()) .useDelimiter("\\A").next(); @@ -317,7 +312,7 @@ public void testMultipartTSHeaders() throws Exception { private String initiateMultipartUpload(String key) throws IOException, OS3Exception { setHeaders(); - Response response = REST.initializeMultipartUpload(OzoneConsts.S3_BUCKET, + Response response = endpoint.initializeMultipartUpload(OzoneConsts.S3_BUCKET, key); MultipartUploadInitiateResponse multipartUploadInitiateResponse = (MultipartUploadInitiateResponse) response.getEntity(); @@ -335,7 +330,7 @@ private Part uploadPart(String key, String uploadID, int partNumber, String setHeaders(); ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); - Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), + Response response = endpoint.put(OzoneConsts.S3_BUCKET, key, content.length(), partNumber, uploadID, null, null, body); assertEquals(200, response.getStatus()); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -380,7 +375,7 @@ private Part uploadPartWithCopy(String key, String uploadID, int partNumber, setHeaders(additionalHeaders); ByteArrayInputStream body = new ByteArrayInputStream("".getBytes(UTF_8)); - Response response = REST.put(OzoneConsts.S3_BUCKET, key, 0, partNumber, + Response response = endpoint.put(OzoneConsts.S3_BUCKET, key, 0, partNumber, uploadID, null, null, body); assertEquals(200, response.getStatus()); @@ -408,9 +403,9 @@ public void testUploadWithRangeCopyContentLength() OzoneConsts.S3_BUCKET + "/" + EXISTING_KEY); additionalHeaders.put(COPY_SOURCE_HEADER_RANGE, "bytes=0-3"); setHeaders(additionalHeaders); - REST.put(OzoneConsts.S3_BUCKET, KEY, 0, 1, uploadID, null, null, body); + endpoint.put(OzoneConsts.S3_BUCKET, KEY, 0, 1, uploadID, null, null, body); OzoneMultipartUploadPartListParts parts = - CLIENT.getObjectStore().getS3Bucket(OzoneConsts.S3_BUCKET) + client.getObjectStore().getS3Bucket(OzoneConsts.S3_BUCKET) .listParts(KEY, uploadID, 0, 100); assertEquals(1, parts.getPartInfoList().size()); assertEquals(4, parts.getPartInfoList().get(0).getSize()); @@ -420,7 +415,7 @@ private void completeMultipartUpload(String key, CompleteMultipartUploadRequest completeMultipartUploadRequest, String uploadID) throws IOException, OS3Exception { setHeaders(); - Response response = REST.completeMultipartUpload(OzoneConsts.S3_BUCKET, key, + Response response = endpoint.completeMultipartUpload(OzoneConsts.S3_BUCKET, key, uploadID, completeMultipartUploadRequest); assertEquals(200, response.getStatus()); @@ -445,7 +440,7 @@ private void setHeaders(Map additionalHeaders) { additionalHeaders .forEach((k, v) -> when(headers.getHeaderString(k)).thenReturn(v)); - REST.setHeaders(headers); + endpoint.setHeaders(headers); } private void setHeaders() { diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java index cb3e24472b05..488474e30390 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java @@ -43,6 +43,7 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.s3.exception.OS3Exception; @@ -124,6 +125,7 @@ public void testDeleteObjectTaggingNotImplemented() throws Exception { when(mockClient.getObjectStore()).thenReturn(mockObjectStore); when(mockObjectStore.getS3Volume()).thenReturn(mockVolume); + when(mockObjectStore.getClientProxy()).thenReturn(mock(ClientProtocol.class)); when(mockVolume.getBucket("fsoBucket")).thenReturn(mockBucket); ObjectEndpoint endpoint = EndpointBuilder.newObjectEndpointBuilder() diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java index 02b71e8772c4..d1651d6b59c0 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java @@ -47,6 +47,7 @@ import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.s3.exception.OS3Exception; @@ -170,6 +171,7 @@ public void testPutObjectTaggingNotImplemented() throws Exception { when(mockClient.getObjectStore()).thenReturn(mockObjectStore); when(mockObjectStore.getS3Volume()).thenReturn(mockVolume); + when(mockObjectStore.getClientProxy()).thenReturn(mock(ClientProtocol.class)); when(mockVolume.getBucket("fsoBucket")).thenReturn(mockBucket); ObjectEndpoint endpoint = EndpointBuilder.newObjectEndpointBuilder() diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java index 4b2d8a49efb9..736660073d57 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java @@ -72,8 +72,6 @@ public void setUp() throws Exception { .setClient(client) .setConfig(conf) .build(); - - rest.init(); } @Test diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java index 81f6853bf73f..e422e4920792 100644 --- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java @@ -93,6 +93,7 @@ public void setup() { clientProtocol = mock(ClientProtocol.class); S3GatewayMetrics.create(conf); when(client.getProxy()).thenReturn(clientProtocol); + when(objectStore.getClientProxy()).thenReturn(clientProtocol); } /** diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java index 992b0dc2dc9d..dbe21601dbd3 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java @@ -80,8 +80,6 @@ public void setUp() throws Exception { .setHeaders(headers) .setConfig(conf) .build(); - - rest.init(); } @Test diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java index 5a72fad11b2e..cae9a9422885 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java @@ -78,6 +78,7 @@ public void setup() throws Exception { clientStub = new OzoneClientStub(); clientStub.getObjectStore().createS3Bucket(bucketName); bucket = clientStub.getObjectStore().getS3Bucket(bucketName); + bucket.createKey("file1", 0).close(); headers = mock(HttpHeaders.class); when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn( @@ -132,8 +133,6 @@ public void testListBucketSuccess() throws Exception { public void testGetBucketSuccess() throws Exception { long oriMetric = metrics.getGetBucketSuccess(); - clientStub = createClientWithKeys("file1"); - bucketEndpoint.setClient(clientStub); bucketEndpoint.get(bucketName, null, null, null, 1000, null, null, "random", null, @@ -637,13 +636,6 @@ public void testDeleteObjectTaggingFailure() throws Exception { assertEquals(1L, curMetric - oriMetric); } - private OzoneClient createClientWithKeys(String... keys) throws IOException { - for (String key : keys) { - bucket.createKey(key, 0).close(); - } - return clientStub; - } - private String initiateMultipartUpload(String bktName, String key) throws IOException, OS3Exception { From 937c15726126757243d485946182b4b81250ca74 Mon Sep 17 00:00:00 2001 From: Devesh Kumar Singh Date: Thu, 18 Dec 2025 22:09:01 +0530 Subject: [PATCH 08/36] HDDS-14196. 
ContainerInfo.fromProtobuf creates two instances (#9523) --- .../org/apache/hadoop/hdds/scm/container/ContainerInfo.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java index c1d3420c87c3..7a8f0b798b8a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java @@ -118,8 +118,7 @@ public static ContainerInfo fromProtobuf(HddsProtos.ContainerInfoProto info) { .setContainerID(info.getContainerID()) .setDeleteTransactionId(info.getDeleteTransactionId()) .setReplicationConfig(config) - .setSequenceId(info.getSequenceId()) - .build(); + .setSequenceId(info.getSequenceId()); if (info.hasPipelineID()) { builder.setPipelineID(PipelineID.getFromProtobuf(info.getPipelineID())); From e4bc4fe827c3f172c5bb6be25005c6e255fb3d31 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 18 Dec 2025 19:28:52 +0100 Subject: [PATCH 09/36] HDDS-14204. Reduce duplication in auditing S3G requests (#9524) --- .../hadoop/ozone/audit/AuditMessage.java | 4 + .../ozone/s3/endpoint/BucketEndpoint.java | 78 +++++++------------ .../ozone/s3/endpoint/EndpointBase.java | 53 +++++++------ .../ozone/s3/endpoint/ObjectEndpoint.java | 47 ++++------- .../ozone/s3/endpoint/RootEndpoint.java | 10 +-- .../ozone/s3/TestS3GatewayAuditLog.java | 6 +- 6 files changed, 77 insertions(+), 121 deletions(-) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java index b5fd9656b7dc..d50ec6ac58ac 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java @@ -101,6 +101,10 @@ public Builder withParams(Map args) { return this; } + public Map getParams() { + return params; + } + public Builder withResult(AuditEventStatus result) { this.ret = result.getStatus(); return this; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index 587347a9817c..d3b39369c3af 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -53,6 +53,8 @@ import javax.ws.rs.core.Response; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.audit.AuditEventStatus; +import org.apache.hadoop.ozone.audit.AuditMessage; import org.apache.hadoop.ozone.audit.S3GAction; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneKey; @@ -138,8 +140,7 @@ public Response get( s3GAction = S3GAction.GET_ACL; S3BucketAcl result = getAcl(bucketName); getMetrics().updateGetAclSuccessStats(startNanos); - AUDIT.logReadSuccess( - buildAuditMessageForSuccess(s3GAction, getAuditParameters())); + auditReadSuccess(s3GAction); return Response.ok(result, MediaType.APPLICATION_XML_TYPE).build(); } @@ -175,8 +176,7 @@ public Response get( ozoneKeyIterator = 
bucket.listKeys(prefix, prevKey, shallow); } catch (OMException ex) { - AUDIT.logReadFailure( - buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex)); + auditReadFailure(s3GAction, ex); getMetrics().updateGetBucketFailureStats(startNanos); if (isAccessDenied(ex)) { throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex); @@ -188,8 +188,7 @@ public Response get( } } catch (Exception ex) { getMetrics().updateGetBucketFailureStats(startNanos); - AUDIT.logReadFailure( - buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex)); + auditReadFailure(s3GAction, ex); throw ex; } @@ -298,8 +297,7 @@ public Response get( getMetrics().incListKeyCount(keyCount); perf.appendCount(keyCount); perf.appendOpLatencyNanos(opLatencyNs); - AUDIT.logReadSuccess(buildAuditMessageForSuccess(s3GAction, - getAuditParameters(), perf)); + auditReadSuccess(s3GAction, perf); response.setKeyCount(keyCount); return Response.ok(response).build(); } @@ -320,28 +318,15 @@ public Response put(@PathParam("bucket") String bucketName, S3GAction s3GAction = S3GAction.CREATE_BUCKET; try { - // Build map of query parameters - Map queryParams = new HashMap<>(); - queryParams.put("acl", aclMarker); - // Future handlers: queryParams.put("lifecycle", lifecycleMarker); - - // Check for subresource operations using handlers - String queryParam = HANDLER_FACTORY.findFirstSupportedQueryParam(queryParams); - - if (queryParam != null) { - BucketOperationHandler handler = HANDLER_FACTORY.getHandler(queryParam); - // Delegate to specific handler - s3GAction = getActionForQueryParam(queryParam); - Response response = handler.handlePutRequest( - bucketName, body, getHeaders(), getBucketContext(), startNanos); - AUDIT.logWriteSuccess( - buildAuditMessageForSuccess(s3GAction, getAuditParameters())); + if (aclMarker != null) { + s3GAction = S3GAction.PUT_ACL; + Response response = putAcl(bucketName, body); + auditWriteSuccess(s3GAction); return response; } String location = createS3Bucket(bucketName); - AUDIT.logWriteSuccess( - buildAuditMessageForSuccess(s3GAction, getAuditParameters())); + auditWriteSuccess(s3GAction); getMetrics().updateCreateBucketSuccessStats(startNanos); return Response.status(HttpStatus.SC_OK).header("Location", location) .build(); @@ -353,8 +338,7 @@ public Response put(@PathParam("bucket") String bucketName, } throw exception; } catch (Exception ex) { - AUDIT.logWriteFailure( - buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex)); + auditWriteFailure(s3GAction, ex); throw ex; } } @@ -413,22 +397,18 @@ public Response listMultipartUploads( upload.getCreationTime(), S3StorageType.fromReplicationConfig(upload.getReplicationConfig()) ))); - AUDIT.logReadSuccess(buildAuditMessageForSuccess(s3GAction, - getAuditParameters())); + auditReadSuccess(s3GAction); getMetrics().updateListMultipartUploadsSuccessStats(startNanos); return Response.ok(result).build(); } catch (OMException exception) { - AUDIT.logReadFailure( - buildAuditMessageForFailure(s3GAction, getAuditParameters(), - exception)); + auditReadFailure(s3GAction, exception); getMetrics().updateListMultipartUploadsFailureStats(startNanos); if (isAccessDenied(exception)) { throw newError(S3ErrorTable.ACCESS_DENIED, prefix, exception); } throw exception; } catch (Exception ex) { - AUDIT.logReadFailure( - buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex)); + auditReadFailure(s3GAction, ex); throw ex; } } @@ -447,13 +427,11 @@ public Response head(@PathParam("bucket") String bucketName) try { OzoneBucket bucket = 
getBucket(bucketName); S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner()); - AUDIT.logReadSuccess( - buildAuditMessageForSuccess(s3GAction, getAuditParameters())); + auditReadSuccess(s3GAction); getMetrics().updateHeadBucketSuccessStats(startNanos); return Response.ok().build(); } catch (Exception e) { - AUDIT.logReadFailure( - buildAuditMessageForFailure(s3GAction, getAuditParameters(), e)); + auditReadFailure(s3GAction, e); throw e; } } @@ -477,8 +455,7 @@ public Response delete(@PathParam("bucket") String bucketName) } deleteS3Bucket(bucketName); } catch (OMException ex) { - AUDIT.logWriteFailure( - buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex)); + auditWriteFailure(s3GAction, ex); getMetrics().updateDeleteBucketFailureStats(startNanos); if (ex.getResult() == ResultCodes.BUCKET_NOT_EMPTY) { throw newError(S3ErrorTable.BUCKET_NOT_EMPTY, bucketName, ex); @@ -490,13 +467,11 @@ public Response delete(@PathParam("bucket") String bucketName) throw ex; } } catch (Exception ex) { - AUDIT.logWriteFailure( - buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex)); + auditWriteFailure(s3GAction, ex); throw ex; } - AUDIT.logWriteSuccess(buildAuditMessageForSuccess(s3GAction, - getAuditParameters())); + auditWriteSuccess(s3GAction); getMetrics().updateDeleteBucketSuccessStats(startNanos); return Response .status(HttpStatus.SC_NO_CONTENT) @@ -555,15 +530,16 @@ public MultiDeleteResponse multiDelete(@PathParam("bucket") String bucketName, } } - Map auditMap = getAuditParameters(); - auditMap.put("failedDeletes", deleteKeys.toString()); + AuditMessage.Builder message = auditMessageFor(s3GAction); + message.getParams().put("failedDeletes", deleteKeys.toString()); + if (!result.getErrors().isEmpty()) { - AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction, - auditMap, new Exception("MultiDelete Exception"))); + AUDIT.logWriteFailure(message.withResult(AuditEventStatus.FAILURE) + .withException(new Exception("MultiDelete Exception")).build()); } else { - AUDIT.logWriteSuccess( - buildAuditMessageForSuccess(s3GAction, auditMap)); + AUDIT.logWriteSuccess(message.withResult(AuditEventStatus.SUCCESS).build()); } + return result; } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java index 42ad2c6b38ab..6f41e2bee06d 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -60,7 +60,6 @@ import org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder; import org.apache.hadoop.ozone.audit.AuditLoggerType; import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.audit.Auditor; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneKey; @@ -84,7 +83,7 @@ /** * Basic helpers for all the REST endpoints. 
*/ -public abstract class EndpointBase implements Auditor { +public abstract class EndpointBase { protected static final String ETAG_CUSTOM = "etag-custom"; @@ -107,7 +106,7 @@ public abstract class EndpointBase implements Auditor { @Context private HttpHeaders headers; - private Set excludeMetadataFields = + private final Set excludeMetadataFields = new HashSet<>(Arrays.asList(OzoneConsts.GDPR_FLAG, STORAGE_CONFIG_HEADER)); private static final Logger LOG = LoggerFactory.getLogger(EndpointBase.class); @@ -456,8 +455,8 @@ protected static Map validateAndGetTagging( return Collections.unmodifiableMap(tags); } - private AuditMessage.Builder auditMessageBaseBuilder(AuditAction op, - Map auditMap) { + protected AuditMessage.Builder auditMessageFor(AuditAction op) { + Map auditMap = getAuditParameters(); auditMap.put("x-amz-request-id", requestIdentifier.getRequestId()); auditMap.put("x-amz-id-2", requestIdentifier.getAmzId()); @@ -475,29 +474,15 @@ private AuditMessage.Builder auditMessageBaseBuilder(AuditAction op, return builder; } - @Override - public AuditMessage buildAuditMessageForSuccess(AuditAction op, - Map auditMap) { - AuditMessage.Builder builder = auditMessageBaseBuilder(op, auditMap) + protected AuditMessage.Builder auditMessageForSuccess(AuditAction op) { + return auditMessageFor(op) .withResult(AuditEventStatus.SUCCESS); - return builder.build(); } - public AuditMessage buildAuditMessageForSuccess(AuditAction op, - Map auditMap, PerformanceStringBuilder performance) { - AuditMessage.Builder builder = auditMessageBaseBuilder(op, auditMap) - .withResult(AuditEventStatus.SUCCESS); - builder.setPerformance(performance); - return builder.build(); - } - - @Override - public AuditMessage buildAuditMessageForFailure(AuditAction op, - Map auditMap, Throwable throwable) { - AuditMessage.Builder builder = auditMessageBaseBuilder(op, auditMap) + protected AuditMessage.Builder auditMessageForFailure(AuditAction op, Throwable throwable) { + return auditMessageFor(op) .withResult(AuditEventStatus.FAILURE) .withException(throwable); - return builder.build(); } @VisibleForTesting @@ -556,14 +541,28 @@ protected Map getAuditParameters() { return AuditUtils.getAuditParameters(context); } + protected void auditWriteSuccess(AuditAction action, PerformanceStringBuilder perf) { + AUDIT.logWriteSuccess(auditMessageForSuccess(action).setPerformance(perf).build()); + } + + protected void auditWriteSuccess(AuditAction action) { + AUDIT.logWriteSuccess(auditMessageForSuccess(action).build()); + } + + protected void auditReadSuccess(AuditAction action, PerformanceStringBuilder perf) { + AUDIT.logReadSuccess(auditMessageForSuccess(action).setPerformance(perf).build()); + } + + protected void auditReadSuccess(AuditAction action) { + AUDIT.logReadSuccess(auditMessageForSuccess(action).build()); + } + protected void auditWriteFailure(AuditAction action, Throwable ex) { - AUDIT.logWriteFailure( - buildAuditMessageForFailure(action, getAuditParameters(), ex)); + AUDIT.logWriteFailure(auditMessageForFailure(action, ex).build()); } protected void auditReadFailure(AuditAction action, Exception ex) { - AUDIT.logReadFailure( - buildAuditMessageForFailure(action, getAuditParameters(), ex)); + AUDIT.logReadFailure(auditMessageForFailure(action, ex).build()); } protected boolean isAccessDenied(OMException ex) { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 
4799570a69e1..3154ad74d017 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -387,8 +387,7 @@ public Response put( if (auditSuccess) { long opLatencyNs = getMetrics().updateCreateKeySuccessStats(startNanos); perf.appendOpLatencyNanos(opLatencyNs); - AUDIT.logWriteSuccess(buildAuditMessageForSuccess(s3GAction, - getAuditParameters(), perf)); + auditWriteSuccess(s3GAction, perf); } } } @@ -430,8 +429,7 @@ public Response get( int partMarker = parsePartNumberMarker(partNumberMarker); Response response = listParts(bucket, keyPath, uploadId, partMarker, maxParts, perf); - AUDIT.logReadSuccess(buildAuditMessageForSuccess(s3GAction, - getAuditParameters(), perf)); + auditReadSuccess(s3GAction, perf); return response; } @@ -469,8 +467,7 @@ public Response get( } long opLatencyNs = getMetrics().updateGetKeySuccessStats(startNanos); perf.appendOpLatencyNanos(opLatencyNs); - AUDIT.logReadSuccess(buildAuditMessageForSuccess(S3GAction.GET_KEY, - getAuditParameters(), perf)); + auditReadSuccess(S3GAction.GET_KEY, perf); }; responseBuilder = Response .ok(output) @@ -493,8 +490,7 @@ public Response get( } long opLatencyNs = getMetrics().updateGetKeySuccessStats(startNanos); perf.appendOpLatencyNanos(opLatencyNs); - AUDIT.logReadSuccess(buildAuditMessageForSuccess(S3GAction.GET_KEY, - getAuditParameters(), perf)); + auditReadSuccess(S3GAction.GET_KEY, perf); }; responseBuilder = Response .status(Status.PARTIAL_CONTENT) @@ -549,9 +545,7 @@ public Response get( perf.appendMetaLatencyNanos(metadataLatencyNs); return responseBuilder.build(); } catch (OMException ex) { - AUDIT.logReadFailure( - buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex) - ); + auditReadFailure(s3GAction, ex); if (taggingMarker != null) { getMetrics().updateGetObjectTaggingFailureStats(startNanos); } else if (uploadId != null) { @@ -569,9 +563,7 @@ public Response get( throw ex; } } catch (Exception ex) { - AUDIT.logReadFailure( - buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex) - ); + auditReadFailure(s3GAction, ex); throw ex; } } @@ -622,8 +614,7 @@ public Response head( isFile(keyPath, key); // TODO: return the specified range bytes of this object. 
} catch (OMException ex) { - AUDIT.logReadFailure( - buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex)); + auditReadFailure(s3GAction, ex); getMetrics().updateHeadKeyFailureStats(startNanos); if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) { // Just return 404 with no content @@ -636,8 +627,7 @@ public Response head( throw ex; } } catch (Exception ex) { - AUDIT.logReadFailure( - buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex)); + auditReadFailure(s3GAction, ex); throw ex; } @@ -665,8 +655,7 @@ public Response head( addLastModifiedDate(response, key); addCustomMetadataHeaders(response, key); getMetrics().updateHeadKeySuccessStats(startNanos); - AUDIT.logReadSuccess(buildAuditMessageForSuccess(s3GAction, - getAuditParameters())); + auditReadSuccess(s3GAction); return response.build(); } @@ -754,8 +743,7 @@ public Response delete( getClientProtocol().deleteKey(volume.getName(), bucketName, keyPath, false); } catch (OMException ex) { - AUDIT.logWriteFailure( - buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex)); + auditWriteFailure(s3GAction, ex); if (uploadId != null && !uploadId.equals("")) { getMetrics().updateAbortMultipartUploadFailureStats(startNanos); } else { @@ -780,8 +768,7 @@ public Response delete( throw ex; } } catch (Exception ex) { - AUDIT.logWriteFailure( - buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex)); + auditWriteFailure(s3GAction, ex); if (taggingMarker != null) { getMetrics().updateDeleteObjectTaggingFailureStats(startNanos); } else if (uploadId != null && !uploadId.equals("")) { @@ -792,8 +779,7 @@ public Response delete( throw ex; } getMetrics().updateDeleteKeySuccessStats(startNanos); - AUDIT.logWriteSuccess(buildAuditMessageForSuccess(s3GAction, - getAuditParameters())); + auditWriteSuccess(s3GAction); return Response .status(Status.NO_CONTENT) .build(); @@ -839,8 +825,7 @@ public Response initializeMultipartUpload( multipartUploadInitiateResponse.setKey(key); multipartUploadInitiateResponse.setUploadID(multipartInfo.getUploadID()); - AUDIT.logWriteSuccess( - buildAuditMessageForSuccess(s3GAction, getAuditParameters())); + auditWriteSuccess(s3GAction); getMetrics().updateInitMultipartUploadSuccessStats(startNanos); return Response.status(Status.OK).entity( multipartUploadInitiateResponse).build(); @@ -852,8 +837,7 @@ public Response initializeMultipartUpload( } throw ex; } catch (Exception ex) { - AUDIT.logWriteFailure( - buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex)); + auditWriteFailure(s3GAction, ex); getMetrics().updateInitMultipartUploadFailureStats(startNanos); throw ex; } @@ -908,8 +892,7 @@ public Response completeMultipartUpload(@PathParam("bucket") String bucket, wrapInQuotes(omMultipartUploadCompleteInfo.getHash())); // Location also setting as bucket name. 
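For reference, a minimal sketch (not part of this patch) of the call pattern the EndpointBase audit refactor enables: the protected helpers gather the audit parameters and request identifiers themselves, so endpoint methods pass only the S3GAction and, where available, a PerformanceStringBuilder. The method body, the deleteS3Bucket call and the annotations below are illustrative stand-ins; only auditWriteSuccess, auditWriteFailure and S3GAction come from the surrounding changes.

    @DELETE
    public Response delete(@PathParam("bucket") String bucketName) throws IOException, OS3Exception {
      S3GAction s3GAction = S3GAction.DELETE_BUCKET;
      try {
        // hypothetical helper standing in for the real bucket deletion logic
        deleteS3Bucket(bucketName);
        // replaces AUDIT.logWriteSuccess(buildAuditMessageForSuccess(s3GAction, getAuditParameters()))
        auditWriteSuccess(s3GAction);
        return Response.status(Status.NO_CONTENT).build();
      } catch (OMException ex) {
        // replaces AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex))
        auditWriteFailure(s3GAction, ex);
        throw ex;
      }
    }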
completeMultipartUploadResponse.setLocation(bucket); - AUDIT.logWriteSuccess( - buildAuditMessageForSuccess(s3GAction, getAuditParameters())); + auditWriteSuccess(s3GAction); getMetrics().updateCompleteMultipartUploadSuccessStats(startNanos); return Response.status(Status.OK).entity(completeMultipartUploadResponse) .build(); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/RootEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/RootEndpoint.java index 9ff2e945cda3..6c64df079cde 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/RootEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/RootEndpoint.java @@ -73,17 +73,11 @@ public Response get() return Response.ok(response).build(); } catch (Exception ex) { auditSuccess = false; - AUDIT.logReadFailure( - buildAuditMessageForFailure(S3GAction.LIST_S3_BUCKETS, - getAuditParameters(), ex) - ); + auditReadFailure(S3GAction.LIST_S3_BUCKETS, ex); throw ex; } finally { if (auditSuccess) { - AUDIT.logReadSuccess( - buildAuditMessageForSuccess(S3GAction.LIST_S3_BUCKETS, - getAuditParameters()) - ); + auditReadSuccess(S3GAction.LIST_S3_BUCKETS); } } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestS3GatewayAuditLog.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestS3GatewayAuditLog.java index 411452dec624..8709fa1f6336 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestS3GatewayAuditLog.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestS3GatewayAuditLog.java @@ -120,7 +120,7 @@ public void testHeadBucket() throws Exception { bucketEndpoint.head(bucketName); - String expected = "INFO | S3GAudit | org.apache.hadoop.ozone.s3.endpoint.BucketEndpoint | user=null | ip=null | " + + String expected = "INFO | S3GAudit | org.apache.hadoop.ozone.s3.endpoint.EndpointBase | user=null | ip=null | " + "op=HEAD_BUCKET {\"bucket\":\"[bucket]\",\"x-amz-request-id\":\"" + requestIdentifier.getRequestId() + "\",\"x-amz-id-2\":\"" + requestIdentifier.getAmzId() + "\"} | ret=SUCCESS"; @@ -131,7 +131,7 @@ public void testHeadBucket() throws Exception { public void testListBucket() throws Exception { rootEndpoint.get().getEntity(); - String expected = "INFO | S3GAudit | org.apache.hadoop.ozone.s3.endpoint.RootEndpoint | user=null | ip=null | " + + String expected = "INFO | S3GAudit | org.apache.hadoop.ozone.s3.endpoint.EndpointBase | user=null | ip=null | " + "op=LIST_S3_BUCKETS {\"x-amz-request-id\":\"" + requestIdentifier.getRequestId() + "\",\"x-amz-id-2\":\"" + requestIdentifier.getAmzId() + "\"} | ret=SUCCESS"; @@ -152,7 +152,7 @@ public void testHeadObject() throws Exception { parametersMap.put("path", "[key1]"); keyEndpoint.head(bucketName, "key1"); - String expected = "INFO | S3GAudit | org.apache.hadoop.ozone.s3.endpoint.ObjectEndpoint | user=null | ip=null | " + + String expected = "INFO | S3GAudit | org.apache.hadoop.ozone.s3.endpoint.EndpointBase | user=null | ip=null | " + "op=HEAD_KEY {\"bucket\":\"[bucket]\",\"path\":\"[key1]\",\"x-amz-request-id\":\"" + requestIdentifier.getRequestId() + "\",\"x-amz-id-2\":\"" + requestIdentifier.getAmzId() + "\"} | ret=SUCCESS"; From 4781236dbed054029cd24c4bcb8c2262d0733e35 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 19 Dec 2025 08:13:42 +0100 Subject: [PATCH 10/36] HDDS-14208. 
Create constants for query parameter names (#9525) --- .../ozone/s3/endpoint/BucketEndpoint.java | 51 +++++---- .../ozone/s3/endpoint/ObjectEndpoint.java | 107 +++++++++--------- .../s3/endpoint/ObjectEndpointStreaming.java | 3 +- .../apache/hadoop/ozone/s3/util/S3Consts.java | 26 +++++ 4 files changed, 112 insertions(+), 75 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index d3b39369c3af..dd2bf6b45f0f 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -71,6 +71,7 @@ import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.hadoop.ozone.s3.util.ContinueToken; +import org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams; import org.apache.hadoop.ozone.s3.util.S3StorageType; import org.apache.hadoop.util.Time; import org.apache.http.HttpStatus; @@ -83,6 +84,8 @@ @Path("/{bucket}") public class BucketEndpoint extends EndpointBase { + private static final String BUCKET = "bucket"; + private static final Logger LOG = LoggerFactory.getLogger(BucketEndpoint.class); @@ -113,19 +116,20 @@ private BucketEndpointContext getBucketContext() { @GET @SuppressWarnings({"parameternumber", "methodlength"}) public Response get( - @PathParam("bucket") String bucketName, - @QueryParam("delimiter") String delimiter, - @QueryParam("encoding-type") String encodingType, - @QueryParam("marker") String marker, - @DefaultValue("1000") @QueryParam("max-keys") int maxKeys, - @QueryParam("prefix") String prefix, - @QueryParam("continuation-token") String continueToken, - @QueryParam("start-after") String startAfter, - @QueryParam("uploads") String uploads, - @QueryParam("acl") String aclMarker, - @QueryParam("key-marker") String keyMarker, - @QueryParam("upload-id-marker") String uploadIdMarker, - @DefaultValue("1000") @QueryParam("max-uploads") int maxUploads) throws OS3Exception, IOException { + @PathParam(BUCKET) String bucketName, + @QueryParam(QueryParams.DELIMITER) String delimiter, + @QueryParam(QueryParams.ENCODING_TYPE) String encodingType, + @QueryParam(QueryParams.MARKER) String marker, + @DefaultValue("1000") @QueryParam(QueryParams.MAX_KEYS) int maxKeys, + @QueryParam(QueryParams.PREFIX) String prefix, + @QueryParam(QueryParams.CONTINUATION_TOKEN) String continueToken, + @QueryParam(QueryParams.START_AFTER) String startAfter, + @QueryParam(QueryParams.UPLOADS) String uploads, + @QueryParam(QueryParams.ACL) String aclMarker, + @QueryParam(QueryParams.KEY_MARKER) String keyMarker, + @QueryParam(QueryParams.UPLOAD_ID_MARKER) String uploadIdMarker, + @DefaultValue("1000") @QueryParam(QueryParams.MAX_UPLOADS) int maxUploads + ) throws OS3Exception, IOException { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.GET_BUCKET; PerformanceStringBuilder perf = new PerformanceStringBuilder(); @@ -311,9 +315,11 @@ private int validateMaxKeys(int maxKeys) throws OS3Exception { } @PUT - public Response put(@PathParam("bucket") String bucketName, - @QueryParam("acl") String aclMarker, - InputStream body) throws IOException, OS3Exception { + public Response put( + @PathParam(BUCKET) String bucketName, + @QueryParam(QueryParams.ACL) String aclMarker, + InputStream body + ) throws IOException, OS3Exception 
{ long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.CREATE_BUCKET; @@ -420,7 +426,7 @@ public Response listMultipartUploads( * for more details. */ @HEAD - public Response head(@PathParam("bucket") String bucketName) + public Response head(@PathParam(BUCKET) String bucketName) throws OS3Exception, IOException { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.HEAD_BUCKET; @@ -443,7 +449,7 @@ public Response head(@PathParam("bucket") String bucketName) * for more details. */ @DELETE - public Response delete(@PathParam("bucket") String bucketName) + public Response delete(@PathParam(BUCKET) String bucketName) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.DELETE_BUCKET; @@ -487,10 +493,11 @@ public Response delete(@PathParam("bucket") String bucketName) */ @POST @Produces(MediaType.APPLICATION_XML) - public MultiDeleteResponse multiDelete(@PathParam("bucket") String bucketName, - @QueryParam("delete") String delete, - MultiDeleteRequest request) - throws OS3Exception, IOException { + public MultiDeleteResponse multiDelete( + @PathParam(BUCKET) String bucketName, + @QueryParam(QueryParams.DELETE) String delete, + MultiDeleteRequest request + ) throws OS3Exception, IOException { S3GAction s3GAction = S3GAction.MULTI_DELETE; OzoneBucket bucket = getBucket(bucketName); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 3154ad74d017..c6a2b6539098 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -17,9 +17,6 @@ package org.apache.hadoop.ozone.s3.endpoint; -import static javax.ws.rs.core.HttpHeaders.CONTENT_LENGTH; -import static javax.ws.rs.core.HttpHeaders.ETAG; -import static javax.ws.rs.core.HttpHeaders.LAST_MODIFIED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.EC; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY; @@ -94,6 +91,7 @@ import javax.ws.rs.PathParam; import javax.ws.rs.Produces; import javax.ws.rs.QueryParam; +import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.MultivaluedMap; import javax.ws.rs.core.Response; @@ -135,6 +133,7 @@ import org.apache.hadoop.ozone.s3.util.RangeHeader; import org.apache.hadoop.ozone.s3.util.RangeHeaderParserUtil; import org.apache.hadoop.ozone.s3.util.S3Consts; +import org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams; import org.apache.hadoop.ozone.s3.util.S3StorageType; import org.apache.hadoop.ozone.s3.util.S3Utils; import org.apache.hadoop.ozone.web.utils.OzoneUtils; @@ -149,6 +148,9 @@ @Path("/{bucket}/{path:.+}") public class ObjectEndpoint extends EndpointBase { + private static final String BUCKET = "bucket"; + private static final String PATH = "path"; + private static final Logger LOG = LoggerFactory.getLogger(ObjectEndpoint.class); @@ -174,12 +176,12 @@ public class ObjectEndpoint extends EndpointBase { public ObjectEndpoint() { overrideQueryParameter = ImmutableMap.builder() - .put("Content-Type", "response-content-type") - .put("Content-Language", "response-content-language") - .put("Expires", "response-expires") - 
.put("Cache-Control", "response-cache-control") - .put("Content-Disposition", "response-content-disposition") - .put("Content-Encoding", "response-content-encoding") + .put(HttpHeaders.CONTENT_TYPE, "response-content-type") + .put(HttpHeaders.CONTENT_LANGUAGE, "response-content-language") + .put(HttpHeaders.EXPIRES, "response-expires") + .put(HttpHeaders.CACHE_CONTROL, "response-cache-control") + .put(HttpHeaders.CONTENT_DISPOSITION, "response-content-disposition") + .put(HttpHeaders.CONTENT_ENCODING, "response-content-encoding") .build(); } @@ -210,13 +212,13 @@ public void init() { @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) @PUT public Response put( - @PathParam("bucket") String bucketName, - @PathParam("path") String keyPath, - @HeaderParam("Content-Length") long length, - @QueryParam("partNumber") int partNumber, - @QueryParam("uploadId") @DefaultValue("") String uploadID, - @QueryParam("tagging") String taggingMarker, - @QueryParam("acl") String aclMarker, + @PathParam(BUCKET) String bucketName, + @PathParam(PATH) String keyPath, + @HeaderParam(HttpHeaders.CONTENT_LENGTH) long length, + @QueryParam(QueryParams.PART_NUMBER) int partNumber, + @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadID, + @QueryParam(QueryParams.TAGGING) String taggingMarker, + @QueryParam(QueryParams.ACL) String aclMarker, final InputStream body) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.CREATE_KEY; @@ -328,13 +330,13 @@ public Response put( eTag = DatatypeConverter.printHexBinary( digestInputStream.getMessageDigest().digest()) .toLowerCase(); - output.getMetadata().put(ETAG, eTag); + output.getMetadata().put(OzoneConsts.ETAG, eTag); } } getMetrics().incPutKeySuccessLength(putLength); perf.appendSizeBytes(putLength); return Response.ok() - .header(ETAG, wrapInQuotes(eTag)) + .header(HttpHeaders.ETAG, wrapInQuotes(eTag)) .status(HttpStatus.SC_OK) .build(); } catch (OMException ex) { @@ -404,13 +406,13 @@ public Response put( @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) @GET public Response get( - @PathParam("bucket") String bucketName, - @PathParam("path") String keyPath, - @QueryParam("partNumber") int partNumber, - @QueryParam("uploadId") String uploadId, - @QueryParam("max-parts") @DefaultValue("1000") int maxParts, - @QueryParam("part-number-marker") String partNumberMarker, - @QueryParam("tagging") String taggingMarker) + @PathParam(BUCKET) String bucketName, + @PathParam(PATH) String keyPath, + @QueryParam(QueryParams.PART_NUMBER) int partNumber, + @QueryParam(QueryParams.UPLOAD_ID) String uploadId, + @QueryParam(QueryParams.MAX_PARTS) @DefaultValue("1000") int maxParts, + @QueryParam(QueryParams.PART_NUMBER_MARKER) String partNumberMarker, + @QueryParam(QueryParams.TAGGING) String taggingMarker) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.GET_KEY; @@ -471,7 +473,7 @@ public Response get( }; responseBuilder = Response .ok(output) - .header(CONTENT_LENGTH, keyDetails.getDataSize()); + .header(HttpHeaders.CONTENT_LENGTH, keyDetails.getDataSize()); } else { @@ -495,7 +497,7 @@ public Response get( responseBuilder = Response .status(Status.PARTIAL_CONTENT) .entity(output) - .header(CONTENT_LENGTH, copyLength); + .header(HttpHeaders.CONTENT_LENGTH, copyLength); String contentRangeVal = RANGE_HEADER_SUPPORTED_UNIT + " " + rangeHeader.getStartOffset() + "-" + rangeHeader.getEndOffset() + @@ -506,9 
+508,9 @@ public Response get( responseBuilder .header(ACCEPT_RANGE_HEADER, RANGE_HEADER_SUPPORTED_UNIT); - String eTag = keyDetails.getMetadata().get(ETAG); + String eTag = keyDetails.getMetadata().get(OzoneConsts.ETAG); if (eTag != null) { - responseBuilder.header(ETAG, wrapInQuotes(eTag)); + responseBuilder.header(HttpHeaders.ETAG, wrapInQuotes(eTag)); String partsCount = extractPartsCount(eTag); if (partsCount != null) { responseBuilder.header(MP_PARTS_COUNT, partsCount); @@ -575,7 +577,7 @@ static void addLastModifiedDate( .atZone(ZoneId.of(OzoneConsts.OZONE_TIME_ZONE)); responseBuilder - .header(LAST_MODIFIED, + .header(HttpHeaders.LAST_MODIFIED, RFC1123Util.FORMAT.format(lastModificationTime)); } @@ -598,8 +600,8 @@ static void addTagCountIfAny( */ @HEAD public Response head( - @PathParam("bucket") String bucketName, - @PathParam("path") String keyPath) throws IOException, OS3Exception { + @PathParam(BUCKET) String bucketName, + @PathParam(PATH) String keyPath) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.HEAD_KEY; @@ -636,16 +638,16 @@ public Response head( S3StorageType.fromReplicationConfig(key.getReplicationConfig()); ResponseBuilder response = Response.ok().status(HttpStatus.SC_OK) - .header("Content-Length", key.getDataSize()) - .header("Content-Type", "binary/octet-stream") + .header(HttpHeaders.CONTENT_LENGTH, key.getDataSize()) + .header(HttpHeaders.CONTENT_TYPE, "binary/octet-stream") .header(STORAGE_CLASS_HEADER, s3StorageType.toString()); - String eTag = key.getMetadata().get(ETAG); + String eTag = key.getMetadata().get(OzoneConsts.ETAG); if (eTag != null) { // Should not return ETag header if the ETag is not set // doing so will result in "null" string being returned instead // which breaks some AWS SDK implementation - response.header(ETAG, wrapInQuotes(eTag)); + response.header(HttpHeaders.ETAG, wrapInQuotes(eTag)); String partsCount = extractPartsCount(eTag); if (partsCount != null) { response.header(MP_PARTS_COUNT, partsCount); @@ -717,10 +719,10 @@ private Response abortMultipartUpload(OzoneVolume volume, String bucket, @DELETE @SuppressWarnings("emptyblock") public Response delete( - @PathParam("bucket") String bucketName, - @PathParam("path") String keyPath, - @QueryParam("uploadId") @DefaultValue("") String uploadId, - @QueryParam("tagging") String taggingMarker) throws + @PathParam(BUCKET) String bucketName, + @PathParam(PATH) String keyPath, + @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadId, + @QueryParam(QueryParams.TAGGING) String taggingMarker) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.DELETE_KEY; @@ -794,8 +796,8 @@ public Response delete( @Produces(MediaType.APPLICATION_XML) @Consumes(HeaderPreprocessor.MULTIPART_UPLOAD_MARKER) public Response initializeMultipartUpload( - @PathParam("bucket") String bucket, - @PathParam("path") String key + @PathParam(BUCKET) String bucket, + @PathParam(PATH) String key ) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); @@ -858,9 +860,10 @@ private ReplicationConfig getReplicationConfig(OzoneBucket ozoneBucket, */ @POST @Produces(MediaType.APPLICATION_XML) - public Response completeMultipartUpload(@PathParam("bucket") String bucket, - @PathParam("path") String key, - @QueryParam("uploadId") @DefaultValue("") String uploadID, + public Response completeMultipartUpload( + @PathParam(BUCKET) String bucket, + @PathParam(PATH) String key, + 
@QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadID, CompleteMultipartUploadRequest multipartUploadRequest) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); @@ -1025,9 +1028,9 @@ private Response createMultipartKey(OzoneVolume volume, OzoneBucket ozoneBucket, new byte[getIOBufferSize(length)]); ozoneOutputStream.getMetadata() .putAll(sourceKeyDetails.getMetadata()); - String raw = ozoneOutputStream.getMetadata().get(ETAG); + String raw = ozoneOutputStream.getMetadata().get(OzoneConsts.ETAG); if (raw != null) { - ozoneOutputStream.getMetadata().put(ETAG, stripQuotes(raw)); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, stripQuotes(raw)); } outputStream = ozoneOutputStream; } @@ -1045,7 +1048,7 @@ private Response createMultipartKey(OzoneVolume volume, OzoneBucket ozoneBucket, new byte[getIOBufferSize(length)]); byte[] digest = digestInputStream.getMessageDigest().digest(); ozoneOutputStream.getMetadata() - .put(ETAG, DatatypeConverter.printHexBinary(digest).toLowerCase()); + .put(OzoneConsts.ETAG, DatatypeConverter.printHexBinary(digest).toLowerCase()); outputStream = ozoneOutputStream; } getMetrics().incPutKeySuccessLength(putLength); @@ -1069,7 +1072,7 @@ private Response createMultipartKey(OzoneVolume volume, OzoneBucket ozoneBucket, return Response.ok(new CopyPartResult(eTag)).build(); } else { getMetrics().updateCreateMultipartKeySuccessStats(startNanos); - return Response.ok().header(ETAG, eTag).build(); + return Response.ok().header(HttpHeaders.ETAG, eTag).build(); } } catch (OMException ex) { @@ -1189,7 +1192,7 @@ void copy(OzoneVolume volume, DigestInputStream src, long srcKeyLen, perf.appendMetaLatencyNanos(metadataLatencyNs); copyLength = IOUtils.copyLarge(src, dest, 0, srcKeyLen, new byte[getIOBufferSize(srcKeyLen)]); String eTag = DatatypeConverter.printHexBinary(src.getMessageDigest().digest()).toLowerCase(); - dest.getMetadata().put(ETAG, eTag); + dest.getMetadata().put(OzoneConsts.ETAG, eTag); } } getMetrics().incCopyObjectSuccessLength(copyLength); @@ -1236,7 +1239,7 @@ private CopyObjectResponse copyObject(OzoneVolume volume, // still does not support this just returning dummy response // for now CopyObjectResponse copyObjectResponse = new CopyObjectResponse(); - copyObjectResponse.setETag(wrapInQuotes(sourceKeyDetails.getMetadata().get(ETAG))); + copyObjectResponse.setETag(wrapInQuotes(sourceKeyDetails.getMetadata().get(OzoneConsts.ETAG))); copyObjectResponse.setLastModified(Instant.ofEpochMilli( Time.now())); return copyObjectResponse; @@ -1291,7 +1294,7 @@ private CopyObjectResponse copyObject(OzoneVolume volume, getMetrics().updateCopyObjectSuccessStats(startNanos); CopyObjectResponse copyObjectResponse = new CopyObjectResponse(); - copyObjectResponse.setETag(wrapInQuotes(destKeyDetails.getMetadata().get(ETAG))); + copyObjectResponse.setETag(wrapInQuotes(destKeyDetails.getMetadata().get(OzoneConsts.ETAG))); copyObjectResponse.setLastModified(destKeyDetails.getModificationTime()); return copyObjectResponse; } catch (OMException ex) { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java index 647aafe839cb..186719c2b784 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java @@ -27,6 +27,7 
@@ import java.nio.ByteBuffer; import java.security.DigestInputStream; import java.util.Map; +import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import javax.xml.bind.DatatypeConverter; import org.apache.commons.lang3.tuple.Pair; @@ -191,7 +192,7 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, throw ex; } return Response.ok() - .header(OzoneConsts.ETAG, wrapInQuotes(eTag)) + .header(HttpHeaders.ETAG, wrapInQuotes(eTag)) .build(); } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java index f41013755030..04038bb81cef 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java @@ -109,4 +109,30 @@ public enum CopyDirective { REPLACE } + /** Constants for query parameters. */ + public static final class QueryParams { + public static final String ACL = "acl"; + public static final String CONTINUATION_TOKEN = "continuation-token"; + public static final String DELETE = "delete"; + public static final String DELIMITER = "delimiter"; + public static final String ENCODING_TYPE = "encoding-type"; + public static final String KEY_MARKER = "key-marker"; + public static final String MARKER = "marker"; + public static final String MAX_KEYS = "max-keys"; + public static final String MAX_PARTS = "max-parts"; + public static final String MAX_UPLOADS = "max-uploads"; + public static final String PART_NUMBER = "partNumber"; + public static final String PART_NUMBER_MARKER = "part-number-marker"; + public static final String PREFIX = "prefix"; + public static final String START_AFTER = "start-after"; + public static final String TAGGING = "tagging"; + public static final String UPLOAD_ID = "uploadId"; + public static final String UPLOAD_ID_MARKER = "upload-id-marker"; + public static final String UPLOADS = "uploads"; + + private QueryParams() { + // no instances + } + } + } From 7ee73d067e05636d40ff4b7ec41867cbacb36c14 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 19 Dec 2025 10:28:06 +0100 Subject: [PATCH 11/36] HDDS-14197. 
Add utility for generating unique object name in tests (#9517) --- .../org/apache/ozone/test/OzoneTestBase.java | 16 ++++++++++++++++ .../ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java | 9 ++++----- .../ozone/s3/awssdk/v2/AbstractS3SDKV2Tests.java | 9 ++++----- .../hadoop/fs/ozone/TestLeaseRecovery.java | 7 ++----- .../hadoop/ozone/om/TestBucketManagerImpl.java | 2 +- .../ozone/om/service/TestKeyDeletingService.java | 4 ---- 6 files changed, 27 insertions(+), 20 deletions(-) diff --git a/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/OzoneTestBase.java b/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/OzoneTestBase.java index 67446b27692a..93ded5dd880e 100644 --- a/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/OzoneTestBase.java +++ b/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/OzoneTestBase.java @@ -18,6 +18,9 @@ package org.apache.ozone.test; import java.lang.reflect.Method; +import java.util.Locale; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.TestInfo; @@ -27,6 +30,7 @@ */ public abstract class OzoneTestBase { + private static final AtomicInteger OBJECT_COUNTER = new AtomicInteger(); private TestInfo info; @BeforeEach @@ -40,4 +44,16 @@ protected String getTestName() { .orElse("unknown"); } + /** @return unique lowercase name of maximum 60 characters, including (some of) the current test's name */ + protected String uniqueObjectName() { + return uniqueObjectName(getTestName()); + } + + /** @return unique lowercase name of maximum 60 characters, including (some of) {@code prefix} */ + public static String uniqueObjectName(String prefix) { + return Objects.requireNonNull(prefix, "prefix == null") + .substring(0, Math.min(prefix.length(), 50)) + .toLowerCase(Locale.ROOT) + + String.format("%010d", OBJECT_COUNTER.getAndIncrement()); + } } diff --git a/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java b/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java index 93fda7b26004..5e200e1350ae 100644 --- a/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java +++ b/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java @@ -94,7 +94,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; @@ -1424,16 +1423,16 @@ private String getBucketName() { return getBucketName(""); } - private String getBucketName(String suffix) { - return ("v1-" + getTestName() + "bucket" + suffix).toLowerCase(Locale.ROOT); + private String getBucketName(String ignored) { + return uniqueObjectName(); } private String getKeyName() { return getKeyName(""); } - private String getKeyName(String suffix) { - return (getTestName() + "key" + suffix).toLowerCase(Locale.ROOT); + private String getKeyName(String ignored) { + return uniqueObjectName(); } private String multipartUpload(String bucketName, String key, File file, long partSize, String contentType, diff --git a/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v2/AbstractS3SDKV2Tests.java b/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v2/AbstractS3SDKV2Tests.java index 7bdc1778289e..09026dcb9182 100644 --- 
a/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v2/AbstractS3SDKV2Tests.java +++ b/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v2/AbstractS3SDKV2Tests.java @@ -47,7 +47,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.stream.Collectors; import javax.xml.bind.DatatypeConverter; @@ -1066,16 +1065,16 @@ private String getBucketName() { return getBucketName(""); } - private String getBucketName(String suffix) { - return ("v2-" + getTestName() + "bucket" + suffix).toLowerCase(Locale.ROOT); + private String getBucketName(String ignored) { + return uniqueObjectName(); } private String getKeyName() { return getKeyName(""); } - private String getKeyName(String suffix) { - return (getTestName() + "key" + suffix).toLowerCase(Locale.ROOT); + private String getKeyName(String ignored) { + return uniqueObjectName(); } private String multipartUpload(String bucketName, String key, File file, int partSize, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java index 800cc04f476a..061ced5bd93f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java @@ -42,7 +42,6 @@ import java.util.LinkedList; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataInputStream; @@ -96,8 +95,6 @@ @TestMethodOrder(MethodOrderer.OrderAnnotation.class) public class TestLeaseRecovery extends OzoneTestBase { - private static final AtomicInteger FILE_COUNTER = new AtomicInteger(); - private MiniOzoneCluster cluster; private OzoneClient client; @@ -176,7 +173,7 @@ public void init() throws IOException, InterruptedException, @BeforeEach void beforeEach() throws Exception { - file = new Path(dir, "file-" + getTestName() + "-" + FILE_COUNTER.incrementAndGet()); + file = new Path(dir, uniqueObjectName()); fs = (RootedOzoneFileSystem) FileSystem.get(conf); } @@ -266,7 +263,7 @@ public void testOBSRecoveryShouldFail() throws Exception { OzoneBucket obsBucket = TestDataUtil.createVolumeAndBucket(client, "vol2", "obs", BucketLayout.OBJECT_STORE); String obsDir = OZONE_ROOT + obsBucket.getVolumeName() + OZONE_URI_DELIMITER + obsBucket.getName(); - Path obsFile = new Path(obsDir, "file" + getTestName() + FILE_COUNTER.incrementAndGet()); + Path obsFile = new Path(obsDir, uniqueObjectName()); assertThrows(IllegalArgumentException.class, () -> fs.recoverLease(obsFile)); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java index 5b742c4b22e7..42d748607ac4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java @@ -86,7 +86,7 @@ void cleanup() throws Exception { } public String volumeName() { - return getTestName().toLowerCase(); + return uniqueObjectName(); } 
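As a usage note, a small hedged sketch (not part of this patch) of how a test built on OzoneTestBase can rely on the new helper; the test class, method and assertions are hypothetical, while uniqueObjectName() and its 50-character-prefix plus 10-digit-counter behaviour come from the change above.

    import static org.junit.jupiter.api.Assertions.assertNotEquals;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.util.Locale;
    import org.apache.ozone.test.OzoneTestBase;
    import org.junit.jupiter.api.Test;

    class TestUniqueNames extends OzoneTestBase {
      @Test
      void generatesDistinctLowercaseNames() {
        // derived from the running test's name, e.g. "generatesdistinctlowercasenames0000000042"
        String bucket = uniqueObjectName();
        // explicit prefix variant: the prefix is truncated to 50 characters and lowercased
        String key = uniqueObjectName("MyKeyPrefix");
        assertNotEquals(bucket, key);
        assertTrue(bucket.length() <= 60);
        assertTrue(bucket.equals(bucket.toLowerCase(Locale.ROOT)));
      }
    }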
private void createSampleVol(String volume) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java index 73cf8c2cf2f6..a01c6b89f077 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java @@ -1389,8 +1389,4 @@ private long countBlocksPendingDeletion() { throw new UncheckedIOException(e); } } - - private static String uniqueObjectName(String prefix) { - return prefix + String.format("%010d", OBJECT_COUNTER.getAndIncrement()); - } } From 559037c6df4c3423c8079a219f54b14ce73d973a Mon Sep 17 00:00:00 2001 From: "Eric C. Ho" Date: Fri, 19 Dec 2025 19:24:51 +0800 Subject: [PATCH 12/36] HDDS-13305. Create wrapper object for container checksums (#8789) --- .../scm/container/ContainerChecksums.java | 87 +++++++++++++++++++ .../scm/container/TestContainerChecksums.java | 55 ++++++++++++ .../AbstractContainerReportHandler.java | 2 +- .../hdds/scm/container/ContainerReplica.java | 24 +++-- ...groundContainerDataScannerIntegration.java | 18 ++-- .../recon/fsck/ContainerHealthStatus.java | 4 +- .../ozone/recon/fsck/ContainerHealthTask.java | 8 +- .../recon/scm/ContainerReplicaHistory.java | 23 +++-- .../recon/scm/ReconContainerManager.java | 19 ++-- .../recon/api/TestContainerEndpoint.java | 19 ++-- .../recon/fsck/TestContainerHealthStatus.java | 9 +- .../recon/fsck/TestContainerHealthTask.java | 5 +- ...estContainerHealthTaskRecordGenerator.java | 5 +- .../recon/scm/TestReconContainerManager.java | 7 +- 14 files changed, 226 insertions(+), 59 deletions(-) create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerChecksums.java create mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerChecksums.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerChecksums.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerChecksums.java new file mode 100644 index 000000000000..ef089a87d17e --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerChecksums.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.container; + +import java.util.Objects; +import net.jcip.annotations.Immutable; + +/** + * Wrapper for container checksums (data, metadata, etc.). + * Provides equality, hash, and hex string rendering. + * A value of 0 indicates an unknown or unset checksum. 
+ */ +@Immutable +public final class ContainerChecksums { + // Checksum of the data within the wrapper. + private final long dataChecksum; + + // Checksum of the metadata within the wrapper. + private final long metadataChecksum; + + private static final ContainerChecksums UNKNOWN = + new ContainerChecksums(0L, 0L); + + private ContainerChecksums(long dataChecksum, long metadataChecksum) { + this.dataChecksum = dataChecksum; + this.metadataChecksum = metadataChecksum; + } + + public static ContainerChecksums unknown() { + return UNKNOWN; + } + + public static ContainerChecksums of(long dataChecksum) { + return new ContainerChecksums(dataChecksum, 0L); + } + + public static ContainerChecksums of(long dataChecksum, long metadataChecksum) { + return new ContainerChecksums(dataChecksum, metadataChecksum); + } + + public long getDataChecksum() { + return dataChecksum; + } + + public long getMetadataChecksum() { + return metadataChecksum; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof ContainerChecksums)) { + return false; + } + ContainerChecksums that = (ContainerChecksums) obj; + return dataChecksum == that.dataChecksum && + metadataChecksum == that.metadataChecksum; + } + + @Override + public int hashCode() { + return Objects.hash(dataChecksum, metadataChecksum); + } + + @Override + public String toString() { + return "data=" + Long.toHexString(getDataChecksum()) + + ", metadata=" + Long.toHexString(getMetadataChecksum()); + } +} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerChecksums.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerChecksums.java new file mode 100644 index 000000000000..6e9d34c5562b --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerChecksums.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdds.scm.container; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; + +import org.junit.jupiter.api.Test; + +class TestContainerChecksums { + @Test + void testEqualsAndHashCode() { + ContainerChecksums c1 = ContainerChecksums.of(123L, 0L); + ContainerChecksums c2 = ContainerChecksums.of(123L, 0L); + ContainerChecksums c3 = ContainerChecksums.of(456L, 0L); + ContainerChecksums c4 = ContainerChecksums.of(123L, 789L); + ContainerChecksums c5 = ContainerChecksums.of(123L, 789L); + ContainerChecksums c6 = ContainerChecksums.of(123L, 790L); + + assertEquals(c1, c2); + assertEquals(c1.hashCode(), c2.hashCode()); + assertNotEquals(c1, c3); + assertNotEquals(c1, c4); + assertEquals(c4, c5); + assertNotEquals(c4, c6); + } + + @Test + void testToString() { + ContainerChecksums c1 = ContainerChecksums.of(0x1234ABCDL, 0L); + assertThat(c1.toString()).contains("data=1234abcd", "metadata=0"); + + ContainerChecksums c2 = ContainerChecksums.of(0x1234ABCDL, 0xDEADBEEFL); + assertThat(c2.toString()).contains("data=1234abcd").contains("metadata=deadbeef"); + + ContainerChecksums c3 = ContainerChecksums.unknown(); + assertThat(c3.toString()).contains("data=0").contains("metadata=0"); + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java index 7f2030f6e74b..35908afff877 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java @@ -362,7 +362,7 @@ private void updateContainerReplica(final DatanodeDetails datanodeDetails, .setReplicaIndex(replicaProto.getReplicaIndex()) .setBytesUsed(replicaProto.getUsed()) .setEmpty(replicaProto.getIsEmpty()) - .setDataChecksum(replicaProto.getDataChecksum()) + .setChecksums(ContainerChecksums.of(replicaProto.getDataChecksum())) .build(); if (replica.getState().equals(State.DELETED)) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java index 6cba1e8f1c31..5c9bd57cd881 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java @@ -45,7 +45,7 @@ public final class ContainerReplica implements Comparable { private final long keyCount; private final long bytesUsed; private final boolean isEmpty; - private final long dataChecksum; + private final ContainerChecksums checksums; private ContainerReplica(ContainerReplicaBuilder b) { this.containerID = Objects.requireNonNull(b.containerID, "containerID == null"); @@ -57,7 +57,7 @@ private ContainerReplica(ContainerReplicaBuilder b) { this.replicaIndex = b.replicaIndex; this.isEmpty = b.isEmpty; this.sequenceId = b.sequenceId; - this.dataChecksum = b.dataChecksum; + this.checksums = Objects.requireNonNull(b.checksums, "checksums == null"); } public ContainerID getContainerID() { @@ -122,8 +122,12 @@ public boolean isEmpty() { return isEmpty; } + public ContainerChecksums getChecksums() { + return 
checksums; + } + public long getDataChecksum() { - return dataChecksum; + return checksums.getDataChecksum(); } @Override @@ -180,7 +184,8 @@ public ContainerReplicaBuilder toBuilder() { .setOriginNodeId(originDatanodeId) .setReplicaIndex(replicaIndex) .setSequenceId(sequenceId) - .setEmpty(isEmpty); + .setEmpty(isEmpty) + .setChecksums(checksums); } @Override @@ -194,7 +199,7 @@ public String toString() { + ", keyCount=" + keyCount + ", bytesUsed=" + bytesUsed + ", " + (isEmpty ? "empty" : "non-empty") - + ", dataChecksum=" + dataChecksum + + ", checksums=" + checksums + '}'; } @@ -212,7 +217,7 @@ public static class ContainerReplicaBuilder { private long keyCount; private int replicaIndex; private boolean isEmpty; - private long dataChecksum; + private ContainerChecksums checksums; /** * Set Container Id. @@ -287,8 +292,8 @@ public ContainerReplicaBuilder setEmpty(boolean empty) { return this; } - public ContainerReplicaBuilder setDataChecksum(long dataChecksum) { - this.dataChecksum = dataChecksum; + public ContainerReplicaBuilder setChecksums(ContainerChecksums checksums) { + this.checksums = checksums; return this; } @@ -298,6 +303,9 @@ public ContainerReplicaBuilder setDataChecksum(long dataChecksum) { * @return ContainerReplicaBuilder */ public ContainerReplica build() { + if (this.checksums == null) { + this.checksums = ContainerChecksums.unknown(); + } return new ContainerReplica(this); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestBackgroundContainerDataScannerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestBackgroundContainerDataScannerIntegration.java index 87c5a719cd76..5e53eec00d3b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestBackgroundContainerDataScannerIntegration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestBackgroundContainerDataScannerIntegration.java @@ -30,6 +30,7 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; +import org.apache.hadoop.hdds.scm.container.ContainerChecksums; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.TestContainerCorruptions; @@ -84,8 +85,8 @@ void testCorruptionDetected(TestContainerCorruptions corruption) assertNotEquals(0, container.getContainerData().getDataChecksum()); waitForScmToSeeReplicaState(containerID, CLOSED); - long initialReportedDataChecksum = getContainerReplica(containerID).getDataChecksum(); - assertNotEquals(0, initialReportedDataChecksum); + ContainerChecksums initialReportedChecksum = getContainerReplica(containerID).getChecksums(); + assertNotEquals(ContainerChecksums.unknown(), initialReportedChecksum); corruption.applyTo(container); resumeScanner(); @@ -97,16 +98,16 @@ void testCorruptionDetected(TestContainerCorruptions corruption) // Wait for SCM to get a report of the unhealthy replica with a different checksum than before. 
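For orientation, a brief hedged sketch (not part of this patch) of the wrapper's value semantics and of how a replica carries it. ContainerChecksums.of/unknown, setChecksums and getChecksums come from the changes above; ContainerID.valueOf, MockDatanodeDetails and ContainerReplicaProto.State are assumed pre-existing helpers, and the id, state and checksum values are made up.

    // Factory methods: the single-argument form leaves the metadata checksum at 0 (unset).
    ContainerChecksums healthy = ContainerChecksums.of(0x1234ABCDL);
    ContainerChecksums corrupted = ContainerChecksums.of(0xDEADBEEFL, 0L);

    ContainerReplica replica = new ContainerReplica.ContainerReplicaBuilder()
        .setContainerID(ContainerID.valueOf(1L))
        .setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails())
        .setContainerState(ContainerReplicaProto.State.CLOSED)
        .setChecksums(healthy)   // omitting this leaves the replica at ContainerChecksums.unknown()
        .build();

    // Equality covers both fields, so Recon's mismatch check reduces to a distinct() count
    // over getChecksums() across the replicas of a container.
    assert replica.getChecksums().equals(healthy);
    assert !healthy.equals(corrupted);
    assert ContainerChecksums.unknown().getDataChecksum() == 0L;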
waitForScmToSeeReplicaState(containerID, UNHEALTHY); - long newReportedDataChecksum = getContainerReplica(containerID).getDataChecksum(); + ContainerChecksums newReportedChecksum = getContainerReplica(containerID).getChecksums(); if (corruption == TestContainerCorruptions.MISSING_METADATA_DIR || corruption == TestContainerCorruptions.MISSING_CONTAINER_DIR) { // In these cases, the new tree will not be able to be written since it exists in the metadata directory. // When the tree write fails, the in-memory checksum should remain at its original value. - assertEquals(checksumToString(initialReportedDataChecksum), checksumToString(newReportedDataChecksum)); + assertEquals(initialReportedChecksum, newReportedChecksum); } else { - assertNotEquals(checksumToString(initialReportedDataChecksum), checksumToString(newReportedDataChecksum)); + assertNotEquals(initialReportedChecksum, newReportedChecksum); // Test that the scanner wrote updated checksum info to the disk. - assertReplicaChecksumMatches(container, newReportedDataChecksum); + assertReplicaChecksumMatches(container, newReportedChecksum); assertFalse(container.getContainerData().needsDataChecksum()); KeyValueContainerData containerData = (KeyValueContainerData) container.getContainerData(); verifyAllDataChecksumsMatch(containerData, getConf()); @@ -122,10 +123,11 @@ void testCorruptionDetected(TestContainerCorruptions corruption) } } - private void assertReplicaChecksumMatches(Container container, long expectedChecksum) throws Exception { + private void assertReplicaChecksumMatches( + Container container, ContainerChecksums expectedChecksum) throws Exception { assertTrue(containerChecksumFileExists(container.getContainerData().getContainerID())); long dataChecksumFromFile = readChecksumFile(container.getContainerData()) .getContainerMerkleTree().getDataChecksum(); - assertEquals(checksumToString(expectedChecksum), checksumToString(dataChecksumFromFile)); + assertEquals(checksumToString(expectedChecksum.getDataChecksum()), checksumToString(dataChecksumFromFile)); } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthStatus.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthStatus.java index 4c28806dfa8a..7a69c403050d 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthStatus.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthStatus.java @@ -160,9 +160,9 @@ public boolean isEmpty() { return numKeys == 0; } - public boolean isDataChecksumMismatched() { + public boolean areChecksumsMismatched() { return !replicas.isEmpty() && replicas.stream() - .map(ContainerReplica::getDataChecksum) + .map(ContainerReplica::getChecksums) .distinct() .count() != 1; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java index 56fc47132b29..a6b6f3a8c30f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java @@ -385,7 +385,7 @@ private void processContainer(ContainerInfo container, long currentTime, containerReplicas, placementPolicy, reconContainerMetadataManager, conf); - if ((h.isHealthilyReplicated() && !h.isDataChecksumMismatched()) || h.isDeleted()) { + if ((h.isHealthilyReplicated() && 
!h.areChecksumsMismatched()) || h.isDeleted()) { return; } // For containers deleted in SCM, we sync the container state here. @@ -563,7 +563,7 @@ public static List generateUnhealthyRecords( Map> unhealthyContainerStateStatsMap) { List records = new ArrayList<>(); - if ((container.isHealthilyReplicated() && !container.isDataChecksumMismatched()) || container.isDeleted()) { + if ((container.isHealthilyReplicated() && !container.areChecksumsMismatched()) || container.isDeleted()) { return records; } @@ -610,7 +610,7 @@ public static List generateUnhealthyRecords( populateContainerStats(container, UnHealthyContainerStates.OVER_REPLICATED, unhealthyContainerStateStatsMap); } - if (container.isDataChecksumMismatched() + if (container.areChecksumsMismatched() && !recordForStateExists.contains( UnHealthyContainerStates.REPLICA_MISMATCH.toString())) { records.add(recordForState( @@ -686,7 +686,7 @@ private static boolean keepMisReplicatedRecord( private static boolean keepReplicaMismatchRecord( ContainerHealthStatus container, UnhealthyContainersRecord rec) { - if (container.isDataChecksumMismatched()) { + if (container.areChecksumsMismatched()) { updateExpectedReplicaCount(rec, container.getReplicationFactor()); updateActualReplicaCount(rec, container.getReplicaCount()); updateReplicaDelta(rec, container.replicaDelta()); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistory.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistory.java index d47c7010a2aa..971bc2d27258 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistory.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistory.java @@ -19,6 +19,7 @@ import java.util.UUID; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerReplicaHistoryProto; +import org.apache.hadoop.hdds.scm.container.ContainerChecksums; /** * A ContainerReplica timestamp class that tracks first and last seen time. @@ -39,16 +40,16 @@ public class ContainerReplicaHistory { private long bcsId; private String state; - private long dataChecksum; + private ContainerChecksums checksums; public ContainerReplicaHistory(UUID id, Long firstSeenTime, - Long lastSeenTime, long bcsId, String state, long dataChecksum) { + Long lastSeenTime, long bcsId, String state, ContainerChecksums checksums) { this.uuid = id; this.firstSeenTime = firstSeenTime; this.lastSeenTime = lastSeenTime; this.bcsId = bcsId; this.state = state; - this.dataChecksum = dataChecksum; + setChecksums(checksums); } public long getBcsId() { @@ -84,23 +85,29 @@ public void setState(String state) { } public long getDataChecksum() { - return dataChecksum; + return getChecksums().getDataChecksum(); } - public void setDataChecksum(long dataChecksum) { - this.dataChecksum = dataChecksum; + public ContainerChecksums getChecksums() { + return checksums; + } + + public void setChecksums(ContainerChecksums checksums) { + this.checksums = checksums != null ? 
checksums : ContainerChecksums.unknown(); } public static ContainerReplicaHistory fromProto( ContainerReplicaHistoryProto proto) { return new ContainerReplicaHistory(UUID.fromString(proto.getUuid()), proto.getFirstSeenTime(), proto.getLastSeenTime(), proto.getBcsId(), - proto.getState(), proto.getDataChecksum()); + proto.getState(), ContainerChecksums.of(proto.getDataChecksum())); } public ContainerReplicaHistoryProto toProto() { return ContainerReplicaHistoryProto.newBuilder().setUuid(uuid.toString()) .setFirstSeenTime(firstSeenTime).setLastSeenTime(lastSeenTime) - .setBcsId(bcsId).setState(state).setDataChecksum(dataChecksum).build(); + .setBcsId(bcsId).setState(state) + .setDataChecksum(checksums.getDataChecksum()) + .build(); } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java index 59436cb72b2a..586aad5fd68f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeID; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; +import org.apache.hadoop.hdds.scm.container.ContainerChecksums; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl; @@ -278,7 +279,7 @@ public void updateContainerReplica(ContainerID containerID, boolean flushToDB = false; long bcsId = replica.getSequenceId() != null ? 
replica.getSequenceId() : -1; String state = replica.getState().toString(); - long dataChecksum = replica.getDataChecksum(); + ContainerChecksums checksums = replica.getChecksums(); // If replica doesn't exist in in-memory map, add to DB and add to map if (replicaLastSeenMap == null) { @@ -286,7 +287,7 @@ public void updateContainerReplica(ContainerID containerID, replicaHistoryMap.putIfAbsent(id, new ConcurrentHashMap() {{ put(uuid, new ContainerReplicaHistory(uuid, currTime, currTime, - bcsId, state, dataChecksum)); + bcsId, state, checksums)); }}); flushToDB = true; } else { @@ -296,18 +297,19 @@ public void updateContainerReplica(ContainerID containerID, // New Datanode replicaLastSeenMap.put(uuid, new ContainerReplicaHistory(uuid, currTime, currTime, bcsId, - state, dataChecksum)); + state, checksums)); flushToDB = true; } else { // if the object exists, only update the last seen time & bcsId fields ts.setLastSeenTime(currTime); ts.setBcsId(bcsId); ts.setState(state); + ts.setChecksums(checksums); } } if (flushToDB) { - upsertContainerHistory(id, uuid, currTime, bcsId, state, dataChecksum); + upsertContainerHistory(id, uuid, currTime, bcsId, state, checksums); } } @@ -324,7 +326,6 @@ public void removeContainerReplica(ContainerID containerID, final DatanodeDetails dnInfo = replica.getDatanodeDetails(); final UUID uuid = dnInfo.getUuid(); String state = replica.getState().toString(); - long dataChecksum = replica.getDataChecksum(); final Map replicaLastSeenMap = replicaHistoryMap.get(id); @@ -333,7 +334,7 @@ public void removeContainerReplica(ContainerID containerID, if (ts != null) { // Flush to DB, then remove from in-memory map upsertContainerHistory(id, uuid, ts.getLastSeenTime(), ts.getBcsId(), - state, dataChecksum); + state, ts.getChecksums()); replicaLastSeenMap.remove(uuid); } } @@ -430,7 +431,7 @@ public void flushReplicaHistoryMapToDB(boolean clearMap) { } public void upsertContainerHistory(long containerID, UUID uuid, long time, - long bcsId, String state, long dataChecksum) { + long bcsId, String state, ContainerChecksums checksums) { Map tsMap; try { tsMap = cdbServiceProvider.getContainerReplicaHistory(containerID); @@ -438,12 +439,12 @@ public void upsertContainerHistory(long containerID, UUID uuid, long time, if (ts == null) { // New entry tsMap.put(uuid, new ContainerReplicaHistory(uuid, time, time, bcsId, - state, dataChecksum)); + state, checksums)); } else { // Entry exists, update last seen time and put it back to DB. 
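As a usage note, a hedged sketch (not part of this patch) of how Recon callers migrate from passing a raw long data checksum to passing the wrapper; reconContainerManager is a hypothetical reference to a ReconContainerManager, and the container id, datanode, timestamps and checksum value are made up. upsertContainerHistory, ContainerReplicaHistory and ContainerChecksums come from the changes above.

    UUID datanode = UUID.randomUUID();
    // previously: upsertContainerHistory(1L, datanode, Time.now(), 5L, "CLOSED", 0x1234ABCDL)
    reconContainerManager.upsertContainerHistory(
        1L, datanode, Time.now(), 5L, "CLOSED", ContainerChecksums.of(0x1234ABCDL));

    // The history entry normalizes a null wrapper to ContainerChecksums.unknown(); its proto
    // form still carries only the data checksum field.
    ContainerReplicaHistory history =
        new ContainerReplicaHistory(datanode, Time.now(), Time.now(), 5L, "CLOSED", null);
    assert history.getChecksums().equals(ContainerChecksums.unknown());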
ts.setLastSeenTime(time); ts.setState(state); - ts.setDataChecksum(dataChecksum); + ts.setChecksums(checksums); } cdbServiceProvider.storeContainerReplicaHistory(containerID, tsMap); } catch (IOException e) { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java index 7df56a57be65..801177d25c47 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java @@ -64,6 +64,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeID; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.scm.container.ContainerChecksums; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerStateManager; @@ -1074,12 +1075,12 @@ public void testGetReplicaHistoryForContainer() throws IOException { final UUID u2 = newDatanode("host2", "127.0.0.2"); final UUID u3 = newDatanode("host3", "127.0.0.3"); final UUID u4 = newDatanode("host4", "127.0.0.4"); - reconContainerManager.upsertContainerHistory(1L, u1, 1L, 1L, "OPEN", 1234L); - reconContainerManager.upsertContainerHistory(1L, u2, 2L, 1L, "OPEN", 1234L); - reconContainerManager.upsertContainerHistory(1L, u3, 3L, 1L, "OPEN", 1234L); - reconContainerManager.upsertContainerHistory(1L, u4, 4L, 1L, "OPEN", 1234L); + reconContainerManager.upsertContainerHistory(1L, u1, 1L, 1L, "OPEN", ContainerChecksums.of(1234L, 0L)); + reconContainerManager.upsertContainerHistory(1L, u2, 2L, 1L, "OPEN", ContainerChecksums.of(1234L, 0L)); + reconContainerManager.upsertContainerHistory(1L, u3, 3L, 1L, "OPEN", ContainerChecksums.of(1234L, 0L)); + reconContainerManager.upsertContainerHistory(1L, u4, 4L, 1L, "OPEN", ContainerChecksums.of(1234L, 0L)); - reconContainerManager.upsertContainerHistory(1L, u1, 5L, 1L, "OPEN", 1234L); + reconContainerManager.upsertContainerHistory(1L, u1, 5L, 1L, "OPEN", ContainerChecksums.of(1234L, 0L)); Response response = containerEndpoint.getReplicaHistoryForContainer(1L); List histories = @@ -1189,13 +1190,13 @@ private void createUnhealthyRecord(int id, String state, int expected, long differentChecksum = dataChecksumMismatch ? 
2345L : 1234L; reconContainerManager.upsertContainerHistory(cID, uuid1, 1L, 1L, - "UNHEALTHY", differentChecksum); + "UNHEALTHY", ContainerChecksums.of(differentChecksum, 0L)); reconContainerManager.upsertContainerHistory(cID, uuid2, 2L, 1L, - "UNHEALTHY", differentChecksum); + "UNHEALTHY", ContainerChecksums.of(differentChecksum, 0L)); reconContainerManager.upsertContainerHistory(cID, uuid3, 3L, 1L, - "UNHEALTHY", 1234L); + "UNHEALTHY", ContainerChecksums.of(1234L, 0L)); reconContainerManager.upsertContainerHistory(cID, uuid4, 4L, 1L, - "UNHEALTHY", 1234L); + "UNHEALTHY", ContainerChecksums.of(1234L, 0L)); } protected ContainerWithPipeline getTestContainer( diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java index dc488ea303ca..6fcf5c5fb4f6 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.scm.PlacementPolicy; +import org.apache.hadoop.hdds.scm.container.ContainerChecksums; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerReplica; @@ -189,7 +190,7 @@ public void testSameDataChecksumContainer() { assertFalse(status.isUnderReplicated()); assertFalse(status.isOverReplicated()); assertFalse(status.isMisReplicated()); - assertFalse(status.isDataChecksumMismatched()); + assertFalse(status.areChecksumsMismatched()); } @Test @@ -206,7 +207,7 @@ public void testDataChecksumMismatchContainer() { assertFalse(status.isUnderReplicated()); assertFalse(status.isOverReplicated()); assertFalse(status.isMisReplicated()); - assertTrue(status.isDataChecksumMismatched()); + assertTrue(status.areChecksumsMismatched()); } /** @@ -416,7 +417,7 @@ private Set generateReplicas(ContainerInfo cont, replicas.add(new ContainerReplica.ContainerReplicaBuilder() .setContainerID(cont.containerID()) .setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails()) - .setDataChecksum(1234L) + .setChecksums(ContainerChecksums.of(1234L, 0L)) .setContainerState(s) .build()); } @@ -432,7 +433,7 @@ private Set generateMismatchedReplicas(ContainerInfo cont, .setContainerID(cont.containerID()) .setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails()) .setContainerState(s) - .setDataChecksum(checksum) + .setChecksums(ContainerChecksums.of(checksum, 0L)) .build()); checksum++; } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java index ca902ea37825..4210756d1cd5 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; import org.apache.hadoop.hdds.scm.ContainerPlacementStatus; import org.apache.hadoop.hdds.scm.PlacementPolicy; +import 
org.apache.hadoop.hdds.scm.container.ContainerChecksums; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; @@ -691,7 +692,7 @@ private Set getMockReplicas( .setContainerState(s) .setContainerID(ContainerID.valueOf(containerId)) .setSequenceId(1) - .setDataChecksum(1234L) + .setChecksums(ContainerChecksums.of(1234L, 0L)) .build()); } return replicas; @@ -707,7 +708,7 @@ private Set getMockReplicasChecksumMismatch( .setContainerState(s) .setContainerID(ContainerID.valueOf(containerId)) .setSequenceId(1) - .setDataChecksum(checksum) + .setChecksums(ContainerChecksums.of(checksum, 0L)) .build()); checksum++; } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java index d020a548fc8c..9e8b3905a58a 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.PlacementPolicy; +import org.apache.hadoop.hdds.scm.container.ContainerChecksums; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerReplica; @@ -642,8 +643,8 @@ private Set generateReplicas(ContainerInfo cont, replicas.add(new ContainerReplica.ContainerReplicaBuilder() .setContainerID(cont.containerID()) .setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails()) + .setChecksums(ContainerChecksums.of(1234L, 0L)) .setContainerState(s) - .setDataChecksum(1234L) .build()); } return replicas; @@ -658,7 +659,7 @@ private Set generateMismatchedReplicas(ContainerInfo cont, .setContainerID(cont.containerID()) .setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails()) .setContainerState(s) - .setDataChecksum(checksum) + .setChecksums(ContainerChecksums.of(checksum, 0L)) .build()); checksum++; } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java index 81c9166b13a2..1d871b9974b9 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; +import org.apache.hadoop.hdds.scm.container.ContainerChecksums; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerReplica; @@ -209,7 +210,8 @@ public void testUpdateAndRemoveContainerReplica() .setUuid(uuid1).setHostName("host1").setIpAddress("127.0.0.1").build(); ContainerReplica containerReplica1 = 
ContainerReplica.newBuilder() .setContainerID(containerID1).setContainerState(State.OPEN) - .setDatanodeDetails(datanodeDetails1).setSequenceId(1001L).setDataChecksum(1234L).build(); + .setDatanodeDetails(datanodeDetails1).setSequenceId(1001L) + .setChecksums(ContainerChecksums.of(1234L, 0L)).build(); final ReconContainerManager containerManager = getContainerManager(); final Map> repHistMap = @@ -256,7 +258,8 @@ public void testUpdateAndRemoveContainerReplica() .setUuid(uuid2).setHostName("host2").setIpAddress("127.0.0.2").build(); final ContainerReplica containerReplica2 = ContainerReplica.newBuilder() .setContainerID(containerID1).setContainerState(State.OPEN) - .setDatanodeDetails(datanodeDetails2).setSequenceId(1051L).setDataChecksum(1234L).build(); + .setDatanodeDetails(datanodeDetails2).setSequenceId(1051L) + .setChecksums(ContainerChecksums.of(1234L, 0L)).build(); // Add replica to DN02 containerManager.updateContainerReplica(containerID1, containerReplica2); From d41f6271cc1751f26cd8be6ec1cab976779eff78 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 19 Dec 2025 13:33:31 +0100 Subject: [PATCH 13/36] HDDS-14189. Copy OmKeyInfo using toBuilder instead of newBuilder (#9510) --- .../hadoop/ozone/client/rpc/RpcClient.java | 17 +------------ .../hadoop/ozone/om/helpers/OmKeyInfo.java | 1 + .../hadoop/ozone/om/KeyManagerImpl.java | 7 +----- .../S3MultipartUploadCompleteRequest.java | 25 ++++--------------- ...MultipartUploadCompleteRequestWithFSO.java | 7 ------ 5 files changed, 8 insertions(+), 49 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 5fce2b83fd54..3947e4b6818b 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -1601,25 +1601,10 @@ public OzoneInputStream getKey( Collections.singletonList(keyLocationInfoGroup); keyInfo.setKeyLocationVersions(keyLocationInfoGroups); - OmKeyInfo dnKeyInfo = new OmKeyInfo.Builder() - .setVolumeName(keyInfo.getVolumeName()) - .setBucketName(keyInfo.getBucketName()) - .setKeyName(keyInfo.getKeyName()) - .setOmKeyLocationInfos(keyInfo.getKeyLocationVersions()) - .setDataSize(keyInfo.getDataSize()) - .setCreationTime(keyInfo.getCreationTime()) - .setModificationTime(keyInfo.getModificationTime()) + OmKeyInfo dnKeyInfo = keyInfo.toBuilder() .setReplicationConfig(replicationConfig instanceof ECReplicationConfig ? 
RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE) : keyInfo.getReplicationConfig()) - .setFileEncryptionInfo(keyInfo.getFileEncryptionInfo()) - .setAcls(keyInfo.getAcls()) - .setObjectID(keyInfo.getObjectID()) - .setUpdateID(keyInfo.getUpdateID()) - .setParentObjectID(keyInfo.getParentObjectID()) - .setFileChecksum(keyInfo.getFileChecksum()) - .setOwnerName(keyInfo.getOwnerName()) - .addAllMetadata(keyInfo.getMetadata()) .build(); dnKeyInfo.setKeyLocationVersions(keyLocationInfoGroups); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index 7cfbf4172d44..76e0cac3f462 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -546,6 +546,7 @@ public Builder setOwnerName(String owner) { public Builder setOmKeyLocationInfos( List omKeyLocationInfoList) { if (omKeyLocationInfoList != null) { + this.omKeyLocationInfoGroups.clear(); this.omKeyLocationInfoGroups.addAll(omKeyLocationInfoList); } return this; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index e1850e33bc6a..604a06f57510 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -1721,19 +1721,14 @@ private OmKeyInfo createDirectoryKey(OmKeyInfo keyInfo, String keyName) String dir = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); FileEncryptionInfo encInfo = getFileEncryptionInfo(bucketInfo); - return new OmKeyInfo.Builder() - .setVolumeName(keyInfo.getVolumeName()) - .setBucketName(keyInfo.getBucketName()) + return keyInfo.toBuilder() .setKeyName(dir) .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, new ArrayList<>()))) .setCreationTime(Time.now()) .setModificationTime(Time.now()) .setDataSize(0) - .setReplicationConfig(keyInfo.getReplicationConfig()) .setFileEncryptionInfo(encInfo) - .setAcls(keyInfo.getAcls()) - .setOwnerName(keyInfo.getOwnerName()) .build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 8db0b7402c9b..cadd7f80b62f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -464,7 +464,7 @@ protected OmKeyInfo getOmKeyInfo(long trxnLogIndex, OmKeyInfo omKeyInfo = getOmKeyInfoFromKeyTable(ozoneKey, keyName, omMetadataManager); - OmKeyInfo.Builder builder = null; + OmKeyInfo.Builder builder; if (omKeyInfo == null) { // This is a newly added key, it does not have any versions. OmKeyLocationInfoGroup keyLocationInfoGroup = new @@ -475,30 +475,20 @@ protected OmKeyInfo getOmKeyInfo(long trxnLogIndex, keyName, omMetadataManager); // A newly created key, this is the first version. 
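The RpcClient and KeyManagerImpl hunks above, and the multipart-complete hunk that follows, all switch from rebuilding OmKeyInfo field by field to copying it with toBuilder() and overriding only what changes; the clear() added to Builder#setOmKeyLocationInfos keeps such copies from appending to the location list they inherit. A minimal sketch of the pattern, assuming an existing OmKeyInfo named source (illustrative name, not from the patch):

    // Copy every field from source, then override only the ones that differ.
    OmKeyInfo directoryKey = source.toBuilder()
        .setKeyName("dir1/")
        .setDataSize(0)
        .setCreationTime(Time.now())
        .setModificationTime(Time.now())
        .build();
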
- builder = new OmKeyInfo.Builder().setVolumeName(volumeName) - .setBucketName(bucketName).setKeyName(dbOpenKeyInfo.getKeyName()) + builder = dbOpenKeyInfo.toBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) .setReplicationConfig(ReplicationConfig.fromProto( partKeyInfo.getType(), partKeyInfo.getFactor(), partKeyInfo.getEcReplicationConfig())) .setCreationTime(keyArgs.getModificationTime()) .setModificationTime(keyArgs.getModificationTime()) .setDataSize(dataSize) - .setFileEncryptionInfo(dbOpenKeyInfo.getFileEncryptionInfo()) .setOmKeyLocationInfos( Collections.singletonList(keyLocationInfoGroup)) - .setAcls(dbOpenKeyInfo.getAcls()) - .addAllMetadata(dbOpenKeyInfo.getMetadata()) .addMetadata(OzoneConsts.ETAG, multipartUploadedKeyHash(partKeyInfoMap)) - .setOwnerName(keyArgs.getOwnerName()) - .addAllTags(dbOpenKeyInfo.getTags()); - // Check if db entry has ObjectID. This check is required because - // it is possible that between multipart key uploads and complete, - // we had an upgrade. - if (dbOpenKeyInfo.getObjectID() != 0) { - builder.setObjectID(dbOpenKeyInfo.getObjectID()); - } - updatePrefixFSOInfo(dbOpenKeyInfo, builder); + .setOwnerName(keyArgs.getOwnerName()); } else { OmKeyInfo dbOpenKeyInfo = getOmKeyInfoFromOpenKeyTable(multipartOpenKey, keyName, omMetadataManager); @@ -533,11 +523,6 @@ protected OmKeyInfo getOmKeyInfo(long trxnLogIndex, return builder.setUpdateID(trxnLogIndex).build(); } - protected void updatePrefixFSOInfo(OmKeyInfo dbOpenKeyInfo, - OmKeyInfo.Builder builder) { - // FSO is disabled. Do nothing. - } - protected String getDBOzoneKey(OMMetadataManager omMetadataManager, String volumeName, String bucketName, String keyName) throws IOException { return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java index cf888c50e6cb..a5c8b2703d68 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java @@ -123,13 +123,6 @@ protected void addKeyTableCacheEntry(OMMetadataManager omMetadataManager, omKeyInfo.getFileName(), transactionLogIndex); } - @Override - protected void updatePrefixFSOInfo(OmKeyInfo dbOpenKeyInfo, - OmKeyInfo.Builder builder) { - // updates parentID - builder.setParentObjectID(dbOpenKeyInfo.getParentObjectID()); - } - @Override protected String getDBOzoneKey(OMMetadataManager omMetadataManager, String volumeName, String bucketName, String keyName)throws IOException { From 29a4d2f3b9621852a558a23580418be9954dc33d Mon Sep 17 00:00:00 2001 From: Sarveksha Yeshavantha Raju <79865743+sarvekshayr@users.noreply.github.com> Date: Fri, 19 Dec 2025 20:20:16 +0530 Subject: [PATCH 14/36] HDDS-14188. 
NodesOutOfSpace renamed to NonWritableNodes, include DNs not accepting writes (#9518) --- .../hadoop/hdds/scm/node/SCMNodeManager.java | 88 +++++++++++-------- .../hadoop/hdds/scm/node/SCMNodeMetrics.java | 11 +-- .../hdds/scm/node/TestSCMNodeMetrics.java | 2 +- 3 files changed, 60 insertions(+), 41 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index 1487c56aafe9..da0e82f69d04 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -48,9 +48,11 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.BiConsumer; import java.util.function.Function; +import java.util.function.Predicate; import java.util.stream.Collectors; import javax.management.ObjectName; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -1279,8 +1281,8 @@ public Map getNodeStatistics() { nodeStateStatistics(nodeStatistics); // Statistics node space nodeSpaceStatistics(nodeStatistics); - // Statistics node readOnly - nodeOutOfSpaceStatistics(nodeStatistics); + // Statistics node non-writable + nodeNonWritableStatistics(nodeStatistics); // todo: Statistics of other instances return nodeStatistics; } @@ -1368,43 +1370,59 @@ private void nodeSpaceStatistics(Map nodeStatics) { nodeStatics.put(SpaceStatistics.NON_SCM_USED.getLabel(), nonScmUsed); } - private void nodeOutOfSpaceStatistics(Map nodeStatics) { - List allNodes = getAllNodes(); - long blockSize = (long) conf.getStorageSize( - OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, - OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT, - StorageUnit.BYTES); - long minRatisVolumeSizeBytes = (long) conf.getStorageSize( - ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, - ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN_DEFAULT, - StorageUnit.BYTES); - long containerSize = (long) conf.getStorageSize( - ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, - ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, - StorageUnit.BYTES); - - int nodeOutOfSpaceCount = (int) allNodes.parallelStream() - .filter(dn -> !hasEnoughSpace(dn, minRatisVolumeSizeBytes, containerSize, conf) - && !hasEnoughCommittedVolumeSpace(dn, blockSize)) + private void nodeNonWritableStatistics(Map nodeStatics) { + int nonWritableNodesCount = (int) getAllNodes().parallelStream() + .filter(new NonWritableNodeFilter(conf)) .count(); - nodeStatics.put("NodesOutOfSpace", String.valueOf(nodeOutOfSpaceCount)); - } - - /** - * Check if any volume in the datanode has committed space >= blockSize. 
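The extracted NonWritableNodeFilter reads the block, Ratis-volume and container-size thresholds from configuration once and is then applied as a plain java.util.function.Predicate. A hedged restatement of its decision, based only on the code in this hunk and the metric description further down:

    // A datanode is counted as non-writable when its NodeStatus is not
    // writable (not IN_SERVICE and HEALTHY), or when no volume can hold a
    // new container and no volume has committed space for even one block.
    Predicate<DatanodeInfo> nonWritable = new NonWritableNodeFilter(conf);
    long nonWritableCount = getAllNodes().parallelStream()
        .filter(nonWritable)
        .count();
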
- * - * @return true if any volume has committed space >= blockSize, false otherwise - */ - private boolean hasEnoughCommittedVolumeSpace(DatanodeInfo dnInfo, long blockSize) { - for (StorageReportProto reportProto : dnInfo.getStorageReports()) { - if (reportProto.getCommitted() >= blockSize) { - return true; + nodeStatics.put("NonWritableNodes", String.valueOf(nonWritableNodesCount)); + } + + static class NonWritableNodeFilter implements Predicate { + + private final long blockSize; + private final long minRatisVolumeSizeBytes; + private final long containerSize; + private final ConfigurationSource conf; + + NonWritableNodeFilter(ConfigurationSource conf) { + blockSize = (long) conf.getStorageSize( + OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, + OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT, + StorageUnit.BYTES); + minRatisVolumeSizeBytes = (long) conf.getStorageSize( + ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, + ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN_DEFAULT, + StorageUnit.BYTES); + containerSize = (long) conf.getStorageSize( + ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, + ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, + StorageUnit.BYTES); + this.conf = conf; + } + + @Override + public boolean test(DatanodeInfo dn) { + return !dn.getNodeStatus().isNodeWritable() + || (!hasEnoughSpace(dn, minRatisVolumeSizeBytes, containerSize, conf) + && !hasEnoughCommittedVolumeSpace(dn)); + } + + /** + * Check if any volume in the datanode has committed space >= blockSize. + * + * @return true if any volume has committed space >= blockSize, false otherwise + */ + private boolean hasEnoughCommittedVolumeSpace(DatanodeInfo dnInfo) { + for (StorageReportProto reportProto : dnInfo.getStorageReports()) { + if (reportProto.getCommitted() >= blockSize) { + return true; + } } + LOG.debug("Datanode {} has no volumes with committed space >= {} bytes", + dnInfo.getID(), blockSize); + return false; } - LOG.debug("Datanode {} has no volumes with committed space >= {} bytes", - dnInfo.getID(), blockSize); - return false; } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java index b43256e92b9b..323b7dbe1e0c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java @@ -157,12 +157,13 @@ public void getMetrics(MetricsCollector collector, boolean all) { metrics.addGauge( Interns.info("AllNodes", "Number of datanodes"), totalNodeCount); - String nodesOutOfSpace = nodeStatistics.get("NodesOutOfSpace"); - if (nodesOutOfSpace != null) { + String nonWritableNodes = nodeStatistics.get("NonWritableNodes"); + if (nonWritableNodes != null) { metrics.addGauge( - Interns.info("NodesOutOfSpace", "Number of datanodes that are out of space because " + - "they cannot allocate new containers or write to existing ones."), - Integer.parseInt(nodesOutOfSpace)); + Interns.info("NonWritableNodes", "Number of datanodes that cannot accept new writes because " + + "they are either not in IN_SERVICE and HEALTHY state, cannot allocate new containers or " + + "cannot write to existing containers."), + Integer.parseInt(nonWritableNodes)); } for (Map.Entry e : nodeInfo.entrySet()) { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeMetrics.java 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeMetrics.java index 0f36b702b29d..81f956778093 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeMetrics.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeMetrics.java @@ -227,7 +227,7 @@ public void testNodeCountAndInfoMetricsReported() throws Exception { assertGauge("AllNodes", 1, getMetrics(SCMNodeMetrics.class.getSimpleName())); // The DN has no metadata volumes, so hasEnoughSpace() returns false indicating the DN is out of space. - assertGauge("NodesOutOfSpace", 1, + assertGauge("NonWritableNodes", 1, getMetrics(SCMNodeMetrics.class.getSimpleName())); assertGauge("TotalCapacity", 100L, getMetrics(SCMNodeMetrics.class.getSimpleName())); From b27f4defcd1433e3d45a91bdcd30dd3aece70155 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 19 Dec 2025 17:10:38 +0100 Subject: [PATCH 15/36] HDDS-14198. Reduce parameter count in BucketEndpoint (#9528) --- .../ozone/s3/endpoint/BucketEndpoint.java | 25 ++-- .../ozone/s3/endpoint/EndpointBase.java | 8 ++ .../ozone/s3/endpoint/TestBucketAcl.java | 27 ++-- .../ozone/s3/endpoint/TestBucketList.java | 135 +++++++++--------- .../ozone/s3/endpoint/TestBucketPut.java | 8 +- .../s3/endpoint/TestPermissionCheck.java | 13 +- .../s3/metrics/TestS3GatewayMetrics.java | 30 ++-- 7 files changed, 127 insertions(+), 119 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index dd2bf6b45f0f..491ba2a05b37 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -114,32 +114,30 @@ private BucketEndpointContext getBucketContext() { * for more details. 
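The BucketEndpoint change below removes most of the injected @QueryParam arguments: optional parameters are now pulled from the JAX-RS request context through the small getQueryParam/getQueryParameters helpers added to EndpointBase, leaving only the bucket name and the numeric limits in the method signatures. A sketch of the resulting call shape inside a handler, using only names that appear in the hunks below:

    // Optional S3 query parameters are fetched on demand from the context.
    final String prefix = getQueryParam(QueryParams.PREFIX);
    final String delimiter = getQueryParam(QueryParams.DELIMITER);
    final String aclMarker = getQueryParam(QueryParams.ACL);
    if (aclMarker != null) {
      // A ?acl request is routed to the ACL code path instead of the listing.
    }
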
*/ @GET - @SuppressWarnings({"parameternumber", "methodlength"}) + @SuppressWarnings("methodlength") public Response get( @PathParam(BUCKET) String bucketName, - @QueryParam(QueryParams.DELIMITER) String delimiter, - @QueryParam(QueryParams.ENCODING_TYPE) String encodingType, - @QueryParam(QueryParams.MARKER) String marker, @DefaultValue("1000") @QueryParam(QueryParams.MAX_KEYS) int maxKeys, - @QueryParam(QueryParams.PREFIX) String prefix, - @QueryParam(QueryParams.CONTINUATION_TOKEN) String continueToken, - @QueryParam(QueryParams.START_AFTER) String startAfter, - @QueryParam(QueryParams.UPLOADS) String uploads, - @QueryParam(QueryParams.ACL) String aclMarker, - @QueryParam(QueryParams.KEY_MARKER) String keyMarker, - @QueryParam(QueryParams.UPLOAD_ID_MARKER) String uploadIdMarker, @DefaultValue("1000") @QueryParam(QueryParams.MAX_UPLOADS) int maxUploads ) throws OS3Exception, IOException { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.GET_BUCKET; PerformanceStringBuilder perf = new PerformanceStringBuilder(); + final String continueToken = getQueryParam(QueryParams.CONTINUATION_TOKEN); + final String delimiter = getQueryParam(QueryParams.DELIMITER); + final String encodingType = getQueryParam(QueryParams.ENCODING_TYPE); + final String marker = getQueryParam(QueryParams.MARKER); + String prefix = getQueryParam(QueryParams.PREFIX); + String startAfter = getQueryParam(QueryParams.START_AFTER); + Iterator ozoneKeyIterator = null; ContinueToken decodedToken = ContinueToken.decodeFromString(continueToken); OzoneBucket bucket = null; try { + final String aclMarker = getQueryParam(QueryParams.ACL); if (aclMarker != null) { s3GAction = S3GAction.GET_ACL; S3BucketAcl result = getAcl(bucketName); @@ -148,8 +146,11 @@ public Response get( return Response.ok(result, MediaType.APPLICATION_XML_TYPE).build(); } + final String uploads = getQueryParam(QueryParams.UPLOADS); if (uploads != null) { s3GAction = S3GAction.LIST_MULTIPART_UPLOAD; + final String uploadIdMarker = getQueryParam(QueryParams.UPLOAD_ID_MARKER); + final String keyMarker = getQueryParam(QueryParams.KEY_MARKER); return listMultipartUploads(bucketName, prefix, keyMarker, uploadIdMarker, maxUploads); } @@ -317,13 +318,13 @@ private int validateMaxKeys(int maxKeys) throws OS3Exception { @PUT public Response put( @PathParam(BUCKET) String bucketName, - @QueryParam(QueryParams.ACL) String aclMarker, InputStream body ) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.CREATE_BUCKET; try { + final String aclMarker = getQueryParam(QueryParams.ACL); if (aclMarker != null) { s3GAction = S3GAction.PUT_ACL; Response response = putAcl(bucketName, body); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java index 6f41e2bee06d..99d7adc3042f 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -114,6 +114,14 @@ public abstract class EndpointBase { protected static final AuditLogger AUDIT = new AuditLogger(AuditLoggerType.S3GLOGGER); + protected String getQueryParam(String key) { + return getQueryParameters().getFirst(key); + } + + public MultivaluedMap getQueryParameters() { + return context.getUriInfo().getQueryParameters(); + } + protected OzoneBucket getBucket(OzoneVolume 
volume, String bucketName) throws OS3Exception, IOException { OzoneBucket bucket; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java index 3b6e58bd6067..c5bb9bcc6294 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java @@ -36,6 +36,7 @@ import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -68,6 +69,7 @@ public void setup() throws IOException { .setClient(client) .setHeaders(headers) .build(); + bucketEndpoint.getQueryParameters().add(QueryParams.ACL, ACL_MARKER); } @AfterEach @@ -81,8 +83,7 @@ public void clean() throws IOException { public void testGetAcl() throws Exception { when(parameterMap.containsKey(ACL_MARKER)).thenReturn(true); Response response = - bucketEndpoint.get(BUCKET_NAME, null, null, null, 0, null, - null, null, null, ACL_MARKER, null, null, 0); + bucketEndpoint.get(BUCKET_NAME, 0, 0); assertEquals(HTTP_OK, response.getStatus()); System.out.println(response.getEntity()); } @@ -93,7 +94,7 @@ public void testSetAclWithNotSupportedGranteeType() throws Exception { .thenReturn(S3Acl.ACLIdentityType.GROUP.getHeaderType() + "=root"); when(parameterMap.containsKey(ACL_MARKER)).thenReturn(true); OS3Exception e = assertThrows(OS3Exception.class, () -> - bucketEndpoint.put(BUCKET_NAME, ACL_MARKER, null)); + bucketEndpoint.put(BUCKET_NAME, null)); assertEquals(e.getHttpCode(), HTTP_NOT_IMPLEMENTED); } @@ -103,7 +104,7 @@ public void testRead() throws Exception { when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); Response response = - bucketEndpoint.put(BUCKET_NAME, ACL_MARKER, null); + bucketEndpoint.put(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus()); S3BucketAcl getResponse = bucketEndpoint.getAcl(BUCKET_NAME); assertEquals(1, getResponse.getAclList().getGrantList().size()); @@ -117,7 +118,7 @@ public void testWrite() throws Exception { when(headers.getHeaderString(S3Acl.GRANT_WRITE)) .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); Response response = - bucketEndpoint.put(BUCKET_NAME, ACL_MARKER, null); + bucketEndpoint.put(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus()); S3BucketAcl getResponse = bucketEndpoint.getAcl(BUCKET_NAME); assertEquals(1, getResponse.getAclList().getGrantList().size()); @@ -131,7 +132,7 @@ public void testReadACP() throws Exception { when(headers.getHeaderString(S3Acl.GRANT_READ_ACP)) .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); Response response = - bucketEndpoint.put(BUCKET_NAME, ACL_MARKER, null); + bucketEndpoint.put(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus()); S3BucketAcl getResponse = bucketEndpoint.getAcl(BUCKET_NAME); @@ -146,7 +147,7 @@ public void testWriteACP() throws Exception { when(headers.getHeaderString(S3Acl.GRANT_WRITE_ACP)) .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); Response response = - bucketEndpoint.put(BUCKET_NAME, ACL_MARKER, null); + 
bucketEndpoint.put(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus()); S3BucketAcl getResponse = bucketEndpoint.getAcl(BUCKET_NAME); assertEquals(1, getResponse.getAclList().getGrantList().size()); @@ -160,7 +161,7 @@ public void testFullControl() throws Exception { when(headers.getHeaderString(S3Acl.GRANT_FULL_CONTROL)) .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); Response response = - bucketEndpoint.put(BUCKET_NAME, ACL_MARKER, null); + bucketEndpoint.put(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus()); S3BucketAcl getResponse = bucketEndpoint.getAcl(BUCKET_NAME); assertEquals(1, getResponse.getAclList().getGrantList().size()); @@ -182,7 +183,7 @@ public void testCombination() throws Exception { when(headers.getHeaderString(S3Acl.GRANT_FULL_CONTROL)) .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); Response response = - bucketEndpoint.put(BUCKET_NAME, ACL_MARKER, null); + bucketEndpoint.put(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus()); S3BucketAcl getResponse = bucketEndpoint.getAcl(BUCKET_NAME); assertEquals(5, getResponse.getAclList().getGrantList().size()); @@ -195,7 +196,7 @@ public void testPutClearOldAcls() throws Exception { .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); // Put READ Response response = - bucketEndpoint.put(BUCKET_NAME, ACL_MARKER, null); + bucketEndpoint.put(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus()); S3BucketAcl getResponse = bucketEndpoint.getAcl(BUCKET_NAME); assertEquals(1, getResponse.getAclList().getGrantList().size()); @@ -212,7 +213,7 @@ public void testPutClearOldAcls() throws Exception { .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); //Put WRITE response = - bucketEndpoint.put(BUCKET_NAME, ACL_MARKER, null); + bucketEndpoint.put(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus()); getResponse = bucketEndpoint.getAcl(BUCKET_NAME); assertEquals(1, getResponse.getAclList().getGrantList().size()); @@ -230,7 +231,7 @@ public void testAclInBodyWithGroupUser() { .getResourceAsStream("groupAccessControlList.xml"); when(parameterMap.containsKey(ACL_MARKER)).thenReturn(true); assertThrows(OS3Exception.class, () -> bucketEndpoint.put( - BUCKET_NAME, ACL_MARKER, inputBody)); + BUCKET_NAME, inputBody)); } @Test @@ -239,7 +240,7 @@ public void testAclInBody() throws Exception { .getResourceAsStream("userAccessControlList.xml"); when(parameterMap.containsKey(ACL_MARKER)).thenReturn(true); Response response = - bucketEndpoint.put(BUCKET_NAME, ACL_MARKER, inputBody); + bucketEndpoint.put(BUCKET_NAME, inputBody); assertEquals(HTTP_OK, response.getStatus()); S3BucketAcl getResponse = bucketEndpoint.getAcl(BUCKET_NAME); assertEquals(2, getResponse.getAclList().getGrantList().size()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java index 0d2f087a75b5..332b6eb36eb7 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java @@ -36,6 +36,7 @@ import org.apache.hadoop.ozone.s3.commontypes.EncodingTypeObject; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams; import 
org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.Test; @@ -53,9 +54,10 @@ public void listRoot() throws OS3Exception, IOException { .setClient(client) .build(); + endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); + endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, ""); ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", "/", null, null, 100, "", - null, null, null, null, null, null, 0) + (ListObjectResponse) endpoint.get("b1", 100, 0) .getEntity(); assertEquals(1, getBucketResponse.getCommonPrefixes().size()); @@ -72,9 +74,10 @@ public void listDir() throws OS3Exception, IOException { OzoneClient client = createClientWithKeys("dir1/file2", "dir1/dir2/file2"); BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(client).build(); + endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); + endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, "dir1"); ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", "/", null, null, 100, - "dir1", null, null, null, null, null, null, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1", 100, 0).getEntity(); assertEquals(1, getBucketResponse.getCommonPrefixes().size()); assertEquals("dir1/", @@ -91,10 +94,10 @@ public void listSubDir() throws OS3Exception, IOException { BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); + endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); + endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, "dir1/"); ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint - .get("b1", "/", null, null, 100, "dir1/", null, - null, null, null, null, null, 0) + (ListObjectResponse) endpoint.get("b1", 100, 0) .getEntity(); assertEquals(1, getBucketResponse.getCommonPrefixes().size()); @@ -124,9 +127,10 @@ public void listObjectOwner() throws OS3Exception, IOException { BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(client).build(); + endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); + endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, "key"); ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", "/", null, null, 100, - "key", null, null, null, null, null, null, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1", 100, 0).getEntity(); assertEquals(2, getBucketResponse.getContents().size()); assertEquals(user1.getShortUserName(), @@ -143,9 +147,10 @@ public void listWithPrefixAndDelimiter() throws OS3Exception, IOException { BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); + endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); + endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, "dir1"); ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", "/", null, null, 100, - "dir1", null, null, null, null, null, null, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1", 100, 0).getEntity(); assertEquals(3, getBucketResponse.getCommonPrefixes().size()); } @@ -158,9 +163,10 @@ public void listWithPrefixAndDelimiter1() throws OS3Exception, IOException { BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); + endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); + endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, ""); ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", "/", null, null, 100, - 
"", null, null, null, null, null, null, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1", 100, 0).getEntity(); assertEquals(3, getBucketResponse.getCommonPrefixes().size()); assertEquals("file2", getBucketResponse.getContents().get(0) @@ -175,9 +181,11 @@ public void listWithPrefixAndDelimiter2() throws OS3Exception, IOException { BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); + endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); + endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, "dir1bh"); + endpoint.getQueryParameters().putSingle(QueryParams.START_AFTER, "dir1/dir2/file2"); ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", "/", null, null, 100, "dir1bh", - null, "dir1/dir2/file2", null, null, null, null, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1", 100, 0).getEntity(); assertEquals(2, getBucketResponse.getCommonPrefixes().size()); } @@ -192,9 +200,10 @@ public void listWithPrefixAndEmptyStrDelimiter() BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); // Should behave the same if delimiter is null + endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, ""); + endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, "dir1/"); ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", "", null, null, 100, "dir1/", - null, null, null, null, null, null, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1", 100, 0).getEntity(); assertEquals(0, getBucketResponse.getCommonPrefixes().size()); assertEquals(4, getBucketResponse.getContents().size()); @@ -220,28 +229,24 @@ public void listWithContinuationToken() throws OS3Exception, IOException { // As we have 5 keys, with max keys 2 we should call list 3 times. 
// First time + endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, ""); ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", null, null, null, maxKeys, - "", null, null, null, null, null, null, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1", maxKeys, 0).getEntity(); assertTrue(getBucketResponse.isTruncated()); assertEquals(2, getBucketResponse.getContents().size()); // 2nd time - String continueToken = getBucketResponse.getNextToken(); + endpoint.getQueryParameters().putSingle(QueryParams.CONTINUATION_TOKEN, getBucketResponse.getNextToken()); getBucketResponse = - (ListObjectResponse) endpoint.get("b1", null, null, null, maxKeys, - "", continueToken, null, null, null, null, null, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1", maxKeys, 0).getEntity(); assertTrue(getBucketResponse.isTruncated()); assertEquals(2, getBucketResponse.getContents().size()); - - continueToken = getBucketResponse.getNextToken(); - //3rd time + endpoint.getQueryParameters().putSingle(QueryParams.CONTINUATION_TOKEN, getBucketResponse.getNextToken()); getBucketResponse = - (ListObjectResponse) endpoint.get("b1", null, null, null, maxKeys, - "", continueToken, null, null, null, null, null, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1", maxKeys, 0).getEntity(); assertFalse(getBucketResponse.isTruncated()); assertEquals(1, getBucketResponse.getContents().size()); @@ -267,9 +272,10 @@ public void listWithContinuationTokenDirBreak() ListObjectResponse getBucketResponse; + endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); + endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, "test/"); getBucketResponse = - (ListObjectResponse) endpoint.get("b1", "/", null, null, maxKeys, - "test/", null, null, null, null, null, null, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1", maxKeys, 0).getEntity(); assertEquals(0, getBucketResponse.getContents().size()); assertEquals(2, getBucketResponse.getCommonPrefixes().size()); @@ -278,10 +284,9 @@ public void listWithContinuationTokenDirBreak() assertEquals("test/dir2/", getBucketResponse.getCommonPrefixes().get(1).getPrefix().getName()); + endpoint.getQueryParameters().putSingle(QueryParams.CONTINUATION_TOKEN, getBucketResponse.getNextToken()); getBucketResponse = - (ListObjectResponse) endpoint.get("b1", "/", null, null, maxKeys, - "test/", getBucketResponse.getNextToken(), null, null, null, - null, null, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1", maxKeys, 0).getEntity(); assertEquals(1, getBucketResponse.getContents().size()); assertEquals(1, getBucketResponse.getCommonPrefixes().size()); assertEquals("test/dir3/", @@ -306,26 +311,25 @@ public void listWithContinuationToken1() throws OS3Exception, IOException { // As we have 5 keys, with max keys 2 we should call list 3 times. 
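Because the listing parameters are no longer method arguments, the tests below seed them on the endpoint's query-parameter map before each call; continuation is driven the same way by writing the returned token back into the map. A condensed sketch of that flow (maxKeys of 2 is simply the value these tests use):

    // Seed the request parameters, then page through the bucket.
    endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, "dir");
    ListObjectResponse page =
        (ListObjectResponse) endpoint.get("b1", 2, 0).getEntity();
    while (page.isTruncated()) {
      endpoint.getQueryParameters().putSingle(
          QueryParams.CONTINUATION_TOKEN, page.getNextToken());
      page = (ListObjectResponse) endpoint.get("b1", 2, 0).getEntity();
    }
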
// First time + endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); + endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, "dir"); ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", "/", null, null, maxKeys, - "dir", null, null, null, null, null, null, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1", maxKeys, 0).getEntity(); assertTrue(getBucketResponse.isTruncated()); assertEquals(2, getBucketResponse.getCommonPrefixes().size()); // 2nd time - String continueToken = getBucketResponse.getNextToken(); + endpoint.getQueryParameters().putSingle(QueryParams.CONTINUATION_TOKEN, getBucketResponse.getNextToken()); getBucketResponse = - (ListObjectResponse) endpoint.get("b1", "/", null, null, maxKeys, - "dir", continueToken, null, null, null, null, null, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1", maxKeys, 0).getEntity(); assertTrue(getBucketResponse.isTruncated()); assertEquals(2, getBucketResponse.getCommonPrefixes().size()); //3rd time - continueToken = getBucketResponse.getNextToken(); + endpoint.getQueryParameters().putSingle(QueryParams.CONTINUATION_TOKEN, getBucketResponse.getNextToken()); getBucketResponse = - (ListObjectResponse) endpoint.get("b1", "/", null, null, maxKeys, - "dir", continueToken, null, null, null, null, null, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1", maxKeys, 0).getEntity(); assertFalse(getBucketResponse.isTruncated()); assertEquals(1, getBucketResponse.getCommonPrefixes().size()); @@ -339,9 +343,10 @@ public void listWithContinuationTokenFail() throws IOException { BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); - OS3Exception e = assertThrows(OS3Exception.class, () -> endpoint.get("b1", - "/", null, null, 2, "dir", "random", null, null, null, null, null, 1000) - .getEntity(), "listWithContinuationTokenFail"); + endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); + endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, "dir"); + endpoint.getQueryParameters().putSingle(QueryParams.CONTINUATION_TOKEN, "random"); + OS3Exception e = assertThrows(OS3Exception.class, () -> endpoint.get("b1", 2, 1000).getEntity()); assertEquals("random", e.getResource()); assertEquals("Invalid Argument", e.getErrorMessage()); } @@ -355,8 +360,7 @@ public void testStartAfter() throws IOException, OS3Exception { BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", null, null, null, 1000, - null, null, null, null, null, null, null, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1", 1000, 0).getEntity(); assertFalse(getBucketResponse.isTruncated()); assertEquals(5, getBucketResponse.getContents().size()); @@ -365,16 +369,16 @@ public void testStartAfter() throws IOException, OS3Exception { // have 4 keys. 
String startAfter = "dir0/file1"; + endpoint.getQueryParameters().putSingle(QueryParams.START_AFTER, startAfter); getBucketResponse = - (ListObjectResponse) endpoint.get("b1", null, null, null, - 1000, null, null, startAfter, null, null, null, null, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1", 1000, 0).getEntity(); assertFalse(getBucketResponse.isTruncated()); assertEquals(4, getBucketResponse.getContents().size()); + endpoint.getQueryParameters().putSingle(QueryParams.START_AFTER, "random"); getBucketResponse = - (ListObjectResponse) endpoint.get("b1", null, null, null, - 1000, null, null, "random", null, null, null, null, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1", 1000, 0).getEntity(); assertFalse(getBucketResponse.isTruncated()); assertEquals(0, getBucketResponse.getContents().size()); @@ -415,9 +419,11 @@ public void testEncodingType() throws IOException, OS3Exception { String startAfter = "data="; String encodingType = ENCODING_TYPE; - ListObjectResponse response = (ListObjectResponse) endpoint.get( - "b1", delimiter, encodingType, null, 1000, prefix, - null, startAfter, null, null, null, null, 0).getEntity(); + endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, delimiter); + endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, prefix); + endpoint.getQueryParameters().putSingle(QueryParams.ENCODING_TYPE, encodingType); + endpoint.getQueryParameters().putSingle(QueryParams.START_AFTER, startAfter); + ListObjectResponse response = (ListObjectResponse) endpoint.get("b1", 1000, 0).getEntity(); // Assert encodingType == url. // The Object name will be encoded by ObjectKeyNameAdapter @@ -433,9 +439,8 @@ public void testEncodingType() throws IOException, OS3Exception { assertEquals(encodingType, response.getContents().get(0).getKey().getEncodingType()); - response = (ListObjectResponse) endpoint.get( - "b1", delimiter, null, null, 1000, prefix, - null, startAfter, null, null, null, null, 0).getEntity(); + endpoint.getQueryParameters().remove(QueryParams.ENCODING_TYPE); + response = (ListObjectResponse) endpoint.get("b1", 1000, 0).getEntity(); // Assert encodingType == null. // The Object name will not be encoded by ObjectKeyNameAdapter @@ -455,9 +460,10 @@ public void testEncodingTypeException() throws IOException { OzoneClient client = new OzoneClientStub(); client.getObjectStore().createS3Bucket("b1"); BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(client).build(); + + endpoint.getQueryParameters().putSingle(QueryParams.ENCODING_TYPE, "unSupportType"); OS3Exception e = assertThrows(OS3Exception.class, () -> endpoint.get( - "b1", null, "unSupportType", null, 1000, null, - null, null, null, null, null, null, 0).getEntity()); + "b1", 1000, 0).getEntity()); assertEquals(S3ErrorTable.INVALID_ARGUMENT.getCode(), e.getCode()); } @@ -471,8 +477,7 @@ public void testListObjectsWithNegativeMaxKeys() throws Exception { // maxKeys < 0 should throw InvalidArgument OS3Exception e1 = assertThrows(OS3Exception.class, () -> - bucketEndpoint.get("bucket", null, null, null, -1, null, - null, null, null, null, null, null, 1000) + bucketEndpoint.get("bucket", -1, 1000) ); assertEquals(S3ErrorTable.INVALID_ARGUMENT.getCode(), e1.getCode()); } @@ -487,8 +492,7 @@ public void testListObjectsWithZeroMaxKeys() throws Exception { // maxKeys = 0, should return empty list and not throw. 
ListObjectResponse response = (ListObjectResponse) bucketEndpoint.get( - "bucket", null, null, null, 0, null, - null, null, null, null, null, null, 1000).getEntity(); + "bucket", 0, 1000).getEntity(); assertEquals(0, response.getContents().size()); assertFalse(response.isTruncated()); @@ -502,16 +506,14 @@ public void testListObjectsWithZeroMaxKeysInNonEmptyBucket() throws Exception { .build(); ListObjectResponse response = (ListObjectResponse) bucketEndpoint.get( - "b1", null, null, null, 0, null, - null, null, null, null, null, null, 1000).getEntity(); + "b1", 0, 1000).getEntity(); // Should return empty list and not throw. assertEquals(0, response.getContents().size()); assertFalse(response.isTruncated()); ListObjectResponse fullResponse = (ListObjectResponse) bucketEndpoint.get( - "b1", null, null, null, 1000, null, - null, null, null, null, null, null, 1000).getEntity(); + "b1", 1000, 1000).getEntity(); assertEquals(5, fullResponse.getContents().size()); } @@ -539,8 +541,7 @@ public void testListObjectsRespectsConfiguredMaxKeysLimit() throws Exception { // Act: Request more keys than the configured max-keys limit final int requestedMaxKeys = Integer.parseInt(configuredMaxKeysLimit) + 1; ListObjectResponse response = (ListObjectResponse) - bucketEndpoint.get("b1", null, null, null, requestedMaxKeys, - null, null, null, null, null, null, null, + bucketEndpoint.get("b1", requestedMaxKeys, 1000).getEntity(); // Assert: The number of returned keys should be capped at the configured limit diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java index 0464ff54edca..96eea22d9ebd 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java @@ -56,7 +56,7 @@ public void setup() throws Exception { @Test public void testBucketFailWithAuthHeaderMissing() throws Exception { try { - bucketEndpoint.put(bucketName, null, null); + bucketEndpoint.put(bucketName, null); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); assertEquals(MALFORMED_HEADER.getCode(), ex.getCode()); @@ -65,13 +65,13 @@ public void testBucketFailWithAuthHeaderMissing() throws Exception { @Test public void testBucketPut() throws Exception { - Response response = bucketEndpoint.put(bucketName, null, null); + Response response = bucketEndpoint.put(bucketName, null); assertEquals(200, response.getStatus()); assertNotNull(response.getLocation()); // Create-bucket on an existing bucket fails OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.put( - bucketName, null, null)); + bucketName, null)); assertEquals(HTTP_CONFLICT, e.getHttpCode()); assertEquals(BUCKET_ALREADY_EXISTS.getCode(), e.getCode()); } @@ -79,7 +79,7 @@ public void testBucketPut() throws Exception { @Test public void testBucketFailWithInvalidHeader() throws Exception { try { - bucketEndpoint.put(bucketName, null, null); + bucketEndpoint.put(bucketName, null); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); assertEquals(MALFORMED_HEADER.getCode(), ex.getCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java index e422e4920792..9872a711c639 100644 --- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java @@ -55,6 +55,7 @@ import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.metrics.S3GatewayMetrics; import org.apache.hadoop.ozone.s3.util.S3Consts; +import org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -131,7 +132,7 @@ public void testCreateBucket() throws IOException { .setClient(client) .build(); OS3Exception e = assertThrows(OS3Exception.class, () -> - bucketEndpoint.put("bucketName", null, null)); + bucketEndpoint.put("bucketName", null)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -169,8 +170,7 @@ public void testListKey() throws IOException { .setClient(client) .build(); OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.get( - "bucketName", null, null, null, 1000, - null, null, null, null, null, null, null, 0)); + "bucketName", 1000, 0)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -214,9 +214,9 @@ public void testGetAcl() throws Exception { .setClient(client) .setHeaders(headers) .build(); + bucketEndpoint.getQueryParameters().putSingle(QueryParams.ACL, "acl"); OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.get( - "bucketName", null, null, null, 1000, null, null, null, null, "acl", - null, null, 0), "Expected OS3Exception with FORBIDDEN http code."); + "bucketName", 1000, 0)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -237,8 +237,9 @@ public void testSetAcl() throws Exception { .setClient(client) .setHeaders(headers) .build(); + bucketEndpoint.getQueryParameters().putSingle(QueryParams.ACL, "acl"); try { - bucketEndpoint.put("bucketName", "acl", null); + bucketEndpoint.put("bucketName", null); } catch (Exception e) { assertTrue(e instanceof OS3Exception && ((OS3Exception)e).getHttpCode() == HTTP_FORBIDDEN); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java index cae9a9422885..018ad0f1f5e2 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java @@ -53,6 +53,7 @@ import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.hadoop.ozone.s3.util.S3Consts; +import org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -133,10 +134,7 @@ public void testListBucketSuccess() throws Exception { public void testGetBucketSuccess() throws Exception { long oriMetric = metrics.getGetBucketSuccess(); - bucketEndpoint.get(bucketName, null, - null, null, 1000, null, - null, "random", null, - null, null, null, 0).getEntity(); + bucketEndpoint.get(bucketName, 1000, 0).getEntity(); long curMetric = metrics.getGetBucketSuccess(); assertEquals(1L, curMetric - oriMetric); @@ -148,8 +146,7 @@ public void testGetBucketFailure() throws Exception { // Searching for a bucket that does not exist OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.get( - "newBucket", null, null, null, 1000, null, null, "random", null, - null, null, null, 
0)); + "newBucket", 1000, 0)); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getErrorMessage(), e.getErrorMessage()); @@ -161,7 +158,7 @@ public void testGetBucketFailure() throws Exception { public void testCreateBucketSuccess() throws Exception { long oriMetric = metrics.getCreateBucketSuccess(); - assertDoesNotThrow(() -> bucketEndpoint.put("newBucket", null, null)); + assertDoesNotThrow(() -> bucketEndpoint.put("newBucket", null)); long curMetric = metrics.getCreateBucketSuccess(); assertEquals(1L, curMetric - oriMetric); } @@ -172,7 +169,7 @@ public void testCreateBucketFailure() throws Exception { // Creating an error by trying to create a bucket that already exists OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.put( - bucketName, null, null)); + bucketName, null)); assertEquals(HTTP_CONFLICT, e.getHttpCode()); assertEquals(BUCKET_ALREADY_EXISTS.getCode(), e.getCode()); @@ -210,10 +207,9 @@ public void testDeleteBucketFailure() throws Exception { public void testGetAclSuccess() throws Exception { long oriMetric = metrics.getGetAclSuccess(); + bucketEndpoint.getQueryParameters().add(QueryParams.ACL, ACL_MARKER); Response response = - bucketEndpoint.get(bucketName, null, null, - null, 0, null, null, - null, null, "acl", null, null, 0); + bucketEndpoint.get(bucketName, 0, 0); long curMetric = metrics.getGetAclSuccess(); assertEquals(HTTP_OK, response.getStatus()); assertEquals(1L, curMetric - oriMetric); @@ -223,10 +219,9 @@ public void testGetAclSuccess() throws Exception { public void testGetAclFailure() throws Exception { long oriMetric = metrics.getGetAclFailure(); + bucketEndpoint.getQueryParameters().add(QueryParams.ACL, ACL_MARKER); // Failing the getACL endpoint by applying ACL on a non-Existent Bucket - OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.get( - "random_bucket", null, null, null, 0, null, - null, null, null, "acl", null, null, 0)); + OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.get("random_bucket", 0, 0)); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getErrorMessage(), e.getErrorMessage()); @@ -242,7 +237,8 @@ public void testPutAclSuccess() throws Exception { InputStream inputBody = TestBucketAcl.class.getClassLoader() .getResourceAsStream("userAccessControlList.xml"); - bucketEndpoint.put("b1", ACL_MARKER, inputBody); + bucketEndpoint.getQueryParameters().add(QueryParams.ACL, ACL_MARKER); + bucketEndpoint.put("b1", inputBody); inputBody.close(); long curMetric = metrics.getPutAclSuccess(); assertEquals(1L, curMetric - oriMetric); @@ -255,9 +251,9 @@ public void testPutAclFailure() throws Exception { InputStream inputBody = TestBucketAcl.class.getClassLoader() .getResourceAsStream("userAccessControlList.xml"); + bucketEndpoint.getQueryParameters().add(QueryParams.ACL, ACL_MARKER); try { - assertThrows(OS3Exception.class, () -> bucketEndpoint.put("unknown_bucket", ACL_MARKER, - inputBody)); + assertThrows(OS3Exception.class, () -> bucketEndpoint.put("unknown_bucket", inputBody)); } finally { inputBody.close(); } From e0228db801e77c3e1ec523852f8d0672bb0897bd Mon Sep 17 00:00:00 2001 From: Priyesh Karatha <35779060+priyeshkaratha@users.noreply.github.com> Date: Fri, 19 Dec 2025 22:47:14 +0530 Subject: [PATCH 16/36] HDDS-13945. 
Show datanode reserved space in StorageDistributionEndpoint (#9488) --- .../balancer/ContainerBalancerTask.java | 2 +- .../container/placement/metrics/NodeStat.java | 8 ++++- .../placement/metrics/SCMNodeMetric.java | 6 ++-- .../placement/metrics/SCMNodeStat.java | 36 +++++++++++++------ .../hadoop/hdds/scm/node/SCMNodeManager.java | 8 +++-- .../hdds/scm/container/MockNodeManager.java | 2 +- .../balancer/TestContainerBalancerTask.java | 2 +- .../balancer/TestFindTargetStrategy.java | 22 ++++++------ .../container/balancer/TestableCluster.java | 2 +- .../TestSCMContainerPlacementCapacity.java | 8 ++--- .../TestReplicationManagerUtil.java | 6 ++-- .../TestCapacityPipelineChoosePolicy.java | 8 ++--- .../placement/TestDatanodeMetrics.java | 8 ++--- .../TestStorageDistributionEndpoint.java | 18 +++++++++- .../api/StorageDistributionEndpoint.java | 2 ++ .../api/types/DatanodeStorageReport.java | 22 ++++++++++++ .../api/TestNSSummaryEndpointWithFSO.java | 2 +- .../api/TestNSSummaryEndpointWithLegacy.java | 2 +- ...TestNSSummaryEndpointWithOBSAndLegacy.java | 2 +- 19 files changed, 116 insertions(+), 50 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java index 6d8614ecc805..59c010e091cd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java @@ -1063,7 +1063,7 @@ public static double calculateAvgUtilization(List nodes) { return 0; } SCMNodeStat aggregatedStats = new SCMNodeStat( - 0, 0, 0, 0, 0); + 0, 0, 0, 0, 0, 0); for (DatanodeUsageInfo node : nodes) { aggregatedStats.add(node.getScmNodeStat()); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java index 1d09e01e533e..dacc1487b54e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java @@ -53,6 +53,12 @@ interface NodeStat { */ LongMetric getFreeSpaceToSpare(); + /** + * Get the reserved space on the node. + * @return the reserved space on the node + */ + LongMetric getReserved(); + /** * Set the total/used/remaining space. * @param capacity - total space. @@ -61,7 +67,7 @@ interface NodeStat { */ @VisibleForTesting void set(long capacity, long used, long remain, long committed, - long freeSpaceToSpare); + long freeSpaceToSpare, long reserved); /** * Adding of the stat. 
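Editorial note on the NodeStat change above: the patch widens NodeStat#set and the SCMNodeStat constructor from five to six space metrics by appending a reserved-space figure, and every call site now passes the extra argument. The following is a minimal, hedged sketch of how the widened API composes; the class name ReservedSpaceSketch and the numeric values are invented for illustration, while SCMNodeStat, add() and getReserved() are taken from the patch itself.

import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;

public class ReservedSpaceSketch {
  public static void main(String[] args) {
    // Constructor arguments: capacity, used, remaining, committed, freeSpaceToSpare, reserved.
    SCMNodeStat dn1 = new SCMNodeStat(100L, 20L, 80L, 0L, 10L, 5L);
    SCMNodeStat dn2 = new SCMNodeStat(200L, 50L, 150L, 0L, 20L, 10L);

    // add() returns this, so per-node stats can be folded into an aggregate;
    // with this patch the reserved metric is summed along with the other five.
    SCMNodeStat total = new SCMNodeStat(0, 0, 0, 0, 0, 0);
    total.add(dn1).add(dn2);

    System.out.println("aggregate reserved = " + total.getReserved().get()); // expected: 15
  }
}

Aggregation of this shape mirrors what SCMNodeManager#getStats does further down in this patch, where the reserved values from each datanode storage report are summed into the cluster-wide SCMNodeStat.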
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java index 184dd715c209..2349c0594725 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java @@ -46,9 +46,9 @@ public SCMNodeMetric(SCMNodeStat stat) { */ @VisibleForTesting public SCMNodeMetric(long capacity, long used, long remaining, - long committed, long freeSpaceToSpare) { + long committed, long freeSpaceToSpare, long reserved) { this.stat = new SCMNodeStat(); - this.stat.set(capacity, used, remaining, committed, freeSpaceToSpare); + this.stat.set(capacity, used, remaining, committed, freeSpaceToSpare, reserved); } /** @@ -159,7 +159,7 @@ public SCMNodeStat get() { public void set(SCMNodeStat value) { stat.set(value.getCapacity().get(), value.getScmUsed().get(), value.getRemaining().get(), value.getCommitted().get(), - value.getFreeSpaceToSpare().get()); + value.getFreeSpaceToSpare().get(), value.getReserved().get()); } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java index ef547b702602..d7b8b5892c05 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java @@ -29,18 +29,19 @@ public class SCMNodeStat implements NodeStat { private LongMetric remaining; private LongMetric committed; private LongMetric freeSpaceToSpare; + private LongMetric reserved; public SCMNodeStat() { - this(0L, 0L, 0L, 0L, 0L); + this(0L, 0L, 0L, 0L, 0L, 0L); } public SCMNodeStat(SCMNodeStat other) { this(other.capacity.get(), other.scmUsed.get(), other.remaining.get(), - other.committed.get(), other.freeSpaceToSpare.get()); + other.committed.get(), other.freeSpaceToSpare.get(), other.reserved.get()); } public SCMNodeStat(long capacity, long used, long remaining, long committed, - long freeSpaceToSpare) { + long freeSpaceToSpare, long reserved) { Preconditions.checkArgument(capacity >= 0, "Capacity cannot be " + "negative."); Preconditions.checkArgument(used >= 0, "used space cannot be " + @@ -52,6 +53,7 @@ public SCMNodeStat(long capacity, long used, long remaining, long committed, this.remaining = new LongMetric(remaining); this.committed = new LongMetric(committed); this.freeSpaceToSpare = new LongMetric(freeSpaceToSpare); + this.reserved = new LongMetric(reserved); } /** @@ -96,6 +98,15 @@ public LongMetric getFreeSpaceToSpare() { return freeSpaceToSpare; } + /** + * Get the reserved space on the node. + * @return the reserved space on the node + */ + @Override + public LongMetric getReserved() { + return reserved; + } + /** * Set the capacity, used and remaining space on a datanode. 
* @@ -106,7 +117,7 @@ public LongMetric getFreeSpaceToSpare() { @Override @VisibleForTesting public void set(long newCapacity, long newUsed, long newRemaining, - long newCommitted, long newFreeSpaceToSpare) { + long newCommitted, long newFreeSpaceToSpare, long newReserved) { Preconditions.checkArgument(newCapacity >= 0, "Capacity cannot be " + "negative."); Preconditions.checkArgument(newUsed >= 0, "used space cannot be " + @@ -119,6 +130,7 @@ public void set(long newCapacity, long newUsed, long newRemaining, this.remaining = new LongMetric(newRemaining); this.committed = new LongMetric(newCommitted); this.freeSpaceToSpare = new LongMetric(newFreeSpaceToSpare); + this.reserved = new LongMetric(newReserved); } /** @@ -133,8 +145,8 @@ public SCMNodeStat add(NodeStat stat) { this.scmUsed.set(this.getScmUsed().get() + stat.getScmUsed().get()); this.remaining.set(this.getRemaining().get() + stat.getRemaining().get()); this.committed.set(this.getCommitted().get() + stat.getCommitted().get()); - this.freeSpaceToSpare.set(this.freeSpaceToSpare.get() + - stat.getFreeSpaceToSpare().get()); + this.freeSpaceToSpare.set(this.freeSpaceToSpare.get() + stat.getFreeSpaceToSpare().get()); + this.reserved.set(this.reserved.get() + stat.getReserved().get()); return this; } @@ -150,8 +162,8 @@ public SCMNodeStat subtract(NodeStat stat) { this.scmUsed.set(this.getScmUsed().get() - stat.getScmUsed().get()); this.remaining.set(this.getRemaining().get() - stat.getRemaining().get()); this.committed.set(this.getCommitted().get() - stat.getCommitted().get()); - this.freeSpaceToSpare.set(freeSpaceToSpare.get() - - stat.getFreeSpaceToSpare().get()); + this.freeSpaceToSpare.set(freeSpaceToSpare.get() - stat.getFreeSpaceToSpare().get()); + this.reserved.set(reserved.get() - stat.getReserved().get()); return this; } @@ -163,7 +175,8 @@ public boolean equals(Object to) { scmUsed.isEqual(tempStat.getScmUsed().get()) && remaining.isEqual(tempStat.getRemaining().get()) && committed.isEqual(tempStat.getCommitted().get()) && - freeSpaceToSpare.isEqual(tempStat.freeSpaceToSpare.get()); + freeSpaceToSpare.isEqual(tempStat.freeSpaceToSpare.get()) && + reserved.isEqual(tempStat.reserved.get()); } return false; } @@ -171,7 +184,7 @@ public boolean equals(Object to) { @Override public int hashCode() { return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get() ^ - committed.get() ^ freeSpaceToSpare.get()); + committed.get() ^ freeSpaceToSpare.get() ^ reserved.get()); } @Override @@ -180,6 +193,9 @@ public String toString() { "capacity=" + capacity.get() + ", scmUsed=" + scmUsed.get() + ", remaining=" + remaining.get() + + ", committed=" + committed.get() + + ", freeSpaceToSpare=" + freeSpaceToSpare.get() + + ", reserved=" + reserved.get() + '}'; } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index da0e82f69d04..4096c35f3b85 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -940,6 +940,7 @@ public SCMNodeStat getStats() { long remaining = 0L; long committed = 0L; long freeSpaceToSpare = 0L; + long reserved = 0L; for (SCMNodeStat stat : getNodeStats().values()) { capacity += stat.getCapacity().get(); @@ -947,9 +948,10 @@ public SCMNodeStat getStats() { remaining += stat.getRemaining().get(); committed += stat.getCommitted().get(); 
freeSpaceToSpare += stat.getFreeSpaceToSpare().get(); + reserved += stat.getReserved().get(); } return new SCMNodeStat(capacity, used, remaining, committed, - freeSpaceToSpare); + freeSpaceToSpare, reserved); } /** @@ -1057,6 +1059,7 @@ private SCMNodeStat getNodeStatInternal(DatanodeDetails datanodeDetails) { long remaining = 0L; long committed = 0L; long freeSpaceToSpare = 0L; + long reserved = 0L; final DatanodeInfo datanodeInfo = nodeStateManager .getNode(datanodeDetails); @@ -1068,9 +1071,10 @@ private SCMNodeStat getNodeStatInternal(DatanodeDetails datanodeDetails) { remaining += reportProto.getRemaining(); committed += reportProto.getCommitted(); freeSpaceToSpare += reportProto.getFreeSpaceToSpare(); + reserved += reportProto.getReserved(); } return new SCMNodeStat(capacity, used, remaining, committed, - freeSpaceToSpare); + freeSpaceToSpare, reserved); } catch (NodeNotFoundException e) { LOG.warn("Cannot generate NodeStat, datanode {} not found.", datanodeDetails); return null; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 210409dc6ae2..e87f9a664781 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -218,7 +218,7 @@ private void populateNodeMetric(DatanodeDetails datanodeDetails, int x) { NODES[x % NODES.length].capacity - NODES[x % NODES.length].used; newStat.set( (NODES[x % NODES.length].capacity), - (NODES[x % NODES.length].used), remaining, 0, 100000); + (NODES[x % NODES.length].used), remaining, 0, 100000, 0); this.nodeMetricMap.put(datanodeDetails, newStat); aggregateStat.add(newStat); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java index 85947e27d482..8205f1c72064 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java @@ -460,7 +460,7 @@ private void createCluster(int[] sizeArray) { } SCMNodeStat stat = new SCMNodeStat(datanodeCapacity, datanodeUsedSpace, datanodeCapacity - datanodeUsedSpace, 0, - datanodeCapacity - datanodeUsedSpace - 1); + datanodeCapacity - datanodeUsedSpace - 1, 0); nodesInCluster.get(i).setScmNodeStat(stat); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java index 7426fae11ba9..776bb5d14344 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java @@ -53,11 +53,11 @@ public void testFindTargetGreedyByUsage() { //create three datanodes with different usageinfo DatanodeUsageInfo dui1 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 0, 40, 0, 30)); + .randomDatanodeDetails(), new SCMNodeStat(100, 0, 40, 0, 30, 0)); DatanodeUsageInfo dui2 = new 
DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 0, 60, 0, 30)); + .randomDatanodeDetails(), new SCMNodeStat(100, 0, 60, 0, 30, 0)); DatanodeUsageInfo dui3 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 0, 80, 0, 30)); + .randomDatanodeDetails(), new SCMNodeStat(100, 0, 80, 0, 30, 0)); //insert in ascending order overUtilizedDatanodes.add(dui1); @@ -92,11 +92,11 @@ public void testFindTargetGreedyByUsage() { public void testResetPotentialTargets() { // create three datanodes with different usage infos DatanodeUsageInfo dui1 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 30, 70, 0, 50)); + .randomDatanodeDetails(), new SCMNodeStat(100, 30, 70, 0, 50, 0)); DatanodeUsageInfo dui2 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 20, 80, 0, 60)); + .randomDatanodeDetails(), new SCMNodeStat(100, 20, 80, 0, 60, 0)); DatanodeUsageInfo dui3 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 10, 90, 0, 70)); + .randomDatanodeDetails(), new SCMNodeStat(100, 10, 90, 0, 70, 0)); List potentialTargets = new ArrayList<>(); potentialTargets.add(dui1); @@ -171,18 +171,18 @@ public void testFindTargetGreedyByNetworkTopology() { List overUtilizedDatanodes = new ArrayList<>(); //set the farthest target with the lowest usage info overUtilizedDatanodes.add( - new DatanodeUsageInfo(target5, new SCMNodeStat(100, 0, 90, 0, 80))); + new DatanodeUsageInfo(target5, new SCMNodeStat(100, 0, 90, 0, 80, 0))); //set the tree targets, which have the same network topology distance //to source , with different usage info overUtilizedDatanodes.add( - new DatanodeUsageInfo(target2, new SCMNodeStat(100, 0, 20, 0, 10))); + new DatanodeUsageInfo(target2, new SCMNodeStat(100, 0, 20, 0, 10, 0))); overUtilizedDatanodes.add( - new DatanodeUsageInfo(target3, new SCMNodeStat(100, 0, 40, 0, 30))); + new DatanodeUsageInfo(target3, new SCMNodeStat(100, 0, 40, 0, 30, 0))); overUtilizedDatanodes.add( - new DatanodeUsageInfo(target4, new SCMNodeStat(100, 0, 60, 0, 50))); + new DatanodeUsageInfo(target4, new SCMNodeStat(100, 0, 60, 0, 50, 0))); //set the nearest target with the highest usage info overUtilizedDatanodes.add( - new DatanodeUsageInfo(target1, new SCMNodeStat(100, 0, 10, 0, 5))); + new DatanodeUsageInfo(target1, new SCMNodeStat(100, 0, 10, 0, 5, 0))); FindTargetGreedyByNetworkTopology findTargetGreedyByNetworkTopology = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestableCluster.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestableCluster.java index 0e86dea26966..1e9591ab194b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestableCluster.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestableCluster.java @@ -80,7 +80,7 @@ public final class TestableCluster { SCMNodeStat stat = new SCMNodeStat(datanodeCapacity, datanodeUsedSpace, datanodeCapacity - datanodeUsedSpace, 0, - datanodeCapacity - datanodeUsedSpace - 1); + datanodeCapacity - datanodeUsedSpace - 1, 0); nodesInCluster[i].setScmNodeStat(stat); clusterUsedSpace += datanodeUsedSpace; clusterCapacity += datanodeCapacity; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java index 885ba5528250..5b940fc543f1 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java @@ -105,13 +105,13 @@ public void chooseDatanodes() throws SCMException { .thenReturn(new ArrayList<>(datanodes)); when(mockNodeManager.getNodeStat(any())) - .thenReturn(new SCMNodeMetric(100L, 0L, 100L, 0, 90)); + .thenReturn(new SCMNodeMetric(100L, 0L, 100L, 0, 90, 0)); when(mockNodeManager.getNodeStat(datanodes.get(2))) - .thenReturn(new SCMNodeMetric(100L, 90L, 10L, 0, 9)); + .thenReturn(new SCMNodeMetric(100L, 90L, 10L, 0, 9, 0)); when(mockNodeManager.getNodeStat(datanodes.get(3))) - .thenReturn(new SCMNodeMetric(100L, 80L, 20L, 0, 19)); + .thenReturn(new SCMNodeMetric(100L, 80L, 20L, 0, 19, 0)); when(mockNodeManager.getNodeStat(datanodes.get(4))) - .thenReturn(new SCMNodeMetric(100L, 70L, 30L, 0, 20)); + .thenReturn(new SCMNodeMetric(100L, 70L, 30L, 0, 20, 0)); when(mockNodeManager.getNode(any(DatanodeID.class))).thenAnswer( invocation -> datanodes.stream() .filter(dn -> dn.getID().equals(invocation.getArgument(0))) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerUtil.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerUtil.java index d5c465aa238e..ffca82e231bd 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerUtil.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerUtil.java @@ -317,13 +317,13 @@ public void testDatanodesWithInSufficientDiskSpaceAreExcluded() throws NodeNotFo when(replicationManager.getNodeManager()).thenReturn(nodeManagerMock); doReturn(fullDn).when(nodeManagerMock).getNode(fullDn.getID()); doReturn(new SCMNodeMetric(50 * oneGb, 20 * oneGb, 30 * oneGb, 5 * oneGb, - 20 * oneGb)).when(nodeManagerMock).getNodeStat(fullDn); + 20 * oneGb, 0)).when(nodeManagerMock).getNodeStat(fullDn); doReturn(spaceAvailableDn).when(nodeManagerMock).getNode(spaceAvailableDn.getID()); doReturn(new SCMNodeMetric(50 * oneGb, 10 * oneGb, 40 * oneGb, 5 * oneGb, - 20 * oneGb)).when(nodeManagerMock).getNodeStat(spaceAvailableDn); + 20 * oneGb, 0)).when(nodeManagerMock).getNodeStat(spaceAvailableDn); doReturn(expiredOpDn).when(nodeManagerMock).getNode(expiredOpDn.getID()); doReturn(new SCMNodeMetric(50 * oneGb, 20 * oneGb, 30 * oneGb, 5 * oneGb, - 20 * oneGb)).when(nodeManagerMock).getNodeStat(expiredOpDn); + 20 * oneGb, 0)).when(nodeManagerMock).getNodeStat(expiredOpDn); when(replicationManager.getNodeStatus(any())).thenAnswer( invocation -> { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java index 352468baa5da..2cd399e6d750 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java @@ -53,13 +53,13 @@ public void testChoosePipeline() throws Exception { // used 0 10 20 30 NodeManager mockNodeManager = mock(NodeManager.class); when(mockNodeManager.getNodeStat(datanodes.get(0))) - .thenReturn(new SCMNodeMetric(100L, 0, 100L, 0, 0)); + .thenReturn(new SCMNodeMetric(100L, 0, 100L, 0, 0, 0)); when(mockNodeManager.getNodeStat(datanodes.get(1))) - .thenReturn(new SCMNodeMetric(100L, 10L, 90L, 0, 0)); + .thenReturn(new SCMNodeMetric(100L, 10L, 90L, 0, 0, 0)); when(mockNodeManager.getNodeStat(datanodes.get(2))) - .thenReturn(new SCMNodeMetric(100L, 20L, 80L, 0, 0)); + .thenReturn(new SCMNodeMetric(100L, 20L, 80L, 0, 0, 0)); when(mockNodeManager.getNodeStat(datanodes.get(3))) - .thenReturn(new SCMNodeMetric(100L, 30L, 70L, 0, 0)); + .thenReturn(new SCMNodeMetric(100L, 30L, 70L, 0, 0, 0)); PipelineChoosePolicy policy = new CapacityPipelineChoosePolicy().init(mockNodeManager); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java index c997b33a2ea8..33fa6376410e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java @@ -30,13 +30,13 @@ public class TestDatanodeMetrics { @Test public void testSCMNodeMetric() { - SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L, 0, 80); + SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L, 0, 80, 0); assertEquals((long) stat.getCapacity().get(), 100L); assertEquals(10L, (long) stat.getScmUsed().get()); assertEquals(90L, (long) stat.getRemaining().get()); SCMNodeMetric metric = new SCMNodeMetric(stat); - SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L, 0, 80); + SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L, 0, 80, 0); assertEquals(100L, (long) stat.getCapacity().get()); assertEquals(10L, (long) stat.getScmUsed().get()); assertEquals(90L, (long) stat.getRemaining().get()); @@ -52,8 +52,8 @@ public void testSCMNodeMetric() { assertTrue(metric.isGreater(zeroMetric.get())); // Another case when nodes have similar weight - SCMNodeStat stat1 = new SCMNodeStat(10000000L, 50L, 9999950L, 0, 100000); - SCMNodeStat stat2 = new SCMNodeStat(10000000L, 51L, 9999949L, 0, 100000); + SCMNodeStat stat1 = new SCMNodeStat(10000000L, 50L, 9999950L, 0, 100000, 0); + SCMNodeStat stat2 = new SCMNodeStat(10000000L, 51L, 9999949L, 0, 100000, 0); assertTrue(new SCMNodeMetric(stat2).isGreater(stat1)); } } diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestStorageDistributionEndpoint.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestStorageDistributionEndpoint.java index 5ac779d6596b..ea6894831a72 100644 --- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestStorageDistributionEndpoint.java +++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestStorageDistributionEndpoint.java @@ -49,6 +49,7 @@ import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import 
org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.events.SCMEvents; @@ -74,6 +75,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.recon.api.DataNodeMetricsService; import org.apache.hadoop.ozone.recon.api.types.DataNodeMetricsServiceResponse; +import org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport; import org.apache.hadoop.ozone.recon.api.types.ScmPendingDeletion; import org.apache.hadoop.ozone.recon.api.types.StorageCapacityDistributionResponse; import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; @@ -215,7 +217,21 @@ private boolean verifyStorageDistributionAfterKeyCreation() { assertEquals(0, storageResponse.getUsedSpaceBreakDown().getOpenKeyBytes()); assertEquals(60, storageResponse.getUsedSpaceBreakDown().getCommittedKeyBytes()); assertEquals(3, storageResponse.getDataNodeUsage().size()); - + List reports = storageResponse.getDataNodeUsage(); + List scmReports = + scm.getClientProtocolServer().getDatanodeUsageInfo(true, 3, 1); + for (DatanodeStorageReport report : reports) { + for (HddsProtos.DatanodeUsageInfoProto scmReport : scmReports) { + if (scmReport.getNode().getUuid().equals(report.getDatanodeUuid())) { + assertEquals(report.getMinimumFreeSpace(), scmReport.getFreeSpaceToSpare()); + assertEquals(report.getReserved(), scmReport.getReserved()); + assertEquals(report.getCapacity(), scmReport.getCapacity()); + assertEquals(report.getRemaining(), scmReport.getRemaining()); + assertEquals(report.getUsed(), scmReport.getUsed()); + assertEquals(report.getCommitted(), scmReport.getCommitted()); + } + } + } return true; } catch (Exception e) { LOG.debug("Waiting for storage distribution assertions to pass", e); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/StorageDistributionEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/StorageDistributionEndpoint.java index 39262b24f1e8..96cf36fff31a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/StorageDistributionEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/StorageDistributionEndpoint.java @@ -237,6 +237,7 @@ private DatanodeStorageReport getStorageReport(DatanodeDetails datanode) { long remaining = nodeStat.getRemaining() != null ? nodeStat.getRemaining().get() : 0L; long committed = nodeStat.getCommitted() != null ? nodeStat.getCommitted().get() : 0L; long minFreeSpace = nodeStat.getFreeSpaceToSpare() != null ? nodeStat.getFreeSpaceToSpare().get() : 0L; + long reservedSpace = nodeStat.getReserved() != null ? 
nodeStat.getReserved().get() : 0L; return DatanodeStorageReport.newBuilder() .setCapacity(capacity) @@ -244,6 +245,7 @@ private DatanodeStorageReport getStorageReport(DatanodeDetails datanode) { .setRemaining(remaining) .setCommitted(committed) .setMinimumFreeSpace(minFreeSpace) + .setReserved(reservedSpace) .setDatanodeUuid(datanode.getUuidString()) .setHostName(datanode.getHostName()) .build(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java index 97100bac9cf6..e26a761eb5bc 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java @@ -32,6 +32,7 @@ public final class DatanodeStorageReport { private long remaining; private long committed; private long minimumFreeSpace; + private long reserved; public DatanodeStorageReport() { } @@ -44,6 +45,8 @@ private DatanodeStorageReport(Builder builder) { this.remaining = builder.remaining; this.committed = builder.committed; this.minimumFreeSpace = builder.minimumFreeSpace; + this.reserved = builder.reserved; + builder.validate(); } public String getDatanodeUuid() { @@ -74,6 +77,10 @@ public long getMinimumFreeSpace() { return minimumFreeSpace; } + public long getReserved() { + return reserved; + } + public static Builder newBuilder() { return new Builder(); } @@ -89,6 +96,7 @@ public static final class Builder { private long remaining = 0; private long committed = 0; private long minimumFreeSpace = 0; + private long reserved = 0; private static final Logger LOG = LoggerFactory.getLogger(Builder.class); @@ -131,6 +139,11 @@ public Builder setMinimumFreeSpace(long minimumFreeSpace) { return this; } + public Builder setReserved(long reserved) { + this.reserved = reserved; + return this; + } + public void validate() { Objects.requireNonNull(hostName, "hostName cannot be null"); @@ -146,6 +159,15 @@ public void validate() { if (committed < 0) { throw new IllegalArgumentException("committed cannot be negative"); } + + if (minimumFreeSpace < 0) { + throw new IllegalArgumentException("minimumFreeSpace cannot be negative"); + } + + if (reserved < 0) { + throw new IllegalArgumentException("reserved cannot be negative"); + } + // Logical consistency checks if (used + remaining > capacity) { LOG.warn("Inconsistent storage report for {}: used({}) + remaining({}) > capacity({})", diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java index 5b15235431f5..f08a8131d4e0 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java @@ -1573,6 +1573,6 @@ private static BucketLayout getBucketLayout() { private static SCMNodeStat getMockSCMRootStat() { return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, - ROOT_QUOTA - ROOT_DATA_SIZE, 0, ROOT_QUOTA - ROOT_DATA_SIZE - 1); + ROOT_QUOTA - ROOT_DATA_SIZE, 0, ROOT_QUOTA - ROOT_DATA_SIZE - 1, 0); } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java index 8bf0e9ba3cb4..7ac14a9eb4d1 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java @@ -1486,6 +1486,6 @@ private static BucketLayout getBucketLayout() { private static SCMNodeStat getMockSCMRootStat() { return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, - ROOT_QUOTA - ROOT_DATA_SIZE, 0, ROOT_QUOTA - ROOT_DATA_SIZE - 1); + ROOT_QUOTA - ROOT_DATA_SIZE, 0, ROOT_QUOTA - ROOT_DATA_SIZE - 1, 0); } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java index ba190bee4c35..146d84b400ef 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -1446,7 +1446,7 @@ private static BucketLayout getLegacyBucketLayout() { private static SCMNodeStat getMockSCMRootStat() { return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, - ROOT_QUOTA - ROOT_DATA_SIZE, 0L, 0L); + ROOT_QUOTA - ROOT_DATA_SIZE, 0L, 0L, 0); } } From b0f2c8d8183926cc693bbe0080e6041931b7488c Mon Sep 17 00:00:00 2001 From: Siyao Meng <50227127+smengcl@users.noreply.github.com> Date: Fri, 19 Dec 2025 10:10:44 -0800 Subject: [PATCH 17/36] HDDS-14076. Disable SSTFilteringService when defrag feature is enabled (#9495) --- .../hadoop/ozone/om/KeyManagerImpl.java | 9 ++++ .../hadoop/ozone/om/SstFilteringService.java | 33 ++++++++++++ .../snapshot/OmSnapshotLocalDataManager.java | 8 +++ .../om/snapshot/TestSstFilteringService.java | 50 +++++++++++++++++++ 4 files changed, 100 insertions(+) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 604a06f57510..bc5f2ce9b961 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -376,6 +376,15 @@ public void start(OzoneConfiguration configuration) { */ public void startSnapshotSstFilteringService(OzoneConfiguration conf) { if (isSstFilteringSvcEnabled()) { + if (isDefragSvcEnabled()) { + LOG.info("SstFilteringService is disabled (despite configuration intending to enable it) " + + "because SnapshotDefragService is enabled. Defrag effectively performs filtering already."); + return; + } + + LOG.info("SstFilteringService is enabled. 
Note SstFilteringService is " + + "deprecated in favor of SnapshotDefragService and may be removed in a future release."); + long serviceInterval = conf.getTimeDuration( OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL_DEFAULT, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java index 0e09bbcbe4a2..9dc8332697be 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java @@ -48,6 +48,7 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.om.lock.OMLockDetails; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.ratis.util.UncheckedAutoCloseable; import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.slf4j.Logger; @@ -132,6 +133,32 @@ private boolean isSnapshotDeleted(SnapshotInfo snapshotInfo) { return snapshotInfo == null || snapshotInfo.getSnapshotStatus() == SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED; } + /** + * Checks if the snapshot has been defragged. + * @param snapshotInfo snapshotInfo + * @return true if the snapshot has been defragged, false otherwise + */ + private boolean isSnapshotDefragged(SnapshotInfo snapshotInfo) { + try { + OmSnapshotManager omSnapshotManager = ozoneManager.getOmSnapshotManager(); + if (omSnapshotManager == null) { + return false; + } + OmSnapshotLocalDataManager localDataManager = omSnapshotManager.getSnapshotLocalDataManager(); + if (localDataManager == null) { + return false; + } + try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider provider = + localDataManager.getOmSnapshotLocalData(snapshotInfo)) { + // If snapshot local data version is not 0, it means the snapshot has been defragged + return provider.getVersion() > 0; + } + } catch (IOException e) { + LOG.debug("Error checking if snapshot {} is defragged", snapshotInfo.getSnapshotId(), e); + return false; + } + } + /** * Marks the snapshot as SSTFiltered by creating a file in snapshot directory. * @param snapshotInfo snapshotInfo @@ -187,6 +214,12 @@ public BackgroundTaskResult call() throws Exception { continue; } + // Skip defragged snapshots as defrag already performs filtering + if (isSnapshotDefragged(snapshotInfo)) { + LOG.debug("Skipping SST filtering for defragged snapshot: {}", snapShotTableKey); + continue; + } + LOG.debug("Processing snapshot {} to filter relevant SST Files", snapShotTableKey); TablePrefixInfo bucketPrefixInfo = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 8d7e40e85d26..cec65701c808 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -788,6 +788,14 @@ public boolean needsDefrag() { return false; } + /** + * Returns the version of the snapshot local data. 
+ * @return Version of the snapshot local data + */ + public long getVersion() { + return snapshotLocalData.getVersion(); + } + @Override public void close() throws IOException { if (previousLock != null) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java index b25ff5c52e47..4f6cd6d97a48 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java @@ -21,6 +21,12 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_GRPC_PORT_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_PORT_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEFRAG_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE; import static org.apache.ozone.test.LambdaTestUtils.await; @@ -508,4 +514,48 @@ private void deleteSnapshot(String volumeName, String bucketName, String snapsho writeClient.deleteSnapshot(volumeName, bucketName, snapshotName); countTotalSnapshots--; } + + /** + * Test to verify that SSTFilteringService is disabled when defrag service is enabled. + * This test creates a new OzoneManager instance with defrag service enabled and verifies + * that the SST filtering service is not started. 
+ */ + @Test + public void testSstFilteringDisabledWhenDefragEnabled(@TempDir Path folder) throws Exception { + OzoneConfiguration testConf = new OzoneConfiguration(); + testConf.set(OZONE_METADATA_DIRS, folder.toString()); + testConf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); + // Enable SST filtering service + testConf.setTimeDuration(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); + // Enable defrag service + testConf.setTimeDuration(OZONE_SNAPSHOT_DEFRAG_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); + testConf.setEnum(HDDS_DB_PROFILE, DBProfile.TEST); + testConf.setQuietMode(false); + // Configure dynamic ports to avoid conflicts with the OzoneManager instance from @BeforeAll + testConf.set(OZONE_OM_ADDRESS_KEY, "127.0.0.1:0"); + testConf.set(OZONE_OM_HTTP_ADDRESS_KEY, "127.0.0.1:0"); + testConf.set(OZONE_OM_HTTPS_ADDRESS_KEY, "127.0.0.1:0"); + testConf.setInt(OZONE_OM_RATIS_PORT_KEY, 0); + testConf.setInt(OZONE_OM_GRPC_PORT_KEY, 0); + + OmTestManagers testManagers = new OmTestManagers(testConf); + KeyManager testKeyManager = testManagers.getKeyManager(); + OzoneManager testOm = testManagers.getOzoneManager(); + + try { + // Verify that SST filtering service is not started when defrag is enabled + SstFilteringService sstFilteringService = testKeyManager.getSnapshotSstFilteringService(); + assertThat(sstFilteringService).as("SstFilteringService should be null when defrag is enabled").isNull(); + } finally { + if (testKeyManager != null) { + testKeyManager.stop(); + } + if (testManagers.getWriteClient() != null) { + testManagers.getWriteClient().close(); + } + if (testOm != null) { + testOm.stop(); + } + } + } } From c192b2a417d7bcfff74db5c91c364f6ab1e9ae85 Mon Sep 17 00:00:00 2001 From: Russole <54737788+Russole@users.noreply.github.com> Date: Sat, 20 Dec 2025 03:37:41 +0800 Subject: [PATCH 18/36] HDDS-14170. 
No need to copy BucketEncryptionKeyInfo and DefaultReplicationConfig (#9531) --- .../hdds/client/DefaultReplicationConfig.java | 4 --- .../om/helpers/BucketEncryptionKeyInfo.java | 26 ++++++++++++++++--- .../hadoop/ozone/om/helpers/OmBucketInfo.java | 12 +-------- 3 files changed, 23 insertions(+), 19 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DefaultReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DefaultReplicationConfig.java index 21e9cd575400..83fb2fe65f24 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DefaultReplicationConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DefaultReplicationConfig.java @@ -56,10 +56,6 @@ public ReplicationType getType() { return ReplicationType.fromProto(replicationConfig.getReplicationType()); } - public DefaultReplicationConfig copy() { - return new DefaultReplicationConfig(replicationConfig); - } - public ReplicationConfig getReplicationConfig() { return replicationConfig; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java index 755195d1fd02..dec49009bb32 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java @@ -17,13 +17,16 @@ package org.apache.hadoop.ozone.om.helpers; +import java.util.Objects; +import net.jcip.annotations.Immutable; import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.CryptoProtocolVersion; /** * Encryption key info for bucket encryption key. */ -public class BucketEncryptionKeyInfo { +@Immutable +public final class BucketEncryptionKeyInfo { private final CryptoProtocolVersion version; private final CipherSuite suite; private final String keyName; @@ -48,14 +51,29 @@ public CryptoProtocolVersion getVersion() { return version; } - public BucketEncryptionKeyInfo copy() { - return new BucketEncryptionKeyInfo(version, suite, keyName); + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + BucketEncryptionKeyInfo that = (BucketEncryptionKeyInfo) o; + return version == that.version + && suite == that.suite + && Objects.equals(keyName, that.keyName); + } + + @Override + public int hashCode() { + return Objects.hash(version, suite, keyName); } /** * Builder for BucketEncryptionKeyInfo. 
*/ - public static class Builder { + public static final class Builder { private CryptoProtocolVersion version; private CipherSuite suite; private String keyName; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index 89c9e717e4e1..bce6adb636a0 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -356,17 +356,7 @@ public Map toAuditMap() { @Override public OmBucketInfo copyObject() { - Builder builder = toBuilder(); - - if (bekInfo != null) { - builder.setBucketEncryptionKey(bekInfo.copy()); - } - - if (defaultReplicationConfig != null) { - builder.setDefaultReplicationConfig(defaultReplicationConfig.copy()); - } - - return builder.build(); + return toBuilder().build(); } public Builder toBuilder() { From 3ca9d25038cfd5365c9d3f3d7089277aba45eed1 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 19 Dec 2025 19:18:28 -0500 Subject: [PATCH 19/36] HDDS-14185. Fix ManagedDirectSlice usage as wrapper for ByteBuffer class (#9508) --- .../utils/db/managed/ManagedDirectSlice.java | 93 +++++++++++++++--- .../hdds/utils/db/managed/ManagedObject.java | 4 +- .../db/managed/TestManagedDirectSlice.java | 86 ++++++++++++++++ .../hdds/utils/db/RDBSstFileWriter.java | 8 +- .../hdds/utils/db/TestRDBSstFileWriter.java | 97 +++++++++++++++++++ 5 files changed, 268 insertions(+), 20 deletions(-) create mode 100644 hadoop-hdds/managed-rocksdb/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedDirectSlice.java rename hadoop-hdds/{framework => rocksdb-checkpoint-differ}/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java (96%) create mode 100644 hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBSstFileWriter.java diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDirectSlice.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDirectSlice.java index b68a250cb8b7..52b8b5aabb1e 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDirectSlice.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDirectSlice.java @@ -17,35 +17,98 @@ package org.apache.hadoop.hdds.utils.db.managed; -import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track; +import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB.NOT_FOUND; +import com.google.common.annotations.VisibleForTesting; import java.nio.ByteBuffer; -import org.apache.ratis.util.UncheckedAutoCloseable; +import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; +import org.apache.ratis.util.function.CheckedConsumer; +import org.apache.ratis.util.function.CheckedFunction; import org.rocksdb.DirectSlice; +import org.rocksdb.RocksDBException; /** - * Managed Direct Slice. + * ManagedDirectSlice is a managed wrapper around the DirectSlice object. It ensures + * proper handling of native resources associated with DirectSlice, utilizing + * the ManagedObject infrastructure to prevent resource leaks. It works in tandem + * with a ByteBuffer, which acts as the data source for the managed slice. 
+ * + * This class overrides certain operations to tightly control the lifecycle and + * behavior of the DirectSlice it manages. It specifically caters to use cases + * where the slice is used in RocksDB operations, providing methods for safely + * interacting with the slice for put-like operations. */ -public class ManagedDirectSlice extends DirectSlice { - private final UncheckedAutoCloseable leakTracker = track(this); +public class ManagedDirectSlice extends ManagedObject { + + private final ByteBuffer data; public ManagedDirectSlice(ByteBuffer data) { - super(data); + super(new DirectSlice(data)); + this.data = data; } @Override - public synchronized long getNativeHandle() { - return super.getNativeHandle(); + public DirectSlice get() { + throw new UnsupportedOperationException("get() is not supported."); } - @Override - protected void disposeInternal() { - // RocksMutableObject.close is final thus can't be decorated. - // So, we decorate disposeInternal instead to track closure. + /** + * Executes the provided consumer on the internal {@code DirectSlice} after + * adjusting the slice's prefix and length based on the current position and + * remaining data in the associated {@code ByteBuffer}. If the consumer throws + * a {@code RocksDBException}, it is wrapped and rethrown as a + * {@code RocksDatabaseException}. + * + * @param consumer the operation to perform on the managed {@code DirectSlice}. + * The consumer must handle a {@code DirectSlice} and may throw + * a {@code RocksDBException}. + * @throws RocksDatabaseException if the provided consumer throws a + * {@code RocksDBException}. + */ + public void putFromBuffer(CheckedConsumer consumer) + throws RocksDatabaseException { + DirectSlice slice = super.get(); + slice.removePrefix(this.data.position()); + slice.setLength(this.data.remaining()); try { - super.disposeInternal(); - } finally { - leakTracker.close(); + consumer.accept(slice); + } catch (RocksDBException e) { + throw new RocksDatabaseException("Error while performing put op with directSlice", e); } + data.position(data.limit()); + } + + /** + * Retrieves data from the associated DirectSlice into the buffer managed by this instance. + * The supplied function is applied to the DirectSlice to process the data, and the method + * adjusts the buffer's position and limit based on the result. + * + * @param function a function that operates on a DirectSlice and returns the number + * of bytes written to the buffer, or a specific "not found" value + * if the operation fails. The function may throw a RocksDBException. + * @return the number of bytes written to the buffer if successful, or a specific + * "not found" value indicating the requested data was absent. + * @throws RocksDatabaseException if the provided function throws a RocksDBException, + * wrapping the original exception. 
+ */ + public int getToBuffer(CheckedFunction function) + throws RocksDatabaseException { + DirectSlice slice = super.get(); + slice.removePrefix(this.data.position()); + slice.setLength(this.data.remaining()); + try { + int lengthWritten = function.apply(slice); + if (lengthWritten != NOT_FOUND) { + this.data.limit(Math.min(data.limit(), data.position() + lengthWritten)); + } + return lengthWritten; + } catch (RocksDBException e) { + throw new RocksDatabaseException("Error while performing put op with directSlice", e); + } + } + + @VisibleForTesting + DirectSlice getDirectSlice() { + return super.get(); } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java index df3a23b4bf81..a5c77748ec54 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java @@ -20,13 +20,13 @@ import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track; import org.apache.ratis.util.UncheckedAutoCloseable; -import org.rocksdb.RocksObject; +import org.rocksdb.AbstractNativeReference; /** * General template for a managed RocksObject. * @param */ -class ManagedObject implements AutoCloseable { +class ManagedObject implements AutoCloseable { private final T original; private final UncheckedAutoCloseable leakTracker = track(this); diff --git a/hadoop-hdds/managed-rocksdb/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedDirectSlice.java b/hadoop-hdds/managed-rocksdb/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedDirectSlice.java new file mode 100644 index 000000000000..c332d32704f8 --- /dev/null +++ b/hadoop-hdds/managed-rocksdb/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedDirectSlice.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.utils.db.managed; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.apache.commons.lang3.RandomUtils; +import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.rocksdb.DirectSlice; + +/** + * Tests for ManagedDirectSlice. 
+ */ +public class TestManagedDirectSlice { + + static { + ManagedRocksObjectUtils.loadRocksDBLibrary(); + } + + @ParameterizedTest + @CsvSource({"0, 1024", "1024, 1024", "512, 1024", "0, 100", "10, 512", "0, 0"}) + public void testManagedDirectSliceWithOffsetMovedAheadByteBuffer(int offset, int numberOfBytesWritten) + throws RocksDatabaseException { + ByteBuffer byteBuffer = ByteBuffer.allocateDirect(1024); + byte[] randomBytes = RandomUtils.secure().nextBytes(numberOfBytesWritten); + byteBuffer.put(randomBytes); + byteBuffer.flip(); + try (ManagedDirectSlice directSlice = new ManagedDirectSlice(byteBuffer); + ManagedSlice slice = new ManagedSlice(Arrays.copyOfRange(randomBytes, offset, numberOfBytesWritten))) { + byteBuffer.position(offset); + directSlice.putFromBuffer((ds) -> { + DirectSlice directSliceFromByteBuffer = directSlice.getDirectSlice(); + assertEquals(numberOfBytesWritten - offset, ds.size()); + assertEquals(0, directSliceFromByteBuffer.compare(slice)); + assertEquals(0, slice.compare(directSliceFromByteBuffer)); + }); + Assertions.assertEquals(numberOfBytesWritten, byteBuffer.position()); + } + } + + @ParameterizedTest + @CsvSource({"0, 1024, 512", "1024, 1024, 5", "512, 1024, 600", "0, 100, 80", "10, 512, 80", "0, 0, 10", + "100, 256, -1"}) + public void testManagedDirectSliceWithOpPutToByteBuffer(int offset, int maxNumberOfBytesWrite, + int numberOfBytesToWrite) throws RocksDatabaseException { + ByteBuffer byteBuffer = ByteBuffer.allocateDirect(1024); + byte[] randomBytes = RandomUtils.secure().nextBytes(offset); + byteBuffer.put(randomBytes); + try (ManagedDirectSlice directSlice = new ManagedDirectSlice(byteBuffer)) { + byteBuffer.position(offset); + byteBuffer.limit(Math.min(offset + maxNumberOfBytesWrite, 1024)); + assertEquals(numberOfBytesToWrite, directSlice.getToBuffer((ds) -> { + assertEquals(byteBuffer.remaining(), ds.size()); + return numberOfBytesToWrite; + })); + Assertions.assertEquals(offset, byteBuffer.position()); + if (numberOfBytesToWrite == -1) { + assertEquals(offset + maxNumberOfBytesWrite, byteBuffer.limit()); + } else { + Assertions.assertEquals(Math.min(Math.min(offset + numberOfBytesToWrite, 1024), maxNumberOfBytesWrite), + byteBuffer.limit()); + } + + } + } +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java similarity index 96% rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java rename to hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java index a689e9fdea14..14f553a9b185 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java @@ -85,9 +85,11 @@ public void delete(byte[] key) throws RocksDatabaseException { public void delete(CodecBuffer key) throws RocksDatabaseException { try (ManagedDirectSlice slice = new ManagedDirectSlice(key.asReadOnlyByteBuffer())) { - sstFileWriter.delete(slice); - keyCounter.incrementAndGet(); - } catch (RocksDBException e) { + slice.putFromBuffer(directSlice -> { + sstFileWriter.delete(directSlice); + keyCounter.incrementAndGet(); + }); + } catch (RocksDatabaseException e) { closeOnFailure(); throw new RocksDatabaseException("Failed to delete key (length=" + key.readableBytes() + "), sstFile=" + 
sstFile.getAbsolutePath(), e); diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBSstFileWriter.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBSstFileWriter.java new file mode 100644 index 000000000000..c3129da85c4b --- /dev/null +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBSstFileWriter.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.utils.db; + +import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_PROPERTY; +import static org.junit.jupiter.api.Assertions.assertEquals; + +import com.google.common.collect.ImmutableList; +import java.io.IOException; +import java.nio.file.Path; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.condition.EnabledIfSystemProperty; +import org.junit.jupiter.api.io.TempDir; +import org.rocksdb.RocksDBException; + +/** + * Test for RDBSstFileWriter. 
+ */ +public class TestRDBSstFileWriter { + + @TempDir + private Path path; + + @EnabledIfSystemProperty(named = ROCKS_TOOLS_NATIVE_PROPERTY, matches = "true") + @Test + public void testSstFileTombstoneCreationWithCodecBufferReuse() throws IOException, RocksDBException { + ManagedRawSSTFileReader.tryLoadLibrary(); + Path sstPath = path.resolve("test.sst").toAbsolutePath(); + try (CodecBuffer codecBuffer = CodecBuffer.allocateDirect(1024); + RDBSstFileWriter sstFileWriter = new RDBSstFileWriter(sstPath.toFile()); + CodecBuffer emptyBuffer = CodecBuffer.getEmptyBuffer()) { + Queue keys = new LinkedList<>(ImmutableList.of("key1_renamed", "key1", "key1_renamed")); + PutToByteBuffer putFunc = byteBuffer -> { + byte[] keyBytes = StringUtils.string2Bytes(keys.peek()); + byteBuffer.put(keyBytes); + return keyBytes.length; + }; + int len = codecBuffer.putFromSource(putFunc); + assertEquals(codecBuffer.readableBytes(), len); + assertEquals(keys.poll(), StringUtils.bytes2String(codecBuffer.getArray())); + codecBuffer.clear(); + int idx = 0; + while (!keys.isEmpty()) { + codecBuffer.putFromSource(putFunc); + byte[] keyBytes = new byte[codecBuffer.readableBytes()]; + assertEquals(keyBytes.length, codecBuffer.getInputStream().read(keyBytes)); + if (idx++ % 2 == 0) { + sstFileWriter.delete(codecBuffer); + } else { + sstFileWriter.put(codecBuffer, emptyBuffer); + } + assertEquals(keys.poll(), StringUtils.bytes2String(codecBuffer.getArray())); + codecBuffer.clear(); + } + } + Assertions.assertTrue(sstPath.toFile().exists()); + try (ManagedOptions options = new ManagedOptions(); + ManagedRawSSTFileReader reader = new ManagedRawSSTFileReader<>(options, + sstPath.toString(), 1024); + ManagedRawSSTFileIterator itr = + reader.newIterator(kv -> kv, null, null, IteratorType.KEY_AND_VALUE)) { + + int idx = 0; + List keys = ImmutableList.of("key1", "key1_rename"); + while (itr.hasNext()) { + ManagedRawSSTFileIterator.KeyValue kv = itr.next(); + assertEquals(idx, kv.getType()); + assertEquals(keys.get(idx), keys.get(idx++)); + assertEquals(0, kv.getValue().length); + } + assertEquals(2, idx); + } + } +} From d7c27db81d518989174857bf594f3a44c23a0b11 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 20 Dec 2025 16:49:12 +0800 Subject: [PATCH 20/36] HDDS-14220. Bump awssdk to 2.40.13 (#9536) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index f82abc107114..03cc1b2d9e27 100644 --- a/pom.xml +++ b/pom.xml @@ -39,7 +39,7 @@ 1.9.7 3.27.6 1.12.788 - 2.40.8 + 2.40.13 0.8.0.RELEASE 1.83 3.6.1 From b8953ecba859f3f242c372392aff89d1c97c4d9f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sun, 21 Dec 2025 08:38:47 -0500 Subject: [PATCH 21/36] HDDS-14162. 
Fix Native Jni Lib to read SST files using CodecBuffer (#9489) --- .../apache/hadoop/hdds/utils/db/Buffer.java | 80 ++++++++++++++++ .../utils/db/RDBStoreCodecBufferIterator.java | 60 ------------ .../hdds/utils/db/TestCodecBufferCodec.java | 17 ++++ .../ManagedRawSSTFileIterator.java | 57 +++++++----- .../ManagedRawSSTFileReader.java | 15 +-- .../utils/db/{managed => }/package-info.java | 4 +- .../main/native/ManagedRawSSTFileIterator.cpp | 92 +++++++++++-------- .../main/native/ManagedRawSSTFileReader.cpp | 8 +- .../hdds/utils/TestNativeLibraryLoader.java | 2 +- .../TestManagedRawSSTFileIterator.java | 22 +++-- .../hdds/utils/db/ManagedSstFileIterator.java | 90 ++++++++++++++++++ .../hdds/utils/db}/SstFileSetReader.java | 62 ++----------- .../rocksdiff/RocksDBCheckpointDiffer.java | 31 +++---- .../hdds/utils/db/TestRDBSstFileWriter.java | 10 +- .../hdds/utils/db}/TestSstFileSetReader.java | 8 +- .../ozone/rocksdiff/TestCompactionDag.java | 2 +- .../TestRocksDBCheckpointDiffer.java | 69 +++++++------- .../ozone/om/snapshot/TestOmSnapshot.java | 13 +-- .../om/snapshot/SnapshotDiffManager.java | 4 +- .../defrag/SnapshotDefragService.java | 4 +- .../om/snapshot/TestSnapshotDiffManager.java | 2 +- .../defrag/TestSnapshotDefragService.java | 2 +- 22 files changed, 385 insertions(+), 269 deletions(-) create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Buffer.java rename hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/{managed => }/ManagedRawSSTFileIterator.java (61%) rename hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/{managed => }/ManagedRawSSTFileReader.java (84%) rename hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/{managed => }/package-info.java (86%) rename hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/{managed => }/TestManagedRawSSTFileIterator.java (86%) create mode 100644 hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedSstFileIterator.java rename hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/{ozone/rocksdb/util => hadoop/hdds/utils/db}/SstFileSetReader.java (76%) rename hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/{ozone/rocksdb/util => hadoop/hdds/utils/db}/TestSstFileSetReader.java (98%) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Buffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Buffer.java new file mode 100644 index 000000000000..77df1fb9a44c --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Buffer.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdds.utils.db; + +import org.apache.ratis.util.Preconditions; + +class Buffer { + private final CodecBuffer.Capacity initialCapacity; + private final PutToByteBuffer source; + private CodecBuffer buffer; + + Buffer(CodecBuffer.Capacity initialCapacity, + PutToByteBuffer source) { + this.initialCapacity = initialCapacity; + this.source = source; + } + + void release() { + if (buffer != null) { + buffer.release(); + } + } + + private void prepare() { + if (buffer == null) { + allocate(); + } else { + buffer.clear(); + } + } + + private void allocate() { + if (buffer != null) { + buffer.release(); + } + buffer = CodecBuffer.allocateDirect(-initialCapacity.get()); + } + + CodecBuffer getFromDb() { + if (source == null) { + return null; + } + + for (prepare(); ; allocate()) { + final Integer required = buffer.putFromSource(source); + if (required == null) { + return null; // the source is unavailable + } else if (required == buffer.readableBytes()) { + return buffer; // buffer size is big enough + } + // buffer size too small, try increasing the capacity. + if (buffer.setCapacity(required)) { + buffer.clear(); + // retry with the new capacity + final int retried = buffer.putFromSource(source); + Preconditions.assertSame(required.intValue(), retried, "required"); + return buffer; + } + + // failed to increase the capacity + // increase initial capacity and reallocate it + initialCapacity.increase(required); + } + } +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java index 4f4f959938e9..aa703249ebe0 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java @@ -97,64 +97,4 @@ public void close() { valueBuffer.release(); } } - - static class Buffer { - private final CodecBuffer.Capacity initialCapacity; - private final PutToByteBuffer source; - private CodecBuffer buffer; - - Buffer(CodecBuffer.Capacity initialCapacity, - PutToByteBuffer source) { - this.initialCapacity = initialCapacity; - this.source = source; - } - - void release() { - if (buffer != null) { - buffer.release(); - } - } - - private void prepare() { - if (buffer == null) { - allocate(); - } else { - buffer.clear(); - } - } - - private void allocate() { - if (buffer != null) { - buffer.release(); - } - buffer = CodecBuffer.allocateDirect(-initialCapacity.get()); - } - - CodecBuffer getFromDb() { - if (source == null) { - return null; - } - - for (prepare(); ; allocate()) { - final Integer required = buffer.putFromSource(source); - if (required == null) { - return null; // the source is unavailable - } else if (required == buffer.readableBytes()) { - return buffer; // buffer size is big enough - } - // buffer size too small, try increasing the capacity. 
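The Buffer helper shown above has moved out of RDBStoreCodecBufferIterator into org.apache.hadoop.hdds.utils.db.Buffer so the SST-file iterators introduced later in this series can reuse it. A minimal sketch of its grow-and-retry contract, separate from the patch itself: the class is package-private, so the sketch assumes it lives in the same package, and the byte[]-backed source lambda is purely illustrative (real callers pass an iterator's key()/value() method as the PutToByteBuffer source).

package org.apache.hadoop.hdds.utils.db;

// Illustrative sketch only, not part of the patch.
final class BufferContractSketch {
  public static void main(String[] args) {
    byte[] payload = new byte[4096];                  // deliberately larger than the 1 KB initial capacity
    Buffer buffer = new Buffer(
        new CodecBuffer.Capacity("sketch-key", 1 << 10),
        dst -> {                                      // hypothetical source; real callers pass iterator::key
          int copied = Math.min(dst.remaining(), payload.length);
          dst.put(payload, 0, copied);                // copy only what fits into the writable window
          return payload.length;                      // report the full required size, as RocksDB does
        });
    try {
      CodecBuffer out = buffer.getFromDb();           // detects the short read, grows the capacity, retries
      System.out.println("readable bytes: " + out.readableBytes());
    } finally {
      buffer.release();                               // releases the CodecBuffer owned by the helper
    }
  }
}

The returned CodecBuffer stays owned by the helper, which is why the sketch releases the Buffer rather than closing the CodecBuffer directly.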
- if (buffer.setCapacity(required)) { - buffer.clear(); - // retry with the new capacity - final int retried = buffer.putFromSource(source); - Preconditions.assertSame(required.intValue(), retried, "required"); - return buffer; - } - - // failed to increase the capacity - // increase initial capacity and reallocate it - initialCapacity.increase(required); - } - } - } } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestCodecBufferCodec.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestCodecBufferCodec.java index 03a2bc4f4baa..e147bba93700 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestCodecBufferCodec.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestCodecBufferCodec.java @@ -17,10 +17,12 @@ package org.apache.hadoop.hdds.utils.db; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertSame; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; import org.junit.jupiter.params.provider.ValueSource; /** @@ -50,4 +52,19 @@ public void testFromPersistedFormat(boolean direct) throws CodecException { assertEquals(testString, value); } } + + @ParameterizedTest + @CsvSource(value = {"0,true", "0,false", "1,true", "1,false", "10,true", "10,false"}) + public void testCodecBufferAllocateByteArray(int length, boolean direct) throws CodecException { + byte[] arr = new byte[length]; + Codec codec = CodecBufferCodec.get(direct); + for (int i = 0; i < length; i++) { + arr[i] = (byte)i; + } + try (CodecBuffer codecBuffer = codec.fromPersistedFormat(arr)) { + assertEquals(length, codecBuffer.asReadOnlyByteBuffer().remaining()); + assertEquals(direct || length == 0, codecBuffer.asReadOnlyByteBuffer().isDirect()); + assertArrayEquals(arr, codecBuffer.getArray()); + } + } } diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSSTFileIterator.java similarity index 61% rename from hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java rename to hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSSTFileIterator.java index 4c66ca5cb438..134f24942dac 100644 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSSTFileIterator.java @@ -15,14 +15,12 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hdds.utils.db.managed; +package org.apache.hadoop.hdds.utils.db; import com.google.common.primitives.UnsignedLong; -import java.util.Arrays; +import java.nio.ByteBuffer; import java.util.NoSuchElementException; import java.util.function.Function; -import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.hdds.utils.db.IteratorType; import org.apache.hadoop.ozone.util.ClosableIterator; /** @@ -33,20 +31,32 @@ public class ManagedRawSSTFileIterator implements ClosableIterator { private final long nativeHandle; private final Function transformer; private final IteratorType type; + private boolean closed; + private final Buffer keyBuffer; + private final Buffer valueBuffer; - ManagedRawSSTFileIterator(long nativeHandle, Function transformer, IteratorType type) { + ManagedRawSSTFileIterator(String name, long nativeHandle, Function transformer, IteratorType type) { this.nativeHandle = nativeHandle; this.transformer = transformer; this.type = type; + this.closed = false; + this.keyBuffer = new Buffer( + new CodecBuffer.Capacity(name + " iterator-key", 1 << 10), + this.type.readKey() ? buffer -> this.getKey(this.nativeHandle, buffer, buffer.position(), + buffer.remaining()) : null); + this.valueBuffer = new Buffer( + new CodecBuffer.Capacity(name + " iterator-value", 4 << 10), + this.type.readValue() ? buffer -> this.getValue(this.nativeHandle, buffer, buffer.position(), + buffer.remaining()) : null); } private native boolean hasNext(long handle); private native void next(long handle); - private native byte[] getKey(long handle); + private native int getKey(long handle, ByteBuffer buffer, int bufferOffset, int bufferLen); - private native byte[] getValue(long handle); + private native int getValue(long handle, ByteBuffer buffer, int bufferOffset, int bufferLen); private native long getSequenceNumber(long handle); @@ -63,10 +73,10 @@ public T next() { throw new NoSuchElementException(); } - KeyValue keyValue = new KeyValue(this.type.readKey() ? this.getKey(nativeHandle) : null, + KeyValue keyValue = new KeyValue(this.type.readKey() ? this.keyBuffer.getFromDb() : null, UnsignedLong.fromLongBits(this.getSequenceNumber(this.nativeHandle)), this.getType(nativeHandle), - this.type.readValue() ? this.getValue(nativeHandle) : null); + this.type.readValue() ? this.valueBuffer.getFromDb() : null); this.next(nativeHandle); return this.transformer.apply(keyValue); } @@ -74,8 +84,13 @@ public T next() { private native void closeInternal(long handle); @Override - public void close() { - this.closeInternal(this.nativeHandle); + public synchronized void close() { + if (!closed) { + this.closeInternal(this.nativeHandle); + keyBuffer.release(); + valueBuffer.release(); + } + closed = true; } /** @@ -83,21 +98,21 @@ public void close() { */ public static final class KeyValue { - private final byte[] key; + private final CodecBuffer key; private final UnsignedLong sequence; private final Integer type; - private final byte[] value; + private final CodecBuffer value; - private KeyValue(byte[] key, UnsignedLong sequence, Integer type, - byte[] value) { + private KeyValue(CodecBuffer key, UnsignedLong sequence, Integer type, + CodecBuffer value) { this.key = key; this.sequence = sequence; this.type = type; this.value = value; } - public byte[] getKey() { - return key == null ? 
null : Arrays.copyOf(key, key.length); + public CodecBuffer getKey() { + return this.key; } public UnsignedLong getSequence() { @@ -108,17 +123,17 @@ public Integer getType() { return type; } - public byte[] getValue() { - return value == null ? null : Arrays.copyOf(value, value.length); + public CodecBuffer getValue() { + return value; } @Override public String toString() { return "KeyValue{" + - "key=" + (key == null ? null : StringUtils.bytes2String(key)) + + "key=" + (key == null ? null : StringCodec.get().fromCodecBuffer(key)) + ", sequence=" + sequence + ", type=" + type + - ", value=" + (value == null ? null : StringUtils.bytes2String(value)) + + ", value=" + (value == null ? null : StringCodec.get().fromCodecBuffer(value)) + '}'; } } diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSSTFileReader.java similarity index 84% rename from hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java rename to hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSSTFileReader.java index 49153781e73b..c644bd393b50 100644 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSSTFileReader.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.hadoop.hdds.utils.db.managed; +package org.apache.hadoop.hdds.utils.db; import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; @@ -24,14 +24,16 @@ import java.util.function.Function; import org.apache.hadoop.hdds.utils.NativeLibraryLoader; import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; -import org.apache.hadoop.hdds.utils.db.IteratorType; +import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils; +import org.apache.hadoop.hdds.utils.db.managed.ManagedSlice; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * JNI for RocksDB RawSSTFileReader. */ -public class ManagedRawSSTFileReader implements Closeable { +public class ManagedRawSSTFileReader implements Closeable { private static final Logger LOG = LoggerFactory.getLogger(ManagedRawSSTFileReader.class); @@ -62,14 +64,15 @@ public ManagedRawSSTFileReader(final ManagedOptions options, final String fileNa this.nativeHandle = this.newRawSSTFileReader(options.getNativeHandle(), fileName, readAheadSize); } - public ManagedRawSSTFileIterator newIterator( + public ManagedRawSSTFileIterator newIterator( Function transformerFunction, ManagedSlice fromSlice, ManagedSlice toSlice, IteratorType type) { long fromNativeHandle = fromSlice == null ? 0 : fromSlice.getNativeHandle(); long toNativeHandle = toSlice == null ? 0 : toSlice.getNativeHandle(); LOG.info("Iterating SST file: {} with native lib. 
" + - "LowerBound: {}, UpperBound: {}, type : {}", fileName, fromSlice, toSlice, type); - return new ManagedRawSSTFileIterator<>( + "LowerBound: {}, UpperBound: {}, type : {} with reader handle: {}", fileName, fromSlice, toSlice, type, + this.nativeHandle); + return new ManagedRawSSTFileIterator<>(fileName + " " + this.nativeHandle, newIterator(this.nativeHandle, fromSlice != null, fromNativeHandle, toSlice != null, toNativeHandle), transformerFunction, type); diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/package-info.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java similarity index 86% rename from hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/package-info.java rename to hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java index dc54a777de5f..42b83808542d 100644 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/package-info.java +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java @@ -16,6 +16,6 @@ */ /** - * This package contains utility classes related to Managed SST dump tool. + * Native rocksdb utilities. */ -package org.apache.hadoop.hdds.utils.db.managed; +package org.apache.hadoop.hdds.utils.db; diff --git a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp index 1cf222528379..7720e30b4119 100644 --- a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp +++ b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp @@ -16,75 +16,87 @@ * limitations under the License. */ -#include "org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator.h" +#include "org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator.h" #include "rocksdb/options.h" #include "rocksdb/raw_iterator.h" #include #include "cplusplus_to_java_convert.h" #include -jboolean Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_hasNext(JNIEnv *env, jobject obj, - jlong native_handle) { +template +static jint copyToDirect(JNIEnv* env, T& source, jobject jtarget, jint jtarget_off, jint jtarget_len); + +jboolean Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_hasNext(JNIEnv *env, jobject obj, + jlong native_handle) { return static_cast(reinterpret_cast(native_handle)->Valid()); } -void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_next(JNIEnv *env, jobject obj, - jlong native_handle) { +void Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_next(JNIEnv *env, jobject obj, + jlong native_handle) { reinterpret_cast(native_handle)->Next(); } -jbyteArray Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getKey(JNIEnv *env, - jobject obj, - jlong native_handle) { +jint Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_getKey(JNIEnv *env, + jobject obj, + jlong native_handle, + jobject jtarget, + jint jtarget_off, jint jtarget_len) { ROCKSDB_NAMESPACE::Slice slice = reinterpret_cast(native_handle)->key(); - jbyteArray jkey = env->NewByteArray(static_cast(slice.size())); - if (jkey == nullptr) { - // exception thrown: OutOfMemoryError - return nullptr; - } - env->SetByteArrayRegion( - jkey, 0, static_cast(slice.size()), - const_cast(reinterpret_cast(slice.data()))); - return jkey; + return copyToDirect(env, slice, jtarget, jtarget_off, jtarget_len); } 
-jbyteArray Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getValue(JNIEnv *env, - jobject obj, - jlong native_handle) { +jint Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_getValue(JNIEnv *env, + jobject obj, + jlong native_handle, + jobject jtarget, + jint jtarget_off, jint jtarget_len) { ROCKSDB_NAMESPACE::Slice slice = reinterpret_cast(native_handle)->value(); - jbyteArray jkey = env->NewByteArray(static_cast(slice.size())); - if (jkey == nullptr) { - // exception thrown: OutOfMemoryError - return nullptr; - } - env->SetByteArrayRegion( - jkey, 0, static_cast(slice.size()), - const_cast(reinterpret_cast(slice.data()))); - return jkey; + return copyToDirect(env, slice, jtarget, jtarget_off, jtarget_len); } -jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getSequenceNumber(JNIEnv *env, - jobject obj, - jlong native_handle) { - uint64_t sequence_number = - reinterpret_cast(native_handle)->sequenceNumber(); +jlong Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_getSequenceNumber(JNIEnv *env, + jobject obj, + jlong native_handle) { + uint64_t sequence_number = reinterpret_cast(native_handle)->sequenceNumber(); jlong result; std::memcpy(&result, &sequence_number, sizeof(jlong)); return result; } -jint Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getType(JNIEnv *env, - jobject obj, - jlong native_handle) { +jint Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_getType(JNIEnv *env, + jobject obj, + jlong native_handle) { uint32_t type = reinterpret_cast(native_handle)->type(); return static_cast(type); } -void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_closeInternal(JNIEnv *env, - jobject obj, - jlong native_handle) { +void Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_closeInternal(JNIEnv *env, + jobject obj, + jlong native_handle) { delete reinterpret_cast(native_handle); } + +template +static jint copyToDirect(JNIEnv* env, T& source, jobject jtarget, + jint jtarget_off, jint jtarget_len) { + char* target = reinterpret_cast(env->GetDirectBufferAddress(jtarget)); + if (target == nullptr || env->GetDirectBufferCapacity(jtarget) < (jtarget_off + jtarget_len)) { + jclass exClass = env->FindClass("java/lang/IllegalArgumentException"); + if (exClass != nullptr) { + env->ThrowNew(exClass, "Invalid buffer address or capacity"); + } + return -1; + } + + target += jtarget_off; + + const jint cvalue_len = static_cast(source.size()); + const jint length = std::min(jtarget_len, cvalue_len); + + memcpy(target, source.data(), length); + + return cvalue_len; +} diff --git a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp index f3b8dc02639d..ff49ee56f06f 100644 --- a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp +++ b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -#include "org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader.h" +#include "org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileReader.h" #include "rocksdb/options.h" #include "rocksdb/raw_sst_file_reader.h" #include "rocksdb/raw_iterator.h" @@ -24,7 +24,7 @@ #include "cplusplus_to_java_convert.h" #include -jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_newRawSSTFileReader(JNIEnv *env, jobject obj, +jlong Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileReader_newRawSSTFileReader(JNIEnv *env, jobject obj, jlong options_handle, jstring jfilename, jint readahead_size) { @@ -37,7 +37,7 @@ jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_newRa return GET_CPLUSPLUS_POINTER(raw_sst_file_reader); } -jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_newIterator(JNIEnv *env, jobject obj, +jlong Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileReader_newIterator(JNIEnv *env, jobject obj, jlong native_handle, jboolean jhas_from, jlong from_slice_handle, @@ -59,7 +59,7 @@ jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_newIt return GET_CPLUSPLUS_POINTER(iterator); } -void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_disposeInternal(JNIEnv *env, jobject obj, +void Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileReader_disposeInternal(JNIEnv *env, jobject obj, jlong native_handle) { delete reinterpret_cast(native_handle); } diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java index 3dbbdc35513a..b8ac1c132e0c 100644 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java +++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java @@ -37,7 +37,7 @@ import java.util.Map; import java.util.stream.Stream; import org.apache.commons.io.FileUtils; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; +import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader; import org.junit.jupiter.api.condition.EnabledIfSystemProperty; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedRawSSTFileIterator.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/TestManagedRawSSTFileIterator.java similarity index 86% rename from hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedRawSSTFileIterator.java rename to hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/TestManagedRawSSTFileIterator.java index e7e2a398e541..fee69e6ba187 100644 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedRawSSTFileIterator.java +++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/TestManagedRawSSTFileIterator.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hdds.utils.db.managed; +package org.apache.hadoop.hdds.utils.db; import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_PROPERTY; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -35,11 +35,15 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; import org.apache.hadoop.hdds.utils.TestUtils; -import org.apache.hadoop.hdds.utils.db.IteratorType; +import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedSlice; +import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Named; import org.junit.jupiter.api.condition.EnabledIfSystemProperty; @@ -90,7 +94,13 @@ private static Stream keyValueFormatArgs() { Named.of("Value starting & ending with a number & containing null character & new line character", "%1$dvalue\n\0%1$d")), Arguments.of(Named.of("Key ending with a number & containing a null character", "key\0%1$d"), - Named.of("Value starting & ending with a number & elosed within quotes", "%1$dvalue\r%1$d"))) + Named.of("Value starting & ending with a number & elosed within quotes", "%1$dvalue\r%1$d")), + Arguments.of(Named.of("Key with prefix length 5k of random alphaNumeric string", + new StringBuilder(RandomStringUtils.secure().nextAlphanumeric(5 << 10)) + .append("key%1$d").toString()), + Named.of("Value with prefix length 5k of random alphaNumeric string", + new StringBuilder(RandomStringUtils.secure().nextAlphanumeric(5 << 10)) + .append("%1$dvalue%1$d").toString()))) .flatMap(i -> Arrays.stream(IteratorType.values()).map(type -> Arguments.of(i.get()[0], i.get()[1], type))); } @@ -109,7 +119,7 @@ public void testSSTDumpIteratorWithKeyFormat(String keyFormat, String valueForma (v1, v2) -> v2, TreeMap::new)); File file = createSSTFileWithKeys(keys); try (ManagedOptions options = new ManagedOptions(); - ManagedRawSSTFileReader reader = new ManagedRawSSTFileReader<>( + ManagedRawSSTFileReader reader = new ManagedRawSSTFileReader( options, file.getAbsolutePath(), 2 * 1024 * 1024)) { List> testBounds = TestUtils.getTestingBounds(keys.keySet().stream() .collect(Collectors.toMap(Pair::getKey, Pair::getValue, (v1, v2) -> v1, TreeMap::new))); @@ -129,10 +139,10 @@ public void testSSTDumpIteratorWithKeyFormat(String keyFormat, String valueForma ManagedRawSSTFileIterator.KeyValue r = iterator.next(); assertTrue(expectedKeyItr.hasNext()); Map.Entry, String> expectedKey = expectedKeyItr.next(); - String key = r.getKey() == null ? null : StringUtils.bytes2String(r.getKey()); + String key = r.getKey() == null ? null : StringCodec.get().fromCodecBuffer(r.getKey()); assertEquals(type.readKey() ? expectedKey.getKey().getKey() : null, key); assertEquals(type.readValue() ? expectedKey.getValue() : null, - type.readValue() ? StringUtils.bytes2String(r.getValue()) : r.getValue()); + type.readValue() ? 
StringCodec.get().fromCodecBuffer(r.getValue()) : r.getValue()); expectedKeyItr.remove(); } assertEquals(0, expectedKeys.size()); diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedSstFileIterator.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedSstFileIterator.java new file mode 100644 index 000000000000..abfbd48e347e --- /dev/null +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedSstFileIterator.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.utils.db; + +import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader; +import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator; +import org.apache.hadoop.ozone.util.ClosableIterator; +import org.rocksdb.RocksDBException; + +/** + * ManagedSstFileIterator is an abstract class designed to provide a managed, resource-safe + * iteration over SST (Sorted String Table) files leveraging RocksDB. It implements the + * {@link ClosableIterator} interface to support resource management and ensures proper + * cleanup of resources upon closure. This class binds together a ManagedSstFileReader, + * ManagedSstFileReaderIterator, and Buffers for keys and values, while allowing specific + * implementations to define how the iterator values are derived. + * + * @param The type of the element to be returned by the iterator. + */ +abstract class ManagedSstFileIterator implements ClosableIterator { + private final ManagedSstFileReader fileReader; + private final ManagedSstFileReaderIterator fileReaderIterator; + private final IteratorType type; + private boolean closed; + private final Buffer keyBuffer; + private final Buffer valueBuffer; + + ManagedSstFileIterator(String path, ManagedOptions options, ManagedReadOptions readOptions, + IteratorType type) throws RocksDatabaseException { + try { + this.fileReader = new ManagedSstFileReader(options); + this.fileReader.open(path); + this.fileReaderIterator = ManagedSstFileReaderIterator.managed(fileReader.newIterator(readOptions)); + fileReaderIterator.get().seekToFirst(); + this.closed = false; + this.type = type; + this.keyBuffer = new Buffer( + new CodecBuffer.Capacity(path + " iterator-key", 1 << 10), + this.type.readKey() ? buffer -> fileReaderIterator.get().key(buffer) : null); + this.valueBuffer = new Buffer( + new CodecBuffer.Capacity(path + " iterator-value", 4 << 10), + this.type.readValue() ? 
buffer -> fileReaderIterator.get().value(buffer) : null); + } catch (RocksDBException e) { + throw new RocksDatabaseException("Failed to open SST file: " + path, e); + } + } + + @Override + public synchronized void close() { + if (!closed) { + this.fileReaderIterator.close(); + this.fileReader.close(); + keyBuffer.release(); + valueBuffer.release(); + } + closed = true; + } + + @Override + public synchronized boolean hasNext() { + return fileReaderIterator.get().isValid(); + } + + abstract T getIteratorValue(CodecBuffer key, CodecBuffer value); + + @Override + public synchronized T next() { + T value = getIteratorValue(this.type.readKey() ? keyBuffer.getFromDb() : null, + this.type.readValue() ? valueBuffer.getFromDb() : null); + fileReaderIterator.get().next(); + return value; + } +} diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/SstFileSetReader.java similarity index 76% rename from hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java rename to hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/SstFileSetReader.java index 7fbd80cf0910..b4c39ccc9c27 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/SstFileSetReader.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.ozone.rocksdb.util; +package org.apache.hadoop.hdds.utils.db; import static org.apache.hadoop.hdds.utils.db.IteratorType.KEY_ONLY; @@ -30,19 +30,11 @@ import java.util.function.Function; import java.util.stream.Collectors; import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.hdds.utils.db.CodecException; -import org.apache.hadoop.hdds.utils.db.IteratorType; -import org.apache.hadoop.hdds.utils.db.MinHeapMergeIterator; -import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; -import org.apache.hadoop.hdds.utils.db.StringCodec; +import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileIterator.KeyValue; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator.KeyValue; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedSlice; import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator; import org.apache.hadoop.ozone.util.ClosableIterator; import org.rocksdb.RocksDBException; @@ -116,10 +108,11 @@ protected void init() throws CodecException { @Override protected ClosableIterator getKeyIteratorForFile(String file) throws RocksDatabaseException { - return new ManagedSstFileIterator(file, options, readOptions) { + return new ManagedSstFileIterator(file, options, readOptions, KEY_ONLY) { + @Override - protected String getIteratorValue(ManagedSstFileReaderIterator iterator) { - return StringCodec.get().fromPersistedFormat(iterator.get().key()); + String getIteratorValue(CodecBuffer key, CodecBuffer value) { + return StringCodec.get().fromCodecBuffer(key); } }; } @@ -159,7 +152,7 @@ protected void init() throws 
CodecException { @Override protected ClosableIterator getKeyIteratorForFile(String file) { return new ManagedRawSstFileIterator(file, options, lowerBoundSlice, upperBoundSlice, - keyValue -> StringCodec.get().fromPersistedFormat(keyValue.getKey()), KEY_ONLY); + keyValue -> StringCodec.get().fromCodecBuffer(keyValue.getKey()), KEY_ONLY); } @Override @@ -172,51 +165,14 @@ public void close() throws UncheckedIOException { return itr; } - private abstract static class ManagedSstFileIterator implements ClosableIterator { - private final ManagedSstFileReader fileReader; - private final ManagedSstFileReaderIterator fileReaderIterator; - - ManagedSstFileIterator(String path, ManagedOptions options, ManagedReadOptions readOptions) - throws RocksDatabaseException { - try { - this.fileReader = new ManagedSstFileReader(options); - this.fileReader.open(path); - this.fileReaderIterator = ManagedSstFileReaderIterator.managed(fileReader.newIterator(readOptions)); - fileReaderIterator.get().seekToFirst(); - } catch (RocksDBException e) { - throw new RocksDatabaseException("Failed to open SST file: " + path, e); - } - } - - @Override - public void close() { - this.fileReaderIterator.close(); - this.fileReader.close(); - } - - @Override - public boolean hasNext() { - return fileReaderIterator.get().isValid(); - } - - protected abstract String getIteratorValue(ManagedSstFileReaderIterator iterator); - - @Override - public String next() { - String value = getIteratorValue(fileReaderIterator); - fileReaderIterator.get().next(); - return value; - } - } - private static class ManagedRawSstFileIterator implements ClosableIterator { - private final ManagedRawSSTFileReader fileReader; + private final ManagedRawSSTFileReader fileReader; private final ManagedRawSSTFileIterator fileReaderIterator; private static final int READ_AHEAD_SIZE = 2 * 1024 * 1024; ManagedRawSstFileIterator(String path, ManagedOptions options, ManagedSlice lowerBound, ManagedSlice upperBound, Function keyValueFunction, IteratorType type) { - this.fileReader = new ManagedRawSSTFileReader<>(options, path, READ_AHEAD_SIZE); + this.fileReader = new ManagedRawSSTFileReader(options, path, READ_AHEAD_SIZE); this.fileReaderIterator = fileReader.newIterator(keyValueFunction, lowerBound, upperBound, type); } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java index d69515a1123d..956a0caac7c7 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java @@ -19,7 +19,6 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.function.Function.identity; -import static org.apache.commons.lang3.ArrayUtils.EMPTY_BYTE_ARRAY; import static org.apache.hadoop.hdds.utils.db.IteratorType.KEY_ONLY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT; @@ -75,16 +74,17 @@ import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; import org.apache.hadoop.hdds.utils.Scheduler; +import org.apache.hadoop.hdds.utils.db.CodecBuffer; +import 
org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileIterator; +import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader; +import org.apache.hadoop.hdds.utils.db.RDBSstFileWriter; import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; import org.apache.ozone.compaction.log.CompactionFileInfo; import org.apache.ozone.compaction.log.CompactionLogEntry; @@ -1322,8 +1322,7 @@ public void pruneSstFileValues() { // Prune file.sst => pruned.sst.tmp Files.deleteIfExists(prunedSSTFilePath); - removeValueFromSSTFile(managedOptions, envOptions, sstFilePath.toFile().getAbsolutePath(), - prunedSSTFilePath.toFile().getAbsolutePath()); + removeValueFromSSTFile(managedOptions, sstFilePath.toFile().getAbsolutePath(), prunedSSTFilePath.toFile()); // Move pruned.sst.tmp => file.sst and replace existing file atomically. try (UncheckedAutoCloseable lock = getBootstrapStateLock().acquireReadLock()) { @@ -1363,26 +1362,20 @@ public void pruneSstFileValues() { } } - private void removeValueFromSSTFile(ManagedOptions options, ManagedEnvOptions envOptions, - String sstFilePath, String prunedFilePath) - throws IOException { - try (ManagedRawSSTFileReader> sstFileReader = new ManagedRawSSTFileReader<>( - options, sstFilePath, SST_READ_AHEAD_SIZE); - ManagedRawSSTFileIterator> itr = sstFileReader.newIterator( + private void removeValueFromSSTFile(ManagedOptions options, String sstFilePath, File prunedFile) throws IOException { + try (ManagedRawSSTFileReader sstFileReader = new ManagedRawSSTFileReader(options, sstFilePath, SST_READ_AHEAD_SIZE); + ManagedRawSSTFileIterator> itr = sstFileReader.newIterator( keyValue -> Pair.of(keyValue.getKey(), keyValue.getType()), null, null, KEY_ONLY); - ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter(envOptions, options);) { - sstFileWriter.open(prunedFilePath); + RDBSstFileWriter sstFileWriter = new RDBSstFileWriter(prunedFile); + CodecBuffer emptyCodecBuffer = CodecBuffer.getEmptyBuffer()) { while (itr.hasNext()) { - Pair keyValue = itr.next(); + Pair keyValue = itr.next(); if (keyValue.getValue() == 0) { sstFileWriter.delete(keyValue.getKey()); } else { - sstFileWriter.put(keyValue.getKey(), EMPTY_BYTE_ARRAY); + sstFileWriter.put(keyValue.getKey(), emptyCodecBuffer); } } - sstFileWriter.finish(); - } catch (RocksDBException ex) { - throw new RocksDatabaseException("Failed to write pruned entries for " + sstFilePath, ex); } } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBSstFileWriter.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBSstFileWriter.java index c3129da85c4b..4476bcb808d2 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBSstFileWriter.java +++ 
b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBSstFileWriter.java @@ -28,13 +28,10 @@ import java.util.Queue; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.condition.EnabledIfSystemProperty; import org.junit.jupiter.api.io.TempDir; -import org.rocksdb.RocksDBException; /** * Test for RDBSstFileWriter. @@ -46,7 +43,7 @@ public class TestRDBSstFileWriter { @EnabledIfSystemProperty(named = ROCKS_TOOLS_NATIVE_PROPERTY, matches = "true") @Test - public void testSstFileTombstoneCreationWithCodecBufferReuse() throws IOException, RocksDBException { + public void testSstFileTombstoneCreationWithCodecBufferReuse() throws IOException { ManagedRawSSTFileReader.tryLoadLibrary(); Path sstPath = path.resolve("test.sst").toAbsolutePath(); try (CodecBuffer codecBuffer = CodecBuffer.allocateDirect(1024); @@ -78,8 +75,7 @@ public void testSstFileTombstoneCreationWithCodecBufferReuse() throws IOExceptio } Assertions.assertTrue(sstPath.toFile().exists()); try (ManagedOptions options = new ManagedOptions(); - ManagedRawSSTFileReader reader = new ManagedRawSSTFileReader<>(options, - sstPath.toString(), 1024); + ManagedRawSSTFileReader reader = new ManagedRawSSTFileReader(options, sstPath.toString(), 1024); ManagedRawSSTFileIterator itr = reader.newIterator(kv -> kv, null, null, IteratorType.KEY_AND_VALUE)) { @@ -89,7 +85,7 @@ public void testSstFileTombstoneCreationWithCodecBufferReuse() throws IOExceptio ManagedRawSSTFileIterator.KeyValue kv = itr.next(); assertEquals(idx, kv.getType()); assertEquals(keys.get(idx), keys.get(idx++)); - assertEquals(0, kv.getValue().length); + assertEquals(0, kv.getValue().readableBytes()); } assertEquals(2, idx); } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestSstFileSetReader.java similarity index 98% rename from hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java rename to hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestSstFileSetReader.java index c20eb8a20ed5..fd4bcbb6d90d 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestSstFileSetReader.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.ozone.rocksdb.util; +package org.apache.hadoop.hdds.utils.db; import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_PROPERTY; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -39,10 +39,8 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.utils.TestUtils; -import org.apache.hadoop.hdds.utils.db.CodecException; import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter; import org.apache.hadoop.ozone.util.ClosableIterator; import org.junit.jupiter.api.condition.EnabledIfSystemProperty; @@ -63,8 +61,8 @@ class TestSstFileSetReader { // Key prefix containing all characters, to check if all characters can be // written & read from rocksdb through SSTDumptool - private static final String KEY_PREFIX = IntStream.range(0, 256).boxed() - .map(i -> String.format("%c", i)) + private static final String KEY_PREFIX = IntStream.range(0, 5 << 10).boxed() + .map(i -> String.format("%c", i % 256)) .collect(Collectors.joining("")); /** diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestCompactionDag.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestCompactionDag.java index 5c527948fdc3..2fde23bb376e 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestCompactionDag.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestCompactionDag.java @@ -63,9 +63,9 @@ import java.util.stream.Stream; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; import org.apache.ozone.compaction.log.CompactionLogEntry; diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java index b1867a3d5ebc..9c1fb6b0a060 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java @@ -44,6 +44,7 @@ import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -56,6 +57,7 @@ import java.io.FileWriter; import java.io.IOException; import java.io.OutputStream; +import java.nio.ByteBuffer; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; @@ -90,22 +92,22 @@ import org.apache.hadoop.hdds.StringUtils; import 
org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.hdds.utils.db.CodecBuffer; +import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileIterator; +import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader; +import org.apache.hadoop.hdds.utils.db.RDBSstFileWriter; import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; import org.apache.hadoop.hdds.utils.db.managed.ManagedCheckpoint; import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedFlushOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter; import org.apache.hadoop.util.Time; import org.apache.ozone.compaction.log.CompactionFileInfo; import org.apache.ozone.compaction.log.CompactionLogEntry; @@ -1608,20 +1610,21 @@ public void testPruneSSTFileValues() throws Exception { assertEquals(0L, sstFilePruningMetrics.getCompactionsProcessed()); assertEquals(0L, sstFilePruningMetrics.getFilesRemovedTotal()); - List> keys = new ArrayList>(); - keys.add(Pair.of("key1".getBytes(UTF_8), Integer.valueOf(1))); - keys.add(Pair.of("key2".getBytes(UTF_8), Integer.valueOf(0))); - keys.add(Pair.of("key3".getBytes(UTF_8), Integer.valueOf(1))); + List> keys = new ArrayList>(); + keys.add(Pair.of("key1", Integer.valueOf(1))); + keys.add(Pair.of("key2", Integer.valueOf(0))); + keys.add(Pair.of("key3", Integer.valueOf(1))); String inputFile78 = "000078"; String inputFile73 = "000073"; String outputFile81 = "000081"; // Create src & destination files in backup & activedirectory. // Pruning job should succeed when pruned temp file is already present. - createSSTFileWithKeys(sstBackUpDir + "/" + inputFile78 + SST_FILE_EXTENSION, keys); - createSSTFileWithKeys(sstBackUpDir + "/" + inputFile73 + SST_FILE_EXTENSION, keys); - createSSTFileWithKeys(sstBackUpDir + PRUNED_SST_FILE_TEMP, keys); - createSSTFileWithKeys(activeDbDir + "/" + outputFile81 + SST_FILE_EXTENSION, keys); + Path sstBackupDirPath = sstBackUpDir.toPath(); + createSSTFileWithKeys(sstBackupDirPath.resolve(inputFile78 + SST_FILE_EXTENSION).toFile(), keys); + createSSTFileWithKeys(sstBackupDirPath.resolve(inputFile73 + SST_FILE_EXTENSION).toFile(), keys); + createSSTFileWithKeys(sstBackupDirPath.resolve(PRUNED_SST_FILE_TEMP).toFile(), keys); + createSSTFileWithKeys(activeDbDir.toPath().resolve(outputFile81 + SST_FILE_EXTENSION).toFile(), keys); // Load compaction log CompactionLogEntry compactionLogEntry = new CompactionLogEntry(178, System.currentTimeMillis(), @@ -1639,15 +1642,21 @@ public void testPruneSSTFileValues() throws Exception { // Pruning should not fail a source SST file has been removed by another pruner. 
Files.delete(sstBackUpDir.toPath().resolve(inputFile73 + SST_FILE_EXTENSION)); // Run the SST file pruner. - ManagedRawSSTFileIterator mockedRawSSTFileItr = mock(ManagedRawSSTFileIterator.class); - Iterator keyItr = keys.iterator(); - when(mockedRawSSTFileItr.hasNext()).thenReturn(true, true, true, false); - when(mockedRawSSTFileItr.next()).thenReturn(keyItr.next(), keyItr.next(), keyItr.next()); - try (MockedConstruction mockedRawSSTReader = Mockito.mockConstruction( - ManagedRawSSTFileReader.class, (mock, context) -> { - when(mock.newIterator(any(), any(), any(), any())).thenReturn(mockedRawSSTFileItr); - doNothing().when(mock).close(); - })) { + + try (CodecBuffer keyCodecBuffer = CodecBuffer.allocateDirect(1024); + MockedConstruction mockedRawSSTReader = Mockito.mockConstruction( + ManagedRawSSTFileReader.class, (mock, context) -> { + ManagedRawSSTFileIterator mockedRawSSTFileItr = mock(ManagedRawSSTFileIterator.class); + Iterator> keyItr = keys.stream().map(i -> { + keyCodecBuffer.clear(); + keyCodecBuffer.put(ByteBuffer.wrap(i.getKey().getBytes(UTF_8))); + return Pair.of(keyCodecBuffer, i.getValue()); + }).iterator(); + doAnswer(i -> keyItr.hasNext()).when(mockedRawSSTFileItr).hasNext(); + doAnswer(i -> keyItr.next()).when(mockedRawSSTFileItr).next(); + when(mock.newIterator(any(), any(), any(), any())).thenReturn(mockedRawSSTFileItr); + doNothing().when(mock).close(); + })) { rocksDBCheckpointDiffer.pruneSstFileValues(); } // pruned.sst.tmp should be deleted when pruning job exits successfully. @@ -1692,22 +1701,18 @@ public void testPruneSSTFileValues() throws Exception { assertEquals(1L, sstFilePruningMetrics.getFilesRemovedTotal()); } - private void createSSTFileWithKeys(String filePath, List> keys) - throws Exception { - try (ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter(new ManagedEnvOptions(), new ManagedOptions())) { - sstFileWriter.open(filePath); - Iterator> itr = keys.iterator(); + private void createSSTFileWithKeys(File file, List> keys) throws RocksDatabaseException { + byte[] value = "dummyValue".getBytes(UTF_8); + try (RDBSstFileWriter sstFileWriter = new RDBSstFileWriter(file)) { + Iterator> itr = keys.iterator(); while (itr.hasNext()) { - Pair entry = itr.next(); + Pair entry = itr.next(); if (entry.getValue() == 0) { - sstFileWriter.delete(entry.getKey()); + sstFileWriter.delete(entry.getKey().getBytes(UTF_8)); } else { - sstFileWriter.put(entry.getKey(), "dummyValue".getBytes(UTF_8)); + sstFileWriter.put(entry.getKey().getBytes(UTF_8), value); } } - sstFileWriter.finish(); - } catch (RocksDBException ex) { - throw new RocksDatabaseException("Failed to get write " + filePath, ex); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index dece2e258946..5f14451fad32 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -101,13 +101,14 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.CompactionLogEntryProto; import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; +import org.apache.hadoop.hdds.utils.db.CodecBuffer; import org.apache.hadoop.hdds.utils.db.DBProfile; import org.apache.hadoop.hdds.utils.db.DBStore; +import 
org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileIterator; +import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.RocksDatabase; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; @@ -2522,12 +2523,12 @@ public void testSnapshotCompactionDag() throws Exception { java.nio.file.Path file = sstBackUpDir.resolve(f.getFileName() + ".sst"); if (COLUMN_FAMILIES_TO_TRACK_IN_DAG.contains(f.getColumnFamily()) && java.nio.file.Files.exists(file)) { assertTrue(f.isPruned()); - try (ManagedRawSSTFileReader sstFileReader = new ManagedRawSSTFileReader<>( + try (ManagedRawSSTFileReader sstFileReader = new ManagedRawSSTFileReader( managedOptions, file.toFile().getAbsolutePath(), 2 * 1024 * 1024); - ManagedRawSSTFileIterator itr = sstFileReader.newIterator( - keyValue -> keyValue.getValue(), null, null, KEY_AND_VALUE)) { + ManagedRawSSTFileIterator itr = sstFileReader.newIterator( + ManagedRawSSTFileIterator.KeyValue::getValue, null, null, KEY_AND_VALUE)) { while (itr.hasNext()) { - assertEquals(0, itr.next().length); + assertEquals(0, itr.next().readableBytes()); } } } else { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index 0c764f948860..2147fc3ec180 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -97,11 +97,12 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; import org.apache.hadoop.hdds.utils.db.CodecRegistry; +import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.hdds.utils.db.SstFileSetReader; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry; import org.apache.hadoop.ozone.OFSPath; @@ -129,7 +130,6 @@ import org.apache.hadoop.ozone.util.ClosableIterator; import org.apache.logging.log4j.util.Strings; import org.apache.ozone.rocksdb.util.SstFileInfo; -import org.apache.ozone.rocksdb.util.SstFileSetReader; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.rocksdb.ColumnFamilyDescriptor; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/defrag/SnapshotDefragService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/defrag/SnapshotDefragService.java index daca37095334..87e0704d10a7 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/defrag/SnapshotDefragService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/defrag/SnapshotDefragService.java @@ -57,16 +57,17 @@ import org.apache.hadoop.hdds.utils.db.CodecException; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.RDBSstFileWriter; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint; import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; +import org.apache.hadoop.hdds.utils.db.SstFileSetReader; import org.apache.hadoop.hdds.utils.db.StringCodec; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; import org.apache.hadoop.hdds.utils.db.managed.ManagedCompactRangeOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -88,7 +89,6 @@ import org.apache.hadoop.util.Time; import org.apache.logging.log4j.util.Strings; import org.apache.ozone.rocksdb.util.SstFileInfo; -import org.apache.ozone.rocksdb.util.SstFileSetReader; import org.apache.ratis.util.UncheckedAutoCloseable; import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.slf4j.Logger; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index 5d681640249b..5a82c3b1591e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -108,6 +108,7 @@ import org.apache.hadoop.hdds.utils.db.CodecRegistry; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.RocksDatabase; +import org.apache.hadoop.hdds.utils.db.SstFileSetReader; import org.apache.hadoop.hdds.utils.db.StringInMemoryTestTable; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; @@ -140,7 +141,6 @@ import org.apache.hadoop.ozone.util.ClosableIterator; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ExitUtil; -import org.apache.ozone.rocksdb.util.SstFileSetReader; import org.apache.ratis.util.ExitUtils; import org.apache.ratis.util.TimeDuration; import org.junit.jupiter.api.AfterEach; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/defrag/TestSnapshotDefragService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/defrag/TestSnapshotDefragService.java index 6a201ba88637..00c1b73398ad 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/defrag/TestSnapshotDefragService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/defrag/TestSnapshotDefragService.java @@ -84,6 +84,7 @@ import org.apache.hadoop.hdds.utils.db.RDBSstFileWriter; import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint; import 
org.apache.hadoop.hdds.utils.db.RocksDatabaseException; +import org.apache.hadoop.hdds.utils.db.SstFileSetReader; import org.apache.hadoop.hdds.utils.db.StringCodec; import org.apache.hadoop.hdds.utils.db.StringInMemoryTestTable; import org.apache.hadoop.hdds.utils.db.Table; @@ -108,7 +109,6 @@ import org.apache.hadoop.ozone.upgrade.LayoutFeature; import org.apache.hadoop.ozone.util.ClosableIterator; import org.apache.ozone.rocksdb.util.SstFileInfo; -import org.apache.ozone.rocksdb.util.SstFileSetReader; import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; From 514f663e404eee4a110a9c4f6962d29d25c442ab Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Sun, 21 Dec 2025 15:48:55 +0100 Subject: [PATCH 22/36] HDDS-14221. Support primitive query params (#9537) --- .../s3/commontypes/RequestParameters.java | 90 +++++++++ .../ozone/s3/endpoint/BucketEndpoint.java | 29 ++- .../ozone/s3/endpoint/EndpointBase.java | 15 +- .../ozone/s3/endpoint/TestBucketAcl.java | 5 +- .../ozone/s3/endpoint/TestBucketList.java | 172 ++++++++---------- .../s3/endpoint/TestPermissionCheck.java | 10 +- .../s3/metrics/TestS3GatewayMetrics.java | 18 +- 7 files changed, 209 insertions(+), 130 deletions(-) create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/RequestParameters.java diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/RequestParameters.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/RequestParameters.java new file mode 100644 index 000000000000..7b5e2d0d6e82 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/RequestParameters.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.s3.commontypes; + +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.MultivaluedMap; +import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; + +/** Allow looking up query parameters as primitive types. */ +public interface RequestParameters { + + String get(String key); + + static MultivaluedMapImpl of(MultivaluedMap params) { + return new MultivaluedMapImpl(params); + } + + default String get(String key, String defaultValue) { + final String value = get(key); + return value != null ? 
value : defaultValue; + } + + default int getInt(String key, int defaultValue) { + final String value = get(key); + if (value == null) { + return defaultValue; + } + + try { + return Integer.parseInt(value); + } catch (NumberFormatException e) { + throw translateException(e); + } + } + + default WebApplicationException translateException(RuntimeException e) { + return new WebApplicationException(e.getMessage(), S3ErrorTable.INVALID_ARGUMENT.getHttpCode()); + } + + /** Additional methods for tests. */ + interface Mutable extends RequestParameters { + + void set(String key, String value); + + void unset(String key); + + default void setInt(String key, int value) { + set(key, String.valueOf(value)); + } + } + + /** Mutable implementation based on {@link MultivaluedMap}. */ + final class MultivaluedMapImpl implements Mutable { + private final MultivaluedMap params; + + private MultivaluedMapImpl(MultivaluedMap params) { + this.params = params; + } + + @Override + public String get(String key) { + return params.getFirst(key); + } + + @Override + public void set(String key, String value) { + params.putSingle(key, value); + } + + @Override + public void unset(String key) { + params.remove(key); + } + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index 491ba2a05b37..4e7aff26e9c1 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -40,7 +40,6 @@ import java.util.Set; import javax.annotation.PostConstruct; import javax.ws.rs.DELETE; -import javax.ws.rs.DefaultValue; import javax.ws.rs.GET; import javax.ws.rs.HEAD; import javax.ws.rs.POST; @@ -116,20 +115,20 @@ private BucketEndpointContext getBucketContext() { @GET @SuppressWarnings("methodlength") public Response get( - @PathParam(BUCKET) String bucketName, - @DefaultValue("1000") @QueryParam(QueryParams.MAX_KEYS) int maxKeys, - @DefaultValue("1000") @QueryParam(QueryParams.MAX_UPLOADS) int maxUploads + @PathParam(BUCKET) String bucketName ) throws OS3Exception, IOException { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.GET_BUCKET; PerformanceStringBuilder perf = new PerformanceStringBuilder(); - final String continueToken = getQueryParam(QueryParams.CONTINUATION_TOKEN); - final String delimiter = getQueryParam(QueryParams.DELIMITER); - final String encodingType = getQueryParam(QueryParams.ENCODING_TYPE); - final String marker = getQueryParam(QueryParams.MARKER); - String prefix = getQueryParam(QueryParams.PREFIX); - String startAfter = getQueryParam(QueryParams.START_AFTER); + final String continueToken = queryParams().get(QueryParams.CONTINUATION_TOKEN); + final String delimiter = queryParams().get(QueryParams.DELIMITER); + final String encodingType = queryParams().get(QueryParams.ENCODING_TYPE); + final String marker = queryParams().get(QueryParams.MARKER); + int maxKeys = queryParams().getInt(QueryParams.MAX_KEYS, 1000); + final int maxUploads = queryParams().getInt(QueryParams.MAX_UPLOADS, 1000); + String prefix = queryParams().get(QueryParams.PREFIX); + String startAfter = queryParams().get(QueryParams.START_AFTER); Iterator ozoneKeyIterator = null; ContinueToken decodedToken = @@ -137,7 +136,7 @@ public Response get( OzoneBucket bucket = null; try { - final String aclMarker = 
getQueryParam(QueryParams.ACL); + final String aclMarker = queryParams().get(QueryParams.ACL); if (aclMarker != null) { s3GAction = S3GAction.GET_ACL; S3BucketAcl result = getAcl(bucketName); @@ -146,11 +145,11 @@ public Response get( return Response.ok(result, MediaType.APPLICATION_XML_TYPE).build(); } - final String uploads = getQueryParam(QueryParams.UPLOADS); + final String uploads = queryParams().get(QueryParams.UPLOADS); if (uploads != null) { s3GAction = S3GAction.LIST_MULTIPART_UPLOAD; - final String uploadIdMarker = getQueryParam(QueryParams.UPLOAD_ID_MARKER); - final String keyMarker = getQueryParam(QueryParams.KEY_MARKER); + final String uploadIdMarker = queryParams().get(QueryParams.UPLOAD_ID_MARKER); + final String keyMarker = queryParams().get(QueryParams.KEY_MARKER); return listMultipartUploads(bucketName, prefix, keyMarker, uploadIdMarker, maxUploads); } @@ -324,7 +323,7 @@ public Response put( S3GAction s3GAction = S3GAction.CREATE_BUCKET; try { - final String aclMarker = getQueryParam(QueryParams.ACL); + final String aclMarker = queryParams().get(QueryParams.ACL); if (aclMarker != null) { s3GAction = S3GAction.PUT_ACL; Response response = putAcl(bucketName, body); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java index 99d7adc3042f..dbc91c1e55e7 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -69,6 +69,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.om.protocol.S3Auth; import org.apache.hadoop.ozone.s3.RequestIdentifier; +import org.apache.hadoop.ozone.s3.commontypes.RequestParameters; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.hadoop.ozone.s3.metrics.S3GatewayMetrics; @@ -106,6 +107,9 @@ public abstract class EndpointBase { @Context private HttpHeaders headers; + // initialized in @PostConstruct + private RequestParameters.MultivaluedMapImpl queryParams; + private final Set excludeMetadataFields = new HashSet<>(Arrays.asList(OzoneConsts.GDPR_FLAG, STORAGE_CONFIG_HEADER)); private static final Logger LOG = @@ -114,12 +118,14 @@ public abstract class EndpointBase { protected static final AuditLogger AUDIT = new AuditLogger(AuditLoggerType.S3GLOGGER); - protected String getQueryParam(String key) { - return getQueryParameters().getFirst(key); + /** Read-only access to query parameters. */ + protected RequestParameters queryParams() { + return queryParams; } - public MultivaluedMap getQueryParameters() { - return context.getUriInfo().getQueryParameters(); + /** For setting multiple values use {@link #getContext()}. 
*/ + public RequestParameters.Mutable queryParamsForTest() { + return queryParams; } protected OzoneBucket getBucket(OzoneVolume volume, String bucketName) @@ -149,6 +155,7 @@ protected OzoneBucket getBucket(OzoneVolume volume, String bucketName) */ @PostConstruct public void initialization() { + queryParams = RequestParameters.of(context.getUriInfo().getQueryParameters()); // Note: userPrincipal is initialized to be the same value as accessId, // could be updated later in RpcClient#getS3Volume s3Auth = new S3Auth(signatureInfo.getStringToSign(), diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java index c5bb9bcc6294..e53267111009 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java @@ -69,7 +69,7 @@ public void setup() throws IOException { .setClient(client) .setHeaders(headers) .build(); - bucketEndpoint.getQueryParameters().add(QueryParams.ACL, ACL_MARKER); + bucketEndpoint.queryParamsForTest().set(QueryParams.ACL, ACL_MARKER); } @AfterEach @@ -82,8 +82,7 @@ public void clean() throws IOException { @Test public void testGetAcl() throws Exception { when(parameterMap.containsKey(ACL_MARKER)).thenReturn(true); - Response response = - bucketEndpoint.get(BUCKET_NAME, 0, 0); + Response response = bucketEndpoint.get(BUCKET_NAME); assertEquals(HTTP_OK, response.getStatus()); System.out.println(response.getEntity()); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java index 332b6eb36eb7..c62a7e8da1c8 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java @@ -54,11 +54,9 @@ public void listRoot() throws OS3Exception, IOException { .setClient(client) .build(); - endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); - endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, ""); - ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", 100, 0) - .getEntity(); + endpoint.queryParamsForTest().set(QueryParams.DELIMITER, "/"); + endpoint.queryParamsForTest().set(QueryParams.PREFIX, ""); + ListObjectResponse getBucketResponse = (ListObjectResponse) endpoint.get("b1").getEntity(); assertEquals(1, getBucketResponse.getCommonPrefixes().size()); assertEquals("dir1/", @@ -74,10 +72,9 @@ public void listDir() throws OS3Exception, IOException { OzoneClient client = createClientWithKeys("dir1/file2", "dir1/dir2/file2"); BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(client).build(); - endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); - endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, "dir1"); - ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", 100, 0).getEntity(); + endpoint.queryParamsForTest().set(QueryParams.DELIMITER, "/"); + endpoint.queryParamsForTest().set(QueryParams.PREFIX, "dir1"); + ListObjectResponse getBucketResponse = (ListObjectResponse) endpoint.get("b1").getEntity(); assertEquals(1, getBucketResponse.getCommonPrefixes().size()); assertEquals("dir1/", @@ -94,11 +91,9 @@ public void 
listSubDir() throws OS3Exception, IOException { BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); - endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); - endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, "dir1/"); - ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", 100, 0) - .getEntity(); + endpoint.queryParamsForTest().set(QueryParams.DELIMITER, "/"); + endpoint.queryParamsForTest().set(QueryParams.PREFIX, "dir1/"); + ListObjectResponse getBucketResponse = (ListObjectResponse) endpoint.get("b1").getEntity(); assertEquals(1, getBucketResponse.getCommonPrefixes().size()); assertEquals("dir1/dir2/", @@ -127,10 +122,9 @@ public void listObjectOwner() throws OS3Exception, IOException { BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(client).build(); - endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); - endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, "key"); - ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", 100, 0).getEntity(); + endpoint.queryParamsForTest().set(QueryParams.DELIMITER, "/"); + endpoint.queryParamsForTest().set(QueryParams.PREFIX, "key"); + ListObjectResponse getBucketResponse = (ListObjectResponse) endpoint.get("b1").getEntity(); assertEquals(2, getBucketResponse.getContents().size()); assertEquals(user1.getShortUserName(), @@ -147,10 +141,9 @@ public void listWithPrefixAndDelimiter() throws OS3Exception, IOException { BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); - endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); - endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, "dir1"); - ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", 100, 0).getEntity(); + endpoint.queryParamsForTest().set(QueryParams.DELIMITER, "/"); + endpoint.queryParamsForTest().set(QueryParams.PREFIX, "dir1"); + ListObjectResponse getBucketResponse = (ListObjectResponse) endpoint.get("b1").getEntity(); assertEquals(3, getBucketResponse.getCommonPrefixes().size()); } @@ -163,10 +156,9 @@ public void listWithPrefixAndDelimiter1() throws OS3Exception, IOException { BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); - endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); - endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, ""); - ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", 100, 0).getEntity(); + endpoint.queryParamsForTest().set(QueryParams.DELIMITER, "/"); + endpoint.queryParamsForTest().set(QueryParams.PREFIX, ""); + ListObjectResponse getBucketResponse = (ListObjectResponse) endpoint.get("b1").getEntity(); assertEquals(3, getBucketResponse.getCommonPrefixes().size()); assertEquals("file2", getBucketResponse.getContents().get(0) @@ -181,11 +173,10 @@ public void listWithPrefixAndDelimiter2() throws OS3Exception, IOException { BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); - endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); - endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, "dir1bh"); - endpoint.getQueryParameters().putSingle(QueryParams.START_AFTER, "dir1/dir2/file2"); - ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", 100, 0).getEntity(); + endpoint.queryParamsForTest().set(QueryParams.DELIMITER, "/"); + endpoint.queryParamsForTest().set(QueryParams.PREFIX, 
"dir1bh"); + endpoint.queryParamsForTest().set(QueryParams.START_AFTER, "dir1/dir2/file2"); + ListObjectResponse getBucketResponse = (ListObjectResponse) endpoint.get("b1").getEntity(); assertEquals(2, getBucketResponse.getCommonPrefixes().size()); } @@ -200,10 +191,9 @@ public void listWithPrefixAndEmptyStrDelimiter() BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); // Should behave the same if delimiter is null - endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, ""); - endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, "dir1/"); - ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", 100, 0).getEntity(); + endpoint.queryParamsForTest().set(QueryParams.DELIMITER, ""); + endpoint.queryParamsForTest().set(QueryParams.PREFIX, "dir1/"); + ListObjectResponse getBucketResponse = (ListObjectResponse) endpoint.get("b1").getEntity(); assertEquals(0, getBucketResponse.getCommonPrefixes().size()); assertEquals(4, getBucketResponse.getContents().size()); @@ -229,24 +219,24 @@ public void listWithContinuationToken() throws OS3Exception, IOException { // As we have 5 keys, with max keys 2 we should call list 3 times. // First time - endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, ""); - ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", maxKeys, 0).getEntity(); + endpoint.queryParamsForTest().set(QueryParams.PREFIX, ""); + endpoint.queryParamsForTest().setInt(QueryParams.MAX_KEYS, maxKeys); + ListObjectResponse getBucketResponse = (ListObjectResponse) endpoint.get("b1").getEntity(); assertTrue(getBucketResponse.isTruncated()); assertEquals(2, getBucketResponse.getContents().size()); // 2nd time - endpoint.getQueryParameters().putSingle(QueryParams.CONTINUATION_TOKEN, getBucketResponse.getNextToken()); - getBucketResponse = - (ListObjectResponse) endpoint.get("b1", maxKeys, 0).getEntity(); + String value1 = getBucketResponse.getNextToken(); + endpoint.queryParamsForTest().set(QueryParams.CONTINUATION_TOKEN, value1); + getBucketResponse = (ListObjectResponse) endpoint.get("b1").getEntity(); assertTrue(getBucketResponse.isTruncated()); assertEquals(2, getBucketResponse.getContents().size()); //3rd time - endpoint.getQueryParameters().putSingle(QueryParams.CONTINUATION_TOKEN, getBucketResponse.getNextToken()); - getBucketResponse = - (ListObjectResponse) endpoint.get("b1", maxKeys, 0).getEntity(); + String value = getBucketResponse.getNextToken(); + endpoint.queryParamsForTest().set(QueryParams.CONTINUATION_TOKEN, value); + getBucketResponse = (ListObjectResponse) endpoint.get("b1").getEntity(); assertFalse(getBucketResponse.isTruncated()); assertEquals(1, getBucketResponse.getContents().size()); @@ -272,10 +262,10 @@ public void listWithContinuationTokenDirBreak() ListObjectResponse getBucketResponse; - endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); - endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, "test/"); - getBucketResponse = - (ListObjectResponse) endpoint.get("b1", maxKeys, 0).getEntity(); + endpoint.queryParamsForTest().set(QueryParams.DELIMITER, "/"); + endpoint.queryParamsForTest().set(QueryParams.PREFIX, "test/"); + endpoint.queryParamsForTest().setInt(QueryParams.MAX_KEYS, maxKeys); + getBucketResponse = (ListObjectResponse) endpoint.get("b1").getEntity(); assertEquals(0, getBucketResponse.getContents().size()); assertEquals(2, getBucketResponse.getCommonPrefixes().size()); @@ -284,9 +274,9 @@ public void 
listWithContinuationTokenDirBreak() assertEquals("test/dir2/", getBucketResponse.getCommonPrefixes().get(1).getPrefix().getName()); - endpoint.getQueryParameters().putSingle(QueryParams.CONTINUATION_TOKEN, getBucketResponse.getNextToken()); - getBucketResponse = - (ListObjectResponse) endpoint.get("b1", maxKeys, 0).getEntity(); + String value = getBucketResponse.getNextToken(); + endpoint.queryParamsForTest().set(QueryParams.CONTINUATION_TOKEN, value); + getBucketResponse = (ListObjectResponse) endpoint.get("b1").getEntity(); assertEquals(1, getBucketResponse.getContents().size()); assertEquals(1, getBucketResponse.getCommonPrefixes().size()); assertEquals("test/dir3/", @@ -311,25 +301,25 @@ public void listWithContinuationToken1() throws OS3Exception, IOException { // As we have 5 keys, with max keys 2 we should call list 3 times. // First time - endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); - endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, "dir"); - ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", maxKeys, 0).getEntity(); + endpoint.queryParamsForTest().set(QueryParams.DELIMITER, "/"); + endpoint.queryParamsForTest().set(QueryParams.PREFIX, "dir"); + endpoint.queryParamsForTest().setInt(QueryParams.MAX_KEYS, maxKeys); + ListObjectResponse getBucketResponse = (ListObjectResponse) endpoint.get("b1").getEntity(); assertTrue(getBucketResponse.isTruncated()); assertEquals(2, getBucketResponse.getCommonPrefixes().size()); // 2nd time - endpoint.getQueryParameters().putSingle(QueryParams.CONTINUATION_TOKEN, getBucketResponse.getNextToken()); - getBucketResponse = - (ListObjectResponse) endpoint.get("b1", maxKeys, 0).getEntity(); + String value1 = getBucketResponse.getNextToken(); + endpoint.queryParamsForTest().set(QueryParams.CONTINUATION_TOKEN, value1); + getBucketResponse = (ListObjectResponse) endpoint.get("b1").getEntity(); assertTrue(getBucketResponse.isTruncated()); assertEquals(2, getBucketResponse.getCommonPrefixes().size()); //3rd time - endpoint.getQueryParameters().putSingle(QueryParams.CONTINUATION_TOKEN, getBucketResponse.getNextToken()); - getBucketResponse = - (ListObjectResponse) endpoint.get("b1", maxKeys, 0).getEntity(); + String value = getBucketResponse.getNextToken(); + endpoint.queryParamsForTest().set(QueryParams.CONTINUATION_TOKEN, value); + getBucketResponse = (ListObjectResponse) endpoint.get("b1").getEntity(); assertFalse(getBucketResponse.isTruncated()); assertEquals(1, getBucketResponse.getCommonPrefixes().size()); @@ -343,10 +333,11 @@ public void listWithContinuationTokenFail() throws IOException { BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); - endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, "/"); - endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, "dir"); - endpoint.getQueryParameters().putSingle(QueryParams.CONTINUATION_TOKEN, "random"); - OS3Exception e = assertThrows(OS3Exception.class, () -> endpoint.get("b1", 2, 1000).getEntity()); + endpoint.queryParamsForTest().set(QueryParams.DELIMITER, "/"); + endpoint.queryParamsForTest().set(QueryParams.PREFIX, "dir"); + endpoint.queryParamsForTest().set(QueryParams.CONTINUATION_TOKEN, "random"); + endpoint.queryParamsForTest().setInt(QueryParams.MAX_KEYS, 2); + OS3Exception e = assertThrows(OS3Exception.class, () -> endpoint.get("b1").getEntity()); assertEquals("random", e.getResource()); assertEquals("Invalid Argument", e.getErrorMessage()); } @@ -360,7 +351,7 @@ public void 
testStartAfter() throws IOException, OS3Exception { BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(ozoneClient).build(); ListObjectResponse getBucketResponse = - (ListObjectResponse) endpoint.get("b1", 1000, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1").getEntity(); assertFalse(getBucketResponse.isTruncated()); assertEquals(5, getBucketResponse.getContents().size()); @@ -369,16 +360,16 @@ public void testStartAfter() throws IOException, OS3Exception { // have 4 keys. String startAfter = "dir0/file1"; - endpoint.getQueryParameters().putSingle(QueryParams.START_AFTER, startAfter); + endpoint.queryParamsForTest().set(QueryParams.START_AFTER, startAfter); getBucketResponse = - (ListObjectResponse) endpoint.get("b1", 1000, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1").getEntity(); assertFalse(getBucketResponse.isTruncated()); assertEquals(4, getBucketResponse.getContents().size()); - endpoint.getQueryParameters().putSingle(QueryParams.START_AFTER, "random"); + endpoint.queryParamsForTest().set(QueryParams.START_AFTER, "random"); getBucketResponse = - (ListObjectResponse) endpoint.get("b1", 1000, 0).getEntity(); + (ListObjectResponse) endpoint.get("b1").getEntity(); assertFalse(getBucketResponse.isTruncated()); assertEquals(0, getBucketResponse.getContents().size()); @@ -419,11 +410,11 @@ public void testEncodingType() throws IOException, OS3Exception { String startAfter = "data="; String encodingType = ENCODING_TYPE; - endpoint.getQueryParameters().putSingle(QueryParams.DELIMITER, delimiter); - endpoint.getQueryParameters().putSingle(QueryParams.PREFIX, prefix); - endpoint.getQueryParameters().putSingle(QueryParams.ENCODING_TYPE, encodingType); - endpoint.getQueryParameters().putSingle(QueryParams.START_AFTER, startAfter); - ListObjectResponse response = (ListObjectResponse) endpoint.get("b1", 1000, 0).getEntity(); + endpoint.queryParamsForTest().set(QueryParams.DELIMITER, delimiter); + endpoint.queryParamsForTest().set(QueryParams.PREFIX, prefix); + endpoint.queryParamsForTest().set(QueryParams.ENCODING_TYPE, encodingType); + endpoint.queryParamsForTest().set(QueryParams.START_AFTER, startAfter); + ListObjectResponse response = (ListObjectResponse) endpoint.get("b1").getEntity(); // Assert encodingType == url. // The Object name will be encoded by ObjectKeyNameAdapter @@ -439,8 +430,8 @@ public void testEncodingType() throws IOException, OS3Exception { assertEquals(encodingType, response.getContents().get(0).getKey().getEncodingType()); - endpoint.getQueryParameters().remove(QueryParams.ENCODING_TYPE); - response = (ListObjectResponse) endpoint.get("b1", 1000, 0).getEntity(); + endpoint.queryParamsForTest().unset(QueryParams.ENCODING_TYPE); + response = (ListObjectResponse) endpoint.get("b1").getEntity(); // Assert encodingType == null. 
// The Object name will not be encoded by ObjectKeyNameAdapter @@ -461,9 +452,8 @@ public void testEncodingTypeException() throws IOException { client.getObjectStore().createS3Bucket("b1"); BucketEndpoint endpoint = newBucketEndpointBuilder().setClient(client).build(); - endpoint.getQueryParameters().putSingle(QueryParams.ENCODING_TYPE, "unSupportType"); - OS3Exception e = assertThrows(OS3Exception.class, () -> endpoint.get( - "b1", 1000, 0).getEntity()); + endpoint.queryParamsForTest().set(QueryParams.ENCODING_TYPE, "unSupportType"); + OS3Exception e = assertThrows(OS3Exception.class, () -> endpoint.get("b1").getEntity()); assertEquals(S3ErrorTable.INVALID_ARGUMENT.getCode(), e.getCode()); } @@ -476,9 +466,8 @@ public void testListObjectsWithNegativeMaxKeys() throws Exception { .build(); // maxKeys < 0 should throw InvalidArgument - OS3Exception e1 = assertThrows(OS3Exception.class, () -> - bucketEndpoint.get("bucket", -1, 1000) - ); + bucketEndpoint.queryParamsForTest().setInt(QueryParams.MAX_KEYS, -1); + OS3Exception e1 = assertThrows(OS3Exception.class, () -> bucketEndpoint.get("bucket")); assertEquals(S3ErrorTable.INVALID_ARGUMENT.getCode(), e1.getCode()); } @@ -491,8 +480,8 @@ public void testListObjectsWithZeroMaxKeys() throws Exception { .build(); // maxKeys = 0, should return empty list and not throw. - ListObjectResponse response = (ListObjectResponse) bucketEndpoint.get( - "bucket", 0, 1000).getEntity(); + bucketEndpoint.queryParamsForTest().setInt(QueryParams.MAX_KEYS, 0); + ListObjectResponse response = (ListObjectResponse) bucketEndpoint.get("bucket").getEntity(); assertEquals(0, response.getContents().size()); assertFalse(response.isTruncated()); @@ -505,15 +494,15 @@ public void testListObjectsWithZeroMaxKeysInNonEmptyBucket() throws Exception { .setClient(client) .build(); - ListObjectResponse response = (ListObjectResponse) bucketEndpoint.get( - "b1", 0, 1000).getEntity(); + bucketEndpoint.queryParamsForTest().setInt(QueryParams.MAX_KEYS, 0); + ListObjectResponse response = (ListObjectResponse) bucketEndpoint.get("b1").getEntity(); // Should return empty list and not throw. 
assertEquals(0, response.getContents().size()); assertFalse(response.isTruncated()); - ListObjectResponse fullResponse = (ListObjectResponse) bucketEndpoint.get( - "b1", 1000, 1000).getEntity(); + bucketEndpoint.queryParamsForTest().unset(QueryParams.MAX_KEYS); + ListObjectResponse fullResponse = (ListObjectResponse) bucketEndpoint.get("b1").getEntity(); assertEquals(5, fullResponse.getContents().size()); } @@ -540,9 +529,8 @@ public void testListObjectsRespectsConfiguredMaxKeysLimit() throws Exception { // Act: Request more keys than the configured max-keys limit final int requestedMaxKeys = Integer.parseInt(configuredMaxKeysLimit) + 1; - ListObjectResponse response = (ListObjectResponse) - bucketEndpoint.get("b1", requestedMaxKeys, - 1000).getEntity(); + bucketEndpoint.queryParamsForTest().setInt(QueryParams.MAX_KEYS, requestedMaxKeys); + ListObjectResponse response = (ListObjectResponse) bucketEndpoint.get("b1").getEntity(); // Assert: The number of returned keys should be capped at the configured limit assertEquals(Integer.parseInt(configuredMaxKeysLimit), response.getContents().size()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java index 9872a711c639..8e9eef2d974d 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java @@ -169,8 +169,7 @@ public void testListKey() throws IOException { BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder() .setClient(client) .build(); - OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.get( - "bucketName", 1000, 0)); + OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.get("bucketName")); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -214,9 +213,8 @@ public void testGetAcl() throws Exception { .setClient(client) .setHeaders(headers) .build(); - bucketEndpoint.getQueryParameters().putSingle(QueryParams.ACL, "acl"); - OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.get( - "bucketName", 1000, 0)); + bucketEndpoint.queryParamsForTest().set(QueryParams.ACL, "acl"); + OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.get("bucketName")); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -237,7 +235,7 @@ public void testSetAcl() throws Exception { .setClient(client) .setHeaders(headers) .build(); - bucketEndpoint.getQueryParameters().putSingle(QueryParams.ACL, "acl"); + bucketEndpoint.queryParamsForTest().set(QueryParams.ACL, "acl"); try { bucketEndpoint.put("bucketName", null); } catch (Exception e) { diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java index 018ad0f1f5e2..bfc471e22d5d 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java @@ -134,7 +134,7 @@ public void testListBucketSuccess() throws Exception { public void testGetBucketSuccess() throws Exception { long oriMetric = metrics.getGetBucketSuccess(); - bucketEndpoint.get(bucketName, 1000, 0).getEntity(); + bucketEndpoint.get(bucketName).getEntity(); long 
curMetric = metrics.getGetBucketSuccess(); assertEquals(1L, curMetric - oriMetric); @@ -145,8 +145,7 @@ public void testGetBucketFailure() throws Exception { long oriMetric = metrics.getGetBucketFailure(); // Searching for a bucket that does not exist - OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.get( - "newBucket", 1000, 0)); + OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.get("newBucket")); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getErrorMessage(), e.getErrorMessage()); @@ -207,9 +206,8 @@ public void testDeleteBucketFailure() throws Exception { public void testGetAclSuccess() throws Exception { long oriMetric = metrics.getGetAclSuccess(); - bucketEndpoint.getQueryParameters().add(QueryParams.ACL, ACL_MARKER); - Response response = - bucketEndpoint.get(bucketName, 0, 0); + bucketEndpoint.queryParamsForTest().set(QueryParams.ACL, ACL_MARKER); + Response response = bucketEndpoint.get(bucketName); long curMetric = metrics.getGetAclSuccess(); assertEquals(HTTP_OK, response.getStatus()); assertEquals(1L, curMetric - oriMetric); @@ -219,9 +217,9 @@ public void testGetAclSuccess() throws Exception { public void testGetAclFailure() throws Exception { long oriMetric = metrics.getGetAclFailure(); - bucketEndpoint.getQueryParameters().add(QueryParams.ACL, ACL_MARKER); + bucketEndpoint.queryParamsForTest().set(QueryParams.ACL, ACL_MARKER); // Failing the getACL endpoint by applying ACL on a non-Existent Bucket - OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.get("random_bucket", 0, 0)); + OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.get("random_bucket")); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getErrorMessage(), e.getErrorMessage()); @@ -237,7 +235,7 @@ public void testPutAclSuccess() throws Exception { InputStream inputBody = TestBucketAcl.class.getClassLoader() .getResourceAsStream("userAccessControlList.xml"); - bucketEndpoint.getQueryParameters().add(QueryParams.ACL, ACL_MARKER); + bucketEndpoint.queryParamsForTest().set(QueryParams.ACL, ACL_MARKER); bucketEndpoint.put("b1", inputBody); inputBody.close(); long curMetric = metrics.getPutAclSuccess(); @@ -251,7 +249,7 @@ public void testPutAclFailure() throws Exception { InputStream inputBody = TestBucketAcl.class.getClassLoader() .getResourceAsStream("userAccessControlList.xml"); - bucketEndpoint.getQueryParameters().add(QueryParams.ACL, ACL_MARKER); + bucketEndpoint.queryParamsForTest().set(QueryParams.ACL, ACL_MARKER); try { assertThrows(OS3Exception.class, () -> bucketEndpoint.put("unknown_bucket", inputBody)); } finally { From 9a4986b3bf8b7bfd1742984d800e02b85cc23f79 Mon Sep 17 00:00:00 2001 From: Tsz-Wo Nicholas Sze Date: Mon, 22 Dec 2025 04:35:08 -0800 Subject: [PATCH 23/36] HDDS-14117. Add nonStreamRead and fileRead cases to tests. 
(#9476) --- .../scm/storage/MultipartInputStream.java | 4 + .../ozone/client/rpc/read/TestStreamRead.java | 201 ++++++++++++------ 2 files changed, 144 insertions(+), 61 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java index a28658b1ebbf..221a48be828d 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java @@ -82,6 +82,10 @@ public MultipartInputStream(String keyName, this.length = streamLength; } + public boolean isStreamBlockInputStream() { + return isStreamBlockInputStream; + } + @Override protected synchronized int readWithStrategy(ByteReaderStrategy strategy) throws IOException { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestStreamRead.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestStreamRead.java index 29a438766303..9fc217b6df3a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestStreamRead.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestStreamRead.java @@ -19,12 +19,21 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import java.io.BufferedInputStream; +import java.io.File; +import java.io.InputStream; import java.io.OutputStream; +import java.nio.file.Files; import java.security.MessageDigest; +import java.util.Arrays; import java.util.Collections; +import java.util.List; import java.util.concurrent.ThreadLocalRandom; import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; @@ -33,15 +42,22 @@ import org.apache.hadoop.hdds.scm.storage.StreamBlockInputStream; import org.apache.hadoop.hdds.utils.db.CodecBuffer; import org.apache.hadoop.ozone.ClientConfigForTesting; +import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.io.KeyInputStream; +import org.apache.hadoop.ozone.client.protocol.ClientProtocol; +import org.apache.hadoop.ozone.container.common.impl.ContainerData; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.om.TestBucket; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.ozone.test.GenericTestUtils; +import org.apache.ratis.util.JavaUtils; import org.apache.ratis.util.SizeInBytes; +import org.apache.ratis.util.function.CheckedBiConsumer; import org.junit.jupiter.api.Test; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -52,15 +68,11 @@ public class TestStreamRead { { 
GenericTestUtils.setLogLevel(LoggerFactory.getLogger("com"), Level.ERROR); - GenericTestUtils.setLogLevel(LoggerFactory.getLogger("org.apache.hadoop.ipc"), Level.ERROR); - GenericTestUtils.setLogLevel(LoggerFactory.getLogger("org.apache.hadoop.hdds.server.http"), Level.ERROR); - GenericTestUtils.setLogLevel(LoggerFactory.getLogger("org.apache.hadoop.hdds.scm.container"), Level.ERROR); - GenericTestUtils.setLogLevel(LoggerFactory.getLogger("org.apache.hadoop.hdds.scm.ha"), Level.ERROR); - GenericTestUtils.setLogLevel(LoggerFactory.getLogger("org.apache.hadoop.hdds.scm.safemode"), Level.ERROR); - GenericTestUtils.setLogLevel(LoggerFactory.getLogger("org.apache.hadoop.hdds.utils"), Level.ERROR); - GenericTestUtils.setLogLevel(LoggerFactory.getLogger("org.apache.hadoop.ozone.container.common"), Level.ERROR); - GenericTestUtils.setLogLevel(LoggerFactory.getLogger("org.apache.hadoop.ozone.om"), Level.ERROR); - GenericTestUtils.setLogLevel(LoggerFactory.getLogger("org.apache.ratis"), Level.ERROR); + GenericTestUtils.setLogLevel(LoggerFactory.getLogger("org"), Level.ERROR); + + GenericTestUtils.setLogLevel(LoggerFactory.getLogger("BackgroundPipelineScrubber"), Level.ERROR); + GenericTestUtils.setLogLevel(LoggerFactory.getLogger("ExpiredContainerReplicaOpScrubber"), Level.ERROR); + GenericTestUtils.setLogLevel(LoggerFactory.getLogger("SCMHATransactionMonitor"), Level.ERROR); GenericTestUtils.setLogLevel(LoggerFactory.getLogger(CodecBuffer.class), Level.ERROR); } @@ -68,8 +80,10 @@ public class TestStreamRead { static final int FLUSH_SIZE = 2 * CHUNK_SIZE; // 2MB static final int MAX_FLUSH_SIZE = 2 * FLUSH_SIZE; // 4MB - static final int BLOCK_SIZE = 64 << 20; static final SizeInBytes KEY_SIZE = SizeInBytes.valueOf("128M"); + static final int BLOCK_SIZE = KEY_SIZE.getSizeInt(); + + static final String DUMMY_KEY = "dummyKey"; static MiniOzoneCluster newCluster(int bytesPerChecksum) throws Exception { final OzoneConfiguration conf = new OzoneConfiguration(); @@ -79,9 +93,8 @@ static MiniOzoneCluster newCluster(int bytesPerChecksum) throws Exception { conf.setFromObject(config); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); - conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 5); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 1); conf.setQuietMode(true); - conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 64, StorageUnit.MB); ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(BLOCK_SIZE) @@ -114,53 +127,127 @@ void testReadKey256k() throws Exception { } void runTestReadKey(SizeInBytes keySize, SizeInBytes bytesPerChecksum) throws Exception { + System.out.println("cluster starting ..."); try (MiniOzoneCluster cluster = newCluster(bytesPerChecksum.getSizeInt())) { cluster.waitForClusterToBeReady(); - System.out.println("cluster ready"); + final List datanodes = cluster.getHddsDatanodes(); + assertEquals(1, datanodes.size()); + final HddsDatanodeService datanode = datanodes.get(0); + OzoneConfiguration conf = cluster.getConf(); OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); clientConfig.setStreamReadBlock(true); - OzoneConfiguration copy = new OzoneConfiguration(conf); - copy.setFromObject(clientConfig); + final OzoneConfiguration steamReadConf = new OzoneConfiguration(conf); + steamReadConf.setFromObject(clientConfig); + + clientConfig.setStreamReadBlock(false); + final OzoneConfiguration nonSteamReadConf = new OzoneConfiguration(conf); + nonSteamReadConf.setFromObject(clientConfig); - final int n = 5; - final 
SizeInBytes writeBufferSize = SizeInBytes.valueOf("8MB"); - final SizeInBytes[] readBufferSizes = { + final SizeInBytes[] bufferSizes = { SizeInBytes.valueOf("32M"), SizeInBytes.valueOf("8M"), SizeInBytes.valueOf("1M"), SizeInBytes.valueOf("4k"), }; - try (OzoneClient client = OzoneClientFactory.getRpcClient(copy)) { - final TestBucket bucket = TestBucket.newBuilder(client).build(); + try (OzoneClient streamReadClient = OzoneClientFactory.getRpcClient(steamReadConf); + OzoneClient nonStreamReadClient = OzoneClientFactory.getRpcClient(nonSteamReadConf)) { + final TestBucket testBucket = TestBucket.newBuilder(streamReadClient).build(); + final String volume = testBucket.delegate().getVolumeName(); + final String bucket = testBucket.delegate().getName(); + final String keyName = "key0"; + + // get the client ready by writing a dummy key + createKey(testBucket.delegate(), DUMMY_KEY, SizeInBytes.ONE_KB, SizeInBytes.ONE_KB); + + for (SizeInBytes bufferSize : bufferSizes) { + // create key + System.out.println("---------------------------------------------------------"); + createKey(testBucket.delegate(), keyName, keySize, bufferSize); + + // get block file and generate md5 + final OmKeyInfo info = nonStreamReadClient.getProxy().getKeyInfo(volume, bucket, keyName, false); + final List locations = info.getLatestVersionLocations().getLocationList(); + assertEquals(1, locations.size()); + final BlockID blockId = locations.get(0).getBlockID(); + final ContainerData containerData = datanode.getDatanodeStateMachine().getContainer().getContainerSet() + .getContainer(blockId.getContainerID()).getContainerData(); + final File blockFile = ContainerLayoutVersion.FILE_PER_BLOCK.getChunkFile(containerData, blockId, null); + assertTrue(blockFile.exists()); + assertEquals(BLOCK_SIZE, blockFile.length()); + final String expectedMd5 = generateMd5(keySize, SizeInBytes.ONE_MB, blockFile); - for (int i = 0; i < n; i++) { - final String keyName = "key" + i; + // run tests System.out.println("---------------------------------------------------------"); System.out.printf("%s with %s bytes and %s bytesPerChecksum%n", keyName, keySize, bytesPerChecksum); - final String md5 = createKey(bucket.delegate(), keyName, keySize, writeBufferSize); - for (SizeInBytes readBufferSize : readBufferSizes) { - runTestReadKey(keyName, keySize, readBufferSize, null, bucket); - runTestReadKey(keyName, keySize, readBufferSize, md5, bucket); + final CheckedBiConsumer streamRead = (readBufferSize, md5) + -> streamRead(keySize, readBufferSize, md5, testBucket, keyName); + final CheckedBiConsumer nonStreamRead = (readBufferSize, md5) + -> nonStreamRead(keySize, readBufferSize, md5, nonStreamReadClient, volume, bucket, keyName); + final CheckedBiConsumer fileRead = (readBufferSize, md5) + -> fileRead(keySize, readBufferSize, md5, blockFile); + final List> operations + = Arrays.asList(streamRead, nonStreamRead, fileRead); + Collections.shuffle(operations); + + for (CheckedBiConsumer op : operations) { + for (int i = 0; i < 5; i++) { + op.accept(bufferSize, null); + } + op.accept(bufferSize, expectedMd5); } } } } } + static void streamRead(SizeInBytes keySize, SizeInBytes bufferSize, String expectedMD5, + TestBucket bucket, String keyName) throws Exception { + try (KeyInputStream in = bucket.getKeyInputStream(keyName)) { + assertTrue(in.isStreamBlockInputStream()); + runTestReadKey(keySize, bufferSize, expectedMD5, in); + } + } + + static void nonStreamRead(SizeInBytes keySize, SizeInBytes bufferSize, String expectedMD5, + OzoneClient 
nonStreamReadClient, String volume, String bucket, String keyName) throws Exception { + final ClientProtocol proxy = nonStreamReadClient.getProxy(); + try (KeyInputStream in = (KeyInputStream) proxy.getKey(volume, bucket, keyName).getInputStream()) { + assertFalse(in.isStreamBlockInputStream()); + runTestReadKey(keySize, bufferSize, expectedMD5, in); + } + } + + static void fileRead(SizeInBytes keySize, SizeInBytes bufferSize, String expectedMD5, + File blockFile) throws Exception { + try (InputStream in = new BufferedInputStream(Files.newInputStream(blockFile.toPath()), bufferSize.getSizeInt())) { + runTestReadKey(keySize, bufferSize, expectedMD5, in); + } + } + + static String generateMd5(SizeInBytes keySize, SizeInBytes bufferSize, File blockFile) throws Exception { + try (InputStream in = new BufferedInputStream(Files.newInputStream(blockFile.toPath()), bufferSize.getSizeInt())) { + return runTestReadKey("generateMd5", keySize, bufferSize, true, in); + } + } + static void print(String name, long keySizeByte, long elapsedNanos, SizeInBytes bufferSize, String computedMD5) { final double keySizeMb = keySizeByte * 1.0 / (1 << 20); final double elapsedSeconds = elapsedNanos / 1_000_000_000.0; - System.out.printf("%16s: %8.2f MB/s (%7.3f s, buffer %16s, keySize %8.2f MB, md5=%s)%n", - name, keySizeMb / elapsedSeconds, elapsedSeconds, bufferSize, keySizeMb, computedMD5); + if (computedMD5 == null) { + System.out.printf("%16s: %8.2f MB/s (%7.3f s, buffer %16s, keySize %8.2f MB)%n", + name, keySizeMb / elapsedSeconds, elapsedSeconds, bufferSize, keySizeMb); + } else { + System.out.printf("%16s md5=%s%n", name, computedMD5); + } } - static String createKey(OzoneBucket bucket, String keyName, SizeInBytes keySize, SizeInBytes bufferSize) + static void createKey(OzoneBucket bucket, String keyName, SizeInBytes keySize, SizeInBytes bufferSize) throws Exception { final byte[] buffer = new byte[bufferSize.getSizeInt()]; ThreadLocalRandom.current().nextBytes(buffer); @@ -176,50 +263,42 @@ static String createKey(OzoneBucket bucket, String keyName, SizeInBytes keySize, } } final long elapsedNanos = System.nanoTime() - startTime; - - final MessageDigest md5 = MessageDigest.getInstance("MD5"); - for (long pos = 0; pos < keySizeByte;) { - final int writeSize = Math.toIntExact(Math.min(buffer.length, keySizeByte - pos)); - md5.update(buffer, 0, writeSize); - pos += writeSize; + if (!keyName.startsWith(DUMMY_KEY)) { + print("createStreamKey", keySizeByte, elapsedNanos, bufferSize, null); } + } - final String computedMD5 = StringUtils.bytes2Hex(md5.digest()); - print("createStreamKey", keySizeByte, elapsedNanos, bufferSize, computedMD5); - return computedMD5; + static void runTestReadKey(SizeInBytes keySize, SizeInBytes bufferSize, String expectedMD5, + InputStream in) throws Exception { + final String method = JavaUtils.getCallerStackTraceElement().getMethodName(); + final String computedMD5 = runTestReadKey(method, keySize, bufferSize, expectedMD5 != null, in); + assertEquals(expectedMD5, computedMD5); } - private void runTestReadKey(String keyName, SizeInBytes keySize, SizeInBytes bufferSize, String expectedMD5, - TestBucket bucket) throws Exception { + static String runTestReadKey(String name, SizeInBytes keySize, SizeInBytes bufferSize, boolean generateMd5, + InputStream in) throws Exception { final long keySizeByte = keySize.getSize(); final MessageDigest md5 = MessageDigest.getInstance("MD5"); // Read the data fully into a large enough byte array final byte[] buffer = new byte[bufferSize.getSizeInt()]; 
final long startTime = System.nanoTime(); - try (KeyInputStream keyInputStream = bucket.getKeyInputStream(keyName)) { - int pos = 0; - for (; pos < keySizeByte;) { - final int read = keyInputStream.read(buffer, 0, buffer.length); - if (read == -1) { - break; - } + int pos = 0; + for (; pos < keySizeByte;) { + final int read = in.read(buffer, 0, buffer.length); + if (read == -1) { + break; + } - if (expectedMD5 != null) { - md5.update(buffer, 0, read); - } - pos += read; + if (generateMd5) { + md5.update(buffer, 0, read); } - assertEquals(keySizeByte, pos); + pos += read; } + assertEquals(keySizeByte, pos); final long elapsedNanos = System.nanoTime() - startTime; - final String computedMD5; - if (expectedMD5 == null) { - computedMD5 = null; - } else { - computedMD5 = StringUtils.bytes2Hex(md5.digest()); - assertEquals(expectedMD5, computedMD5); - } - print("readStreamKey", keySizeByte, elapsedNanos, bufferSize, computedMD5); + final String computedMD5 = generateMd5 ? StringUtils.bytes2Hex(md5.digest()) : null; + print(name, keySizeByte, elapsedNanos, bufferSize, computedMD5); + return computedMD5; } } From a959346268c643f1c526870bceb37a5fd717272c Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 22 Dec 2025 16:49:25 +0100 Subject: [PATCH 24/36] HDDS-14216. Update ban of annotations (#9530) --- hadoop-hdds/client/pom.xml | 2 -- hadoop-hdds/common/pom.xml | 2 -- hadoop-hdds/container-service/pom.xml | 1 - hadoop-hdds/framework/pom.xml | 1 - hadoop-hdds/server-scm/pom.xml | 1 - hadoop-ozone/cli-admin/pom.xml | 2 -- hadoop-ozone/cli-shell/pom.xml | 2 -- hadoop-ozone/common/pom.xml | 2 -- hadoop-ozone/csi/pom.xml | 1 - hadoop-ozone/freon/pom.xml | 2 ++ hadoop-ozone/insight/pom.xml | 4 +++- hadoop-ozone/recon/pom.xml | 4 +++- hadoop-ozone/tools/pom.xml | 2 ++ pom.xml | 4 +++- 14 files changed, 13 insertions(+), 17 deletions(-) diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml index 5e50aaabd942..6240d7be1144 100644 --- a/hadoop-hdds/client/pom.xml +++ b/hadoop-hdds/client/pom.xml @@ -137,8 +137,6 @@ Only selected annotation processors are enabled, see configuration of maven-compiler-plugin. - org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator - org.apache.hadoop.hdds.scm.metadata.Replicate org.kohsuke.MetaInfServices diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index b5f7cfa059e0..95d7ebe39f47 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -287,8 +287,6 @@ Only selected annotation processors are enabled, see configuration of maven-compiler-plugin. - org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator - org.apache.hadoop.hdds.scm.metadata.Replicate org.kohsuke.MetaInfServices diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index ce6c7863b94c..b2b390af9e4d 100644 --- a/hadoop-hdds/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -290,7 +290,6 @@ Only selected annotation processors are enabled, see configuration of maven-compiler-plugin. 
- org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator org.apache.hadoop.hdds.scm.metadata.Replicate org.kohsuke.MetaInfServices diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml index c30cde6de28f..ea7f08edd82b 100644 --- a/hadoop-hdds/framework/pom.xml +++ b/hadoop-hdds/framework/pom.xml @@ -378,7 +378,6 @@ Only selected annotation processors are enabled, see configuration of maven-compiler-plugin. - org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator org.kohsuke.MetaInfServices diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index 68c17ecdf3ab..d8cfb03f5b74 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -263,7 +263,6 @@ Only selected annotation processors are enabled, see configuration of maven-compiler-plugin. - org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator org.kohsuke.MetaInfServices diff --git a/hadoop-ozone/cli-admin/pom.xml b/hadoop-ozone/cli-admin/pom.xml index 9d713e43bf91..e68105141389 100644 --- a/hadoop-ozone/cli-admin/pom.xml +++ b/hadoop-ozone/cli-admin/pom.xml @@ -185,8 +185,6 @@ org.apache.hadoop.hdds.conf.Config org.apache.hadoop.hdds.conf.ConfigGroup - org.apache.hadoop.hdds.scm.metadata.Replicate - org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator diff --git a/hadoop-ozone/cli-shell/pom.xml b/hadoop-ozone/cli-shell/pom.xml index 89d326efbd31..ba3475203a81 100644 --- a/hadoop-ozone/cli-shell/pom.xml +++ b/hadoop-ozone/cli-shell/pom.xml @@ -173,8 +173,6 @@ org.apache.hadoop.hdds.conf.Config org.apache.hadoop.hdds.conf.ConfigGroup - org.apache.hadoop.hdds.scm.metadata.Replicate - org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index afbb9c4f14f8..6e673fe33652 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -248,8 +248,6 @@ Only selected annotation processors are enabled, see configuration of maven-compiler-plugin. - org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator - org.apache.hadoop.hdds.scm.metadata.Replicate org.kohsuke.MetaInfServices diff --git a/hadoop-ozone/csi/pom.xml b/hadoop-ozone/csi/pom.xml index 5352add1a0ea..ea6f8cbad75f 100644 --- a/hadoop-ozone/csi/pom.xml +++ b/hadoop-ozone/csi/pom.xml @@ -254,7 +254,6 @@ Only selected annotation processors are enabled, see configuration of maven-compiler-plugin. - org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator org.apache.hadoop.hdds.scm.metadata.Replicate org.kohsuke.MetaInfServices diff --git a/hadoop-ozone/freon/pom.xml b/hadoop-ozone/freon/pom.xml index 968b2873567c..7e4d7142835a 100644 --- a/hadoop-ozone/freon/pom.xml +++ b/hadoop-ozone/freon/pom.xml @@ -262,6 +262,8 @@ org.apache.hadoop.hdds.conf.Config org.apache.hadoop.hdds.conf.ConfigGroup org.apache.hadoop.hdds.scm.metadata.Replicate + org.apache.hadoop.ozone.om.request.validation.OMClientVersionValidator + org.apache.hadoop.ozone.om.request.validation.OMLayoutVersionValidator org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator diff --git a/hadoop-ozone/insight/pom.xml b/hadoop-ozone/insight/pom.xml index 75e6ee1410eb..df338e221eda 100644 --- a/hadoop-ozone/insight/pom.xml +++ b/hadoop-ozone/insight/pom.xml @@ -133,8 +133,10 @@ Only selected annotation processors are enabled, see configuration of maven-compiler-plugin. 
- org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator org.apache.hadoop.hdds.scm.metadata.Replicate + org.apache.hadoop.ozone.om.request.validation.OMClientVersionValidator + org.apache.hadoop.ozone.om.request.validation.OMLayoutVersionValidator + org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator org.kohsuke.MetaInfServices diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml index 13b39d5c91a4..90eb48d0921b 100644 --- a/hadoop-ozone/recon/pom.xml +++ b/hadoop-ozone/recon/pom.xml @@ -337,8 +337,10 @@ Only selected annotation processors are enabled, see configuration of maven-compiler-plugin. - org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator org.apache.hadoop.hdds.scm.metadata.Replicate + org.apache.hadoop.ozone.om.request.validation.OMClientVersionValidator + org.apache.hadoop.ozone.om.request.validation.OMLayoutVersionValidator + org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator org.kohsuke.MetaInfServices diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index 1eb9c0605e60..e82de6c6e211 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -319,6 +319,8 @@ org.apache.hadoop.hdds.conf.Config org.apache.hadoop.hdds.conf.ConfigGroup org.apache.hadoop.hdds.scm.metadata.Replicate + org.apache.hadoop.ozone.om.request.validation.OMClientVersionValidator + org.apache.hadoop.ozone.om.request.validation.OMLayoutVersionValidator org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator diff --git a/pom.xml b/pom.xml index 03cc1b2d9e27..9fcee28d5af3 100644 --- a/pom.xml +++ b/pom.xml @@ -2140,8 +2140,10 @@ org.apache.hadoop.hdds.conf.Config org.apache.hadoop.hdds.conf.ConfigGroup - org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator org.apache.hadoop.hdds.scm.metadata.Replicate + org.apache.hadoop.ozone.om.request.validation.OMClientVersionValidator + org.apache.hadoop.ozone.om.request.validation.OMLayoutVersionValidator + org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator org.kohsuke.MetaInfServices From 9b2f4e2e2e4cd84337ad78441b1df9fe43b46984 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 22 Dec 2025 18:55:21 +0100 Subject: [PATCH 25/36] HDDS-14227. 
Introduce factory method for RDBBatchOperation (#9545) --- .../hdds/utils/db/RDBBatchOperation.java | 16 ++++++++++------ .../apache/hadoop/hdds/utils/db/RDBStore.java | 2 +- .../hdds/utils/db/TestRDBBatchOperation.java | 2 +- .../scm/ha/SCMHADBTransactionBufferStub.java | 2 +- .../hadoop/ozone/recon/TestReconTasks.java | 6 +++--- .../hadoop/ozone/om/TestOmSnapshotManager.java | 8 ++++---- .../impl/OzoneManagerServiceProviderImpl.java | 2 +- .../recon/tasks/ContainerKeyMapperHelper.java | 2 +- .../recon/tasks/FileSizeCountTaskHelper.java | 2 +- .../tasks/NSSummaryTaskDbEventHandler.java | 2 +- .../ozone/recon/tasks/OmTableInsightTask.java | 2 +- .../ozone/recon/api/TestContainerEndpoint.java | 4 ++-- .../TestReconContainerMetadataManagerImpl.java | 18 +++++++++--------- .../TestReconNamespaceSummaryManagerImpl.java | 2 +- .../recon/tasks/AbstractNSSummaryTaskTest.java | 2 +- 15 files changed, 38 insertions(+), 34 deletions(-) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java index f7b025ed98f8..49693bd29674 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java @@ -49,7 +49,7 @@ * Note that a {@link RDBBatchOperation} object only for one batch. * Also, this class is not threadsafe. */ -public class RDBBatchOperation implements BatchOperation { +public final class RDBBatchOperation implements BatchOperation { static final Logger LOG = LoggerFactory.getLogger(RDBBatchOperation.class); private static final AtomicInteger BATCH_COUNT = new AtomicInteger(); @@ -62,6 +62,14 @@ public class RDBBatchOperation implements BatchOperation { private enum Op { DELETE, PUT, DELETE_RANGE } + public static RDBBatchOperation newAtomicOperation() { + return newAtomicOperation(new ManagedWriteBatch()); + } + + public static RDBBatchOperation newAtomicOperation(ManagedWriteBatch writeBatch) { + return new RDBBatchOperation(writeBatch); + } + private static void debug(Supplier message) { if (LOG.isTraceEnabled()) { LOG.trace("\n{}", message.get()); @@ -627,11 +635,7 @@ String getCommitString() { } } - public RDBBatchOperation() { - writeBatch = new ManagedWriteBatch(); - } - - public RDBBatchOperation(ManagedWriteBatch writeBatch) { + private RDBBatchOperation(ManagedWriteBatch writeBatch) { this.writeBatch = writeBatch; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java index c833fed6ab49..efccdf31aef2 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java @@ -274,7 +274,7 @@ public long getEstimatedKeyCount() throws RocksDatabaseException { @Override public BatchOperation initBatchOperation() { - return new RDBBatchOperation(); + return RDBBatchOperation.newAtomicOperation(); } @Override diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBBatchOperation.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBBatchOperation.java index bbf53b9a9608..bd33ab070ce4 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBBatchOperation.java +++ 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBBatchOperation.java @@ -80,7 +80,7 @@ public void testBatchOperationWithDeleteRange() throws RocksDatabaseException { }).when(writeBatch).delete(Mockito.any(ColumnFamilyHandle.class), Mockito.any(byte[].class)); }); - RDBBatchOperation batchOperation = new RDBBatchOperation()) { + RDBBatchOperation batchOperation = RDBBatchOperation.newAtomicOperation()) { ColumnFamilyHandle columnFamilyHandle = Mockito.mock(ColumnFamilyHandle.class); RocksDatabase.ColumnFamily columnFamily = Mockito.mock(RocksDatabase.ColumnFamily.class); doAnswer((i) -> { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferStub.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferStub.java index 3e7803407f05..2e7b3fdb0dd5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferStub.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferStub.java @@ -49,7 +49,7 @@ private BatchOperation getCurrentBatchOperation() { if (dbStore != null) { currentBatchOperation = dbStore.initBatchOperation(); } else { - currentBatchOperation = new RDBBatchOperation(); + currentBatchOperation = RDBBatchOperation.newAtomicOperation(); } } return currentBatchOperation; diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java index ed0ddde04c98..3de70d665700 100644 --- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java +++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java @@ -148,7 +148,7 @@ public void testMissingContainerDownNode() throws Exception { .allocateContainer(RatisReplicationConfig.getInstance(ONE), "test"); long containerID = containerInfo.getContainerID(); - try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) { + try (RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation()) { reconContainerMetadataManager .batchStoreContainerKeyCounts(rdbBatchOperation, containerID, 2L); reconContainerMetadataManager.commitBatchOperation(rdbBatchOperation); @@ -264,7 +264,7 @@ public void testEmptyMissingContainerDownNode() throws Exception { // Now add a container to key mapping count as 3. This data is used to // identify if container is empty in terms of keys mapped to container. - try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) { + try (RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation()) { reconContainerMetadataManager .batchStoreContainerKeyCounts(rdbBatchOperation, containerID, 3L); reconContainerMetadataManager.commitBatchOperation(rdbBatchOperation); @@ -302,7 +302,7 @@ public void testEmptyMissingContainerDownNode() throws Exception { // Now remove keys from container. This data is used to // identify if container is empty in terms of keys mapped to container. 
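    // Note: the no-argument RDBBatchOperation.newAtomicOperation() used below wraps a
    // freshly created ManagedWriteBatch, matching the behaviour of the former public
    // no-argument constructor.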
- try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) { + try (RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation()) { reconContainerMetadataManager .batchStoreContainerKeyCounts(rdbBatchOperation, containerID, 0L); reconContainerMetadataManager.commitBatchOperation(rdbBatchOperation); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index dd7195802ed1..fc4e5b7408d5 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -223,7 +223,7 @@ public void testCloseOnEviction() throws IOException, snapshotChainManager.addSnapshot(first); snapshotChainManager.addSnapshot(second); - RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation(); // create the first snapshot checkpoint OmSnapshotManager.createOmSnapshotCheckpoint(om.getMetadataManager(), first, rdbBatchOperation); @@ -238,7 +238,7 @@ public void testCloseOnEviction() throws IOException, firstSnapshot.getMetadataManager(), "store", firstSnapshotStore); // create second snapshot checkpoint (which will be used for eviction) - rdbBatchOperation = new RDBBatchOperation(); + rdbBatchOperation = RDBBatchOperation.newAtomicOperation(); OmSnapshotManager.createOmSnapshotCheckpoint(om.getMetadataManager(), second, rdbBatchOperation); om.getMetadataManager().getStore().commitBatchOperation(rdbBatchOperation); @@ -749,7 +749,7 @@ public void testCreateSnapshotIdempotent() throws Exception { when(snapshotInfoTable.get(first.getTableKey())).thenReturn(first); // Create first checkpoint for the snapshot checkpoint - RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation(); OmSnapshotManager.createOmSnapshotCheckpoint(om.getMetadataManager(), first, rdbBatchOperation); om.getMetadataManager().getStore().commitBatchOperation(rdbBatchOperation); @@ -758,7 +758,7 @@ public void testCreateSnapshotIdempotent() throws Exception { logCapturer.clearOutput(); // Create checkpoint again for the same snapshot. - rdbBatchOperation = new RDBBatchOperation(); + rdbBatchOperation = RDBBatchOperation.newAtomicOperation(); OmSnapshotManager.createOmSnapshotCheckpoint(om.getMetadataManager(), first, rdbBatchOperation); om.getMetadataManager().getStore().commitBatchOperation(rdbBatchOperation); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java index d4edd2894f9d..dca33c759b80 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java @@ -656,7 +656,7 @@ ImmutablePair innerGetAndApplyDeltaUpdatesFromOM(long fromSequenc writeBatch.iterate(omdbUpdatesHandler); // Commit the OM DB transactions in recon rocks DB and sync here. 
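      // Here the ManagedWriteBatch already populated with the OM delta updates is
      // wrapped via RDBBatchOperation.newAtomicOperation(writeBatch); direct
      // construction is no longer possible because the constructor is now private.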
try (RDBBatchOperation rdbBatchOperation = - new RDBBatchOperation(writeBatch)) { + RDBBatchOperation.newAtomicOperation(writeBatch)) { try (ManagedWriteOptions wOpts = new ManagedWriteOptions()) { rdbBatchOperation.commit(rocksDB, wOpts); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperHelper.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperHelper.java index 626376ac09a9..3e60ceceb6ba 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperHelper.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperHelper.java @@ -430,7 +430,7 @@ private static void writeToTheDB(Map localContainer ReconContainerMetadataManager reconContainerMetadataManager) throws IOException { - try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) { + try (RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation()) { // Write container key mappings (local per-task data) localContainerKeyMap.keySet().forEach((ContainerKeyPrefix key) -> { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTaskHelper.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTaskHelper.java index b82fcd556a1b..4981083b5025 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTaskHelper.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTaskHelper.java @@ -274,7 +274,7 @@ public static void writeCountsToDB(Map fileSizeCountMap, return; } - try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) { + try (RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation()) { for (Map.Entry entry : fileSizeCountMap.entrySet()) { FileSizeCountKey key = entry.getKey(); Long deltaCount = entry.getValue(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java index 974c109bb42b..3569bae71a18 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java @@ -61,7 +61,7 @@ public ReconOMMetadataManager getReconOMMetadataManager() { private void updateNSSummariesToDB(Map nsSummaryMap, Collection objectIdsToBeDeleted) throws IOException { - try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) { + try (RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation()) { for (Map.Entry entry : nsSummaryMap.entrySet()) { try { reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, entry.getKey(), entry.getValue()); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java index 8027966231de..59862cebb81f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java @@ -352,7 +352,7 @@ private void handleUpdateEvent(OMDBUpdateEvent event, * @param dataMap Map containing the updated count and size information. 
*/ private void writeDataToDB(Map dataMap) { - try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) { + try (RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation()) { for (Entry entry : dataMap.entrySet()) { String key = entry.getKey(); Long value = entry.getValue(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java index 801177d25c47..a1b62acd63d6 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java @@ -1469,7 +1469,7 @@ public void testGetContainerInsightsNonOMContainers() .stream().map(entry -> entry.getKey()).collect( Collectors.toList()); deletedContainerKeyList.forEach((ContainerKeyPrefix key) -> { - try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) { + try (RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation()) { reconContainerMetadataManager .batchDeleteContainerMapping(rdbBatchOperation, key); reconContainerMetadataManager.commitBatchOperation(rdbBatchOperation); @@ -1506,7 +1506,7 @@ public void testGetContainerInsightsNonOMContainersWithPrevKey() reconContainerMetadataManager.getKeyPrefixesForContainer(2).entrySet() .stream().map(entry -> entry.getKey()).collect(Collectors.toList()); deletedContainerKeyList.forEach((ContainerKeyPrefix key) -> { - try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) { + try (RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation()) { reconContainerMetadataManager.batchDeleteContainerMapping( rdbBatchOperation, key); reconContainerMetadataManager.commitBatchOperation(rdbBatchOperation); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconContainerMetadataManagerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconContainerMetadataManagerImpl.java index 0a3d9429a7c8..22c3d6da4f44 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconContainerMetadataManagerImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconContainerMetadataManagerImpl.java @@ -80,7 +80,7 @@ public void setUp() throws Exception { private void populateKeysInContainers(long containerId1, long containerId2) throws Exception { - RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation(); ContainerKeyPrefix containerKeyPrefix1 = ContainerKeyPrefix.get( containerId1, keyPrefix1, 0); reconContainerMetadataManager @@ -119,7 +119,7 @@ public void testInitNewContainerDB() throws Exception { "V1/B2/K3", 0); prefixCounts.put(ckp3, 3); - RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation(); for (Map.Entry entry : prefixCounts.entrySet()) { reconContainerMetadataManager.batchStoreContainerKeyMapping( @@ -164,7 +164,7 @@ public void testBatchStoreContainerKeyMapping() throws Exception { prefixCounts.put(keyPrefix2, 2); prefixCounts.put(keyPrefix3, 3); - RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation(); for (Map.Entry entry : prefixCounts.entrySet()) { 
ContainerKeyPrefix containerKeyPrefix = ContainerKeyPrefix.get( containerId, entry.getKey(), 0); @@ -192,7 +192,7 @@ public void testBatchStoreContainerKeyMapping() throws Exception { public void testStoreContainerKeyCount() throws Exception { long containerId = 1L; long nextContainerId = 2L; - RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation(); reconContainerMetadataManager .batchStoreContainerKeyCounts(rdbBatchOperation, containerId, 2L); reconContainerMetadataManager @@ -204,7 +204,7 @@ public void testStoreContainerKeyCount() throws Exception { assertEquals(3, reconContainerMetadataManager.getKeyCountForContainer(nextContainerId)); - RDBBatchOperation rdbBatchOperation2 = new RDBBatchOperation(); + RDBBatchOperation rdbBatchOperation2 = RDBBatchOperation.newAtomicOperation(); reconContainerMetadataManager .batchStoreContainerKeyCounts(rdbBatchOperation2, containerId, 20L); reconContainerMetadataManager.commitBatchOperation(rdbBatchOperation2); @@ -216,7 +216,7 @@ public void testStoreContainerKeyCount() throws Exception { public void testGetKeyCountForContainer() throws Exception { long containerId = 1L; long nextContainerId = 2L; - RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation(); reconContainerMetadataManager .batchStoreContainerKeyCounts(rdbBatchOperation, containerId, 2L); reconContainerMetadataManager @@ -236,7 +236,7 @@ public void testGetKeyCountForContainer() throws Exception { public void testDoesContainerExists() throws Exception { long containerId = 1L; long nextContainerId = 2L; - RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation(); reconContainerMetadataManager .batchStoreContainerKeyCounts(rdbBatchOperation, containerId, 2L); reconContainerMetadataManager @@ -254,7 +254,7 @@ public void testDoesContainerExists() throws Exception { public void testGetCountForContainerKeyPrefix() throws Exception { long containerId = System.currentTimeMillis(); - RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation(); reconContainerMetadataManager.batchStoreContainerKeyMapping( rdbBatchOperation, ContainerKeyPrefix.get(containerId, keyPrefix1), 2); reconContainerMetadataManager.commitBatchOperation(rdbBatchOperation); @@ -413,7 +413,7 @@ public void testDeleteContainerMapping() throws Exception { } }); - RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation(); ContainerKeyPrefix prefixForDelete = ContainerKeyPrefix.get( containerId, keyPrefix2, 0); reconContainerMetadataManager diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java index b4e62e9d03c5..339e4160c317 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java @@ -114,7 +114,7 @@ private void putThreeNSMetadata() throws IOException { hmap.put(1L, new NSSummary(1, 2, 2 * 3, testBucket, TEST_CHILD_DIR, "dir1", -1)); 
hmap.put(2L, new NSSummary(3, 4, 4 * 3, testBucket, TEST_CHILD_DIR, "dir2", -1)); hmap.put(3L, new NSSummary(5, 6, 6 * 3, testBucket, TEST_CHILD_DIR, "dir3", -1)); - RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation(); for (Map.Entry entry: hmap.entrySet()) { reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, (long)entry.getKey(), (NSSummary)entry.getValue()); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/AbstractNSSummaryTaskTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/AbstractNSSummaryTaskTest.java index fe461e18857c..833ca449b43d 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/AbstractNSSummaryTaskTest.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/AbstractNSSummaryTaskTest.java @@ -159,7 +159,7 @@ public List commonSetUpTestReprocess(Runnable reprocessTask, long... List result = new ArrayList<>(); NSSummary staleNSSummary = new NSSummary(); - RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + RDBBatchOperation rdbBatchOperation = RDBBatchOperation.newAtomicOperation(); getReconNamespaceSummaryManager().batchStoreNSSummaries(rdbBatchOperation, -1L, staleNSSummary); getReconNamespaceSummaryManager().commitBatchOperation(rdbBatchOperation); From b900f256dabc354d6a6b2498df66bc0169e3eafe Mon Sep 17 00:00:00 2001 From: "Eric C. Ho" Date: Tue, 23 Dec 2025 15:58:10 +0800 Subject: [PATCH 26/36] HDDS-14172. Reduce copying in OMFileRequest.getDirectoryInfo (#9541) --- .../hadoop/ozone/om/helpers/AclListBuilder.java | 13 +++++++++++++ .../hadoop/ozone/om/helpers/OmDirectoryInfo.java | 14 +++++++++++++- .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java | 4 ++++ .../ozone/om/request/file/OMFileRequest.java | 11 +---------- 4 files changed, 31 insertions(+), 11 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/AclListBuilder.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/AclListBuilder.java index ae2ff8857625..5e097ff16639 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/AclListBuilder.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/AclListBuilder.java @@ -43,6 +43,19 @@ public static AclListBuilder of(ImmutableList list) { return new AclListBuilder(list); } + /** + * Overload accepting List instead of ImmutableList for binary compatibility + * across different Guava versions (especially for Hadoop 2.x compatibility). + * This method can be removed when Hadoop 2.x support is dropped and all + * callers are guaranteed to use the same Guava version. + */ + public static AclListBuilder of(List list) { + if (list instanceof ImmutableList) { + return new AclListBuilder((ImmutableList) list); + } + return copyOf(list); + } + public static AclListBuilder copyOf(List list) { return new AclListBuilder(list == null ? 
ImmutableList.of() : ImmutableList.copyOf(list)); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java index 816f34d71672..13a09fd0d946 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java @@ -75,6 +75,10 @@ public Builder toBuilder() { return new Builder(this); } + public static Builder builderFromOmKeyInfo(OmKeyInfo keyInfo) { + return new Builder(keyInfo); + } + /** * Builder for Directory Info. */ @@ -100,6 +104,14 @@ private Builder(OmDirectoryInfo obj) { this.acls = AclListBuilder.of(obj.acls); } + private Builder(OmKeyInfo keyInfo) { + super(keyInfo); + this.name = keyInfo.getFileName(); + this.creationTime = keyInfo.getCreationTime(); + this.modificationTime = keyInfo.getModificationTime(); + this.acls = AclListBuilder.of(keyInfo.getAcls()); + } + @Override public Builder setParentObjectID(long parentObjectId) { super.setParentObjectID(parentObjectId); @@ -180,7 +192,7 @@ public long getModificationTime() { return modificationTime; } - public List getAcls() { + public ImmutableList getAcls() { return acls; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index 76e0cac3f462..e0e900ccf2df 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -930,6 +930,10 @@ public Builder toBuilder() { return new Builder(this); } + public OmDirectoryInfo.Builder toDirectoryInfoBuilder() { + return OmDirectoryInfo.builderFromOmKeyInfo(this); + } + /** * Return a new copy of the object. */ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java index a02c16188efe..6a23ab21537a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java @@ -786,16 +786,7 @@ public static String getAbsolutePath(String prefixName, String fileName) { * @return omDirectoryInfo object */ public static OmDirectoryInfo getDirectoryInfo(OmKeyInfo keyInfo) { - return OmDirectoryInfo.newBuilder() - .setParentObjectID(keyInfo.getParentObjectID()) - .setAcls(keyInfo.getAcls()) - .addAllMetadata(keyInfo.getMetadata()) - .setCreationTime(keyInfo.getCreationTime()) - .setModificationTime(keyInfo.getModificationTime()) - .setObjectID(keyInfo.getObjectID()) - .setUpdateID(keyInfo.getUpdateID()) - .setName(OzoneFSUtils.getFileName(keyInfo.getKeyName())) - .build(); + return keyInfo.toDirectoryInfoBuilder().build(); } /** From a0a8be9c08025d2946a3b7ab80c0a66a8478a268 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 23 Dec 2025 11:46:29 +0100 Subject: [PATCH 27/36] HDDS-14222. 
Reduce duplication in TestObjectPut (#9539) --- hadoop-ozone/client/pom.xml | 5 + .../ozone/client/OzoneClientTestUtils.java | 56 ++ hadoop-ozone/integration-test/pom.xml | 6 + .../ozone/client/rpc/OzoneRpcClientTests.java | 15 +- ...estOzoneRpcClientWithKeyLatestVersion.java | 11 +- .../ozone/client/rpc/TestReadRetries.java | 2 +- hadoop-ozone/s3gateway/pom.xml | 6 + .../ozone/s3/endpoint/EndpointTestUtils.java | 90 +++ .../ozone/s3/endpoint/TestObjectPut.java | 537 +++++------------- pom.xml | 6 + 10 files changed, 321 insertions(+), 413 deletions(-) create mode 100644 hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/OzoneClientTestUtils.java create mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointTestUtils.java diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml index 603a87e3fe64..6d817fa31632 100644 --- a/hadoop-ozone/client/pom.xml +++ b/hadoop-ozone/client/pom.xml @@ -97,6 +97,11 @@ + + commons-io + commons-io + test + org.apache.ozone hdds-client diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/OzoneClientTestUtils.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/OzoneClientTestUtils.java new file mode 100644 index 000000000000..7fb2345bc15b --- /dev/null +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/OzoneClientTestUtils.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.client; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; + +import java.io.IOException; +import java.io.InputStream; +import org.apache.commons.io.IOUtils; + +/** Utilities for tests using Ozone client. */ +public final class OzoneClientTestUtils { + + /** Verify contents of a key. + * @return key details for convenience (further checks) */ + public static OzoneKeyDetails assertKeyContent( + OzoneBucket bucket, + String keyName, + String expected + ) throws IOException { + return assertKeyContent(bucket, keyName, expected.getBytes(UTF_8)); + } + + /** Verify contents of a key. 
+ * @return key details for convenience (further checks) */ + public static OzoneKeyDetails assertKeyContent( + OzoneBucket bucket, + String keyName, + byte[] expected + ) throws IOException { + try (InputStream is = bucket.readKey(keyName)) { + assertArrayEquals(expected, IOUtils.readFully(is, expected.length)); + } + return bucket.getKey(keyName); + } + + private OzoneClientTestUtils() { + // no instances + } +} diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index df9da45b3b6d..10e851ba9d43 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -384,6 +384,12 @@ ozone-client test + + org.apache.ozone + ozone-client + test-jar + test + org.apache.ozone ozone-common diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java index 554b125ddbec..2832a281ebb2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java @@ -38,6 +38,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.GB; import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.client.OzoneClientTestUtils.assertKeyContent; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; @@ -1405,20 +1406,6 @@ private static void rewriteKey( } } - private static OzoneKeyDetails assertKeyContent( - OzoneBucket bucket, String keyName, byte[] expectedContent - ) throws IOException { - OzoneKeyDetails updatedKeyDetails = bucket.getKey(keyName); - - try (OzoneInputStream is = bucket.readKey(keyName)) { - byte[] fileContent = new byte[expectedContent.length]; - IOUtils.readFully(is, fileContent); - assertArrayEquals(expectedContent, fileContent); - } - - return updatedKeyDetails; - } - private OzoneBucket createBucket(BucketLayout layout) throws IOException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java index b437aa720269..703b37964f2b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java @@ -19,15 +19,13 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_KEY_LATEST_VERSION_LOCATION; -import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.apache.hadoop.ozone.client.OzoneClientTestUtils.assertKeyContent; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import 
java.io.IOException; -import java.io.InputStream; import java.util.List; import java.util.UUID; -import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; @@ -107,13 +105,6 @@ private static void writeKey(OzoneBucket bucket, String key, byte[] content, TestDataUtil.createKey(bucket, key, replication, content); } - public static void assertKeyContent(OzoneBucket bucket, String key, - byte[] expected) throws Exception { - try (InputStream in = bucket.readKey(key)) { - assertArrayEquals(expected, IOUtils.readFully(in, expected.length)); - } - } - private void assertListStatus(OzoneBucket bucket, String keyName, int expectedVersionCount) throws Exception { List files = bucket.listStatus(keyName, false, "", 1); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java index af28265e8230..70989f07e5fd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.client.rpc; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; -import static org.apache.hadoop.ozone.client.rpc.TestOzoneRpcClientWithKeyLatestVersion.assertKeyContent; +import static org.apache.hadoop.ozone.client.OzoneClientTestUtils.assertKeyContent; import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.configureFSOptimizedPaths; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index af45d3117983..aa510ae6a0f2 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -227,6 +227,12 @@ weld-servlet-shaded runtime + + org.apache.ozone + ozone-client + test-jar + test + diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointTestUtils.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointTestUtils.java new file mode 100644 index 000000000000..c6eff8066cd5 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointTestUtils.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import javax.ws.rs.core.Response; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.http.HttpStatus; +import org.apache.ratis.util.function.CheckedSupplier; + +/** Utilities for unit-testing S3 endpoints. */ +public final class EndpointTestUtils { + + /** Put without content. */ + public static Response putDir( + ObjectEndpoint subject, + String bucket, + String key + ) throws IOException, OS3Exception { + return put(subject, bucket, key, 0, null, null); + } + + /** Put with content. */ + public static Response put( + ObjectEndpoint subject, + String bucket, + String key, + String content + ) throws IOException, OS3Exception { + return put(subject, bucket, key, 0, null, content); + } + + /** Put with content, part number, upload ID. */ + public static Response put( + ObjectEndpoint subject, + String bucket, + String key, + int partNumber, + String uploadID, + String content + ) throws IOException, OS3Exception { + if (content == null) { + return subject.put(bucket, key, 0, partNumber, uploadID, null, null, null); + } else { + final long length = content.length(); + try (ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8))) { + return subject.put(bucket, key, length, partNumber, uploadID, null, null, body); + } + } + } + + /** Verify response is success for {@code request}. */ + public static void assertSucceeds(CheckedSupplier request) throws E { + try (Response response = request.get()) { + assertEquals(HttpStatus.SC_OK, response.getStatus()); + } + } + + /** Verify error response for {@code request} matching {@code expected} {@link OS3Exception}. 
*/ + public static OS3Exception assertErrorResponse(OS3Exception expected, CheckedSupplier request) { + OS3Exception actual = assertThrows(OS3Exception.class, () -> request.get().close()); + assertEquals(expected.getCode(), actual.getCode()); + assertEquals(expected.getHttpCode(), actual.getHttpCode()); + return actual; + } + + private EndpointTestUtils() { + // no instances + } +} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java index e5c34fb4e465..a561343a5182 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java @@ -17,14 +17,21 @@ package org.apache.hadoop.ozone.s3.endpoint; -import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.client.OzoneClientTestUtils.assertKeyContent; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertErrorResponse; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertSucceeds; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.put; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.putDir; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_ARGUMENT; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_TAG; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_BUCKET; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_COPY_DIRECTIVE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX; import static org.apache.hadoop.ozone.s3.util.S3Consts.DECODED_CONTENT_LENGTH_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; +import static org.apache.hadoop.ozone.s3.util.S3Consts.STREAMING_AWS4_HMAC_SHA256_PAYLOAD; import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_DIRECTIVE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_KEY_LENGTH_LIMIT; @@ -36,9 +43,8 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.spy; @@ -46,7 +52,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import java.io.ByteArrayInputStream; +import com.google.common.collect.ImmutableMap; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -63,28 +69,23 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConfigKeys; 
import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; -import org.apache.http.HttpStatus; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import org.mockito.MockedStatic; -import org.mockito.Mockito; /** * Test put object. @@ -96,12 +97,12 @@ class TestObjectPut { private static final String KEY_NAME = "key=value/1"; private static final String DEST_BUCKET_NAME = "b2"; private static final String DEST_KEY = "key=value/2"; - private static final String NO_SUCH_BUCKET = "nonexist"; + private static final String NONEXISTENT_BUCKET = "nonexist"; - private OzoneClient clientStub; private ObjectEndpoint objectEndpoint; private HttpHeaders headers; private OzoneBucket bucket; + private OzoneBucket destBucket; private OzoneBucket fsoBucket; static Stream argumentsForPutObject() { @@ -117,29 +118,17 @@ static Stream argumentsForPutObject() { @BeforeEach void setup() throws IOException { - OzoneConfiguration config = new OzoneConfiguration(); + headers = newMockHttpHeaders(); + objectEndpoint = spy(EndpointBuilder.newObjectEndpointBuilder().setHeaders(headers).build()); - //Create client stub and object store stub. 
- clientStub = new OzoneClientStub(); - - // Create bucket + // Create buckets + OzoneClient clientStub = objectEndpoint.getClient(); clientStub.getObjectStore().createS3Bucket(BUCKET_NAME); bucket = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME); clientStub.getObjectStore().createS3Bucket(DEST_BUCKET_NAME); + destBucket = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME); - headers = mock(HttpHeaders.class); - when(headers.getHeaderString(X_AMZ_CONTENT_SHA256)).thenReturn("mockSignature"); - - // Create PutObject and setClient to OzoneClientStub - objectEndpoint = EndpointBuilder.newObjectEndpointBuilder() - .setClient(clientStub) - .setConfig(config) - .setHeaders(headers) - .build(); - - objectEndpoint = spy(objectEndpoint); - - String volumeName = config.get(OzoneConfigKeys.OZONE_S3_VOLUME_NAME, + String volumeName = objectEndpoint.getOzoneConfiguration().get(OzoneConfigKeys.OZONE_S3_VOLUME_NAME, OzoneConfigKeys.OZONE_S3_VOLUME_NAME_DEFAULT); OzoneVolume volume = clientStub.getObjectStore().getVolume(volumeName); BucketArgs fsoBucketArgs = BucketArgs.newBuilder() @@ -151,25 +140,17 @@ void setup() throws IOException { @ParameterizedTest @MethodSource("argumentsForPutObject") - void testPutObject(int length, ReplicationConfig replication) throws IOException, OS3Exception { + void testPutObject(int length, ReplicationConfig replication) throws Exception { //GIVEN final String content = RandomStringUtils.secure().nextAlphanumeric(length); - ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); bucket.setReplicationConfig(replication); //WHEN - Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, length, 1, null, null, null, body); + assertSucceeds(() -> putObject(content)); //THEN - assertEquals(200, response.getStatus()); - - String keyContent; - try (InputStream input = bucket.readKey(KEY_NAME)) { - keyContent = IOUtils.toString(input, UTF_8); - } - assertEquals(content, keyContent); - - OzoneKeyDetails keyDetails = bucket.getKey(KEY_NAME); + OzoneKeyDetails keyDetails = assertKeyContent(bucket, KEY_NAME, content); + assertEquals(content.length(), keyDetails.getDataSize()); assertEquals(replication, keyDetails.getReplicationConfig()); assertNotNull(keyDetails.getMetadata()); assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); @@ -177,145 +158,54 @@ void testPutObject(int length, ReplicationConfig replication) throws IOException } @Test - void testPutObjectContentLength() throws IOException, OS3Exception { - // The contentLength specified when creating the Key should be the same as - // the Content-Length, the key Commit will compare the Content-Length with - // the actual length of the data written. 
- ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - long dataSize = CONTENT.length(); - - objectEndpoint.put(BUCKET_NAME, KEY_NAME, dataSize, 0, null, null, null, body); - assertEquals(dataSize, getKeyDataSize()); - } - - @Test - void testPutObjectContentLengthForStreaming() - throws IOException, OS3Exception { - String chunkedContent = "0a;chunk-signature=signature\r\n" - + "1234567890\r\n" - + "05;chunk-signature=signature\r\n" - + "abcde\r\n"; - - when(headers.getHeaderString("x-amz-content-sha256")) - .thenReturn("STREAMING-AWS4-HMAC-SHA256-PAYLOAD"); - - when(headers.getHeaderString(DECODED_CONTENT_LENGTH_HEADER)) - .thenReturn("15"); - objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 0, null, null, - null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); - assertEquals(15, getKeyDataSize()); - } - - @Test - public void testPutObjectWithTags() throws IOException, OS3Exception { - HttpHeaders headersWithTags = Mockito.mock(HttpHeaders.class); - when(headersWithTags.getHeaderString(X_AMZ_CONTENT_SHA256)).thenReturn("mockSignature"); - when(headersWithTags.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); + public void testPutObjectWithTags() throws Exception { + when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headersWithTags); + assertSucceeds(() -> putObject(CONTENT)); - Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, null, null, body); - - assertEquals(200, response.getStatus()); - - OzoneKeyDetails keyDetails = - clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); - Map tags = keyDetails.getTags(); + Map tags = bucket.getKey(KEY_NAME).getTags(); assertEquals(2, tags.size()); assertEquals("value1", tags.get("tag1")); assertEquals("value2", tags.get("tag2")); } @Test - public void testPutObjectWithOnlyTagKey() throws Exception { - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - HttpHeaders headerWithOnlyTagKey = Mockito.mock(HttpHeaders.class); - when(headerWithOnlyTagKey.getHeaderString(X_AMZ_CONTENT_SHA256)).thenReturn("mockSignature"); + public void testPutObjectWithOnlyTagKey() { // Try to send with only the key (no value) - when(headerWithOnlyTagKey.getHeaderString(TAG_HEADER)).thenReturn("tag1"); - objectEndpoint.setHeaders(headerWithOnlyTagKey); - - try { - objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, null, null, body); - fail("request with invalid query param should fail"); - } catch (OS3Exception ex) { - assertEquals(INVALID_TAG.getCode(), ex.getCode()); - assertThat(ex.getErrorMessage()).contains("Some tag values are not specified"); - assertEquals(INVALID_TAG.getHttpCode(), ex.getHttpCode()); - } + when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1"); + + OS3Exception ex = assertErrorResponse(INVALID_TAG, () -> putObject(CONTENT)); + assertThat(ex.getErrorMessage()).contains("Some tag values are not specified"); } @Test - public void testPutObjectWithDuplicateTagKey() throws Exception { - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - HttpHeaders headersWithDuplicateTagKey = Mockito.mock(HttpHeaders.class); - when(headersWithDuplicateTagKey.getHeaderString(X_AMZ_CONTENT_SHA256)).thenReturn("mockSignature"); - 
when(headersWithDuplicateTagKey.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag1=value2"); - objectEndpoint.setHeaders(headersWithDuplicateTagKey); - try { - objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, null, null, body); - fail("request with duplicate tag key should fail"); - } catch (OS3Exception ex) { - assertEquals(INVALID_TAG.getCode(), ex.getCode()); - assertThat(ex.getErrorMessage()).contains("There are tags with duplicate tag keys"); - assertEquals(INVALID_TAG.getHttpCode(), ex.getHttpCode()); - } + public void testPutObjectWithDuplicateTagKey() { + when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag1=value2"); + + OS3Exception ex = assertErrorResponse(INVALID_TAG, () -> putObject(CONTENT)); + assertThat(ex.getErrorMessage()).contains("There are tags with duplicate tag keys"); } @Test - public void testPutObjectWithLongTagKey() throws Exception { - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - HttpHeaders headersWithLongTagKey = Mockito.mock(HttpHeaders.class); - when(headersWithLongTagKey.getHeaderString(X_AMZ_CONTENT_SHA256)).thenReturn("mockSignature"); + public void testPutObjectWithLongTagKey() { String longTagKey = StringUtils.repeat('k', TAG_KEY_LENGTH_LIMIT + 1); - when(headersWithLongTagKey.getHeaderString(TAG_HEADER)).thenReturn(longTagKey + "=value1"); - objectEndpoint.setHeaders(headersWithLongTagKey); - try { - objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, null, null, body); - fail("request with tag key exceeding the length limit should fail"); - } catch (OS3Exception ex) { - assertEquals(INVALID_TAG.getCode(), ex.getCode()); - assertThat(ex.getErrorMessage()).contains("The tag key exceeds the maximum length"); - assertEquals(INVALID_TAG.getHttpCode(), ex.getHttpCode()); - } + when(headers.getHeaderString(TAG_HEADER)).thenReturn(longTagKey + "=value1"); + + OS3Exception ex = assertErrorResponse(INVALID_TAG, () -> putObject(CONTENT)); + assertThat(ex.getErrorMessage()).contains("The tag key exceeds the maximum length"); } @Test - public void testPutObjectWithLongTagValue() throws Exception { - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - HttpHeaders headersWithLongTagValue = Mockito.mock(HttpHeaders.class); - when(headersWithLongTagValue.getHeaderString(X_AMZ_CONTENT_SHA256)).thenReturn("mockSignature"); - objectEndpoint.setHeaders(headersWithLongTagValue); + public void testPutObjectWithLongTagValue() { String longTagValue = StringUtils.repeat('v', TAG_VALUE_LENGTH_LIMIT + 1); - when(headersWithLongTagValue.getHeaderString(TAG_HEADER)).thenReturn("tag1=" + longTagValue); - try { - objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, null, null, body); - fail("request with tag value exceeding the length limit should fail"); - } catch (OS3Exception ex) { - assertEquals(INVALID_TAG.getCode(), ex.getCode()); - assertThat(ex.getErrorMessage()).contains("The tag value exceeds the maximum length"); - assertEquals(INVALID_TAG.getHttpCode(), ex.getHttpCode()); - } + when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=" + longTagValue); + + OS3Exception ex = assertErrorResponse(INVALID_TAG, () -> putObject(CONTENT)); + assertThat(ex.getErrorMessage()).contains("The tag value exceeds the maximum length"); } @Test - public void testPutObjectWithTooManyTags() throws Exception { - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - HttpHeaders headersWithTooManyTags = 
Mockito.mock(HttpHeaders.class); - when(headersWithTooManyTags.getHeaderString(X_AMZ_CONTENT_SHA256)).thenReturn("mockSignature"); + public void testPutObjectWithTooManyTags() { StringBuilder sb = new StringBuilder(); for (int i = 0; i < TAG_NUM_LIMIT + 1; i++) { sb.append(String.format("tag%d=value%d", i, i)); @@ -323,57 +213,37 @@ public void testPutObjectWithTooManyTags() throws Exception { sb.append('&'); } } - when(headersWithTooManyTags.getHeaderString(TAG_HEADER)).thenReturn(sb.toString()); - objectEndpoint.setHeaders(headersWithTooManyTags); - try { - objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, null, null, body); - fail("request with number of tags exceeding limit should fail"); - } catch (OS3Exception ex) { - assertEquals(INVALID_TAG.getCode(), ex.getCode()); - assertThat(ex.getErrorMessage()).contains("exceeded the maximum number of tags"); - assertEquals(INVALID_TAG.getHttpCode(), ex.getHttpCode()); - } - } + when(headers.getHeaderString(TAG_HEADER)).thenReturn(sb.toString()); - private long getKeyDataSize() throws IOException { - return clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) - .getKey(KEY_NAME).getDataSize(); + OS3Exception ex = assertErrorResponse(INVALID_TAG, () -> putObject(CONTENT)); + assertThat(ex.getErrorMessage()).contains("exceeded the maximum number of tags"); } @Test - void testPutObjectWithSignedChunks() throws IOException, OS3Exception { + void testPutObjectWithSignedChunks() throws Exception { //GIVEN String chunkedContent = "0a;chunk-signature=signature\r\n" + "1234567890\r\n" + "05;chunk-signature=signature\r\n" + "abcde\r\n"; - when(headers.getHeaderString("x-amz-content-sha256")) - .thenReturn("STREAMING-AWS4-HMAC-SHA256-PAYLOAD"); + when(headers.getHeaderString(X_AMZ_CONTENT_SHA256)) + .thenReturn(STREAMING_AWS4_HMAC_SHA256_PAYLOAD); when(headers.getHeaderString(DECODED_CONTENT_LENGTH_HEADER)) .thenReturn("15"); //WHEN - Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, - chunkedContent.length(), 1, null, null, null, - new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); + assertSucceeds(() -> putObject(chunkedContent)); //THEN - OzoneInputStream ozoneInputStream = - clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) - .readKey(KEY_NAME); - String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); - OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); - - assertEquals(200, response.getStatus()); - assertEquals("1234567890abcde", keyContent); + OzoneKeyDetails keyDetails = assertKeyContent(bucket, KEY_NAME, "1234567890abcde"); assertNotNull(keyDetails.getMetadata()); assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); + assertEquals(15, keyDetails.getDataSize()); } @Test - public void testPutObjectMessageDigestResetDuringException() throws OS3Exception { + public void testPutObjectMessageDigestResetDuringException() { MessageDigest messageDigest = mock(MessageDigest.class); try (MockedStatic mocked = mockStatic(IOUtils.class)) { // For example, EOFException during put-object due to client cancelling the operation before it completes @@ -382,52 +252,35 @@ public void testPutObjectMessageDigestResetDuringException() throws OS3Exception .thenThrow(IOException.class); when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - try { - objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT - .length(), 1, null, null, null, body); - 
fail("Should throw IOException"); - } catch (IOException ignored) { - // Verify that the message digest is reset so that the instance can be reused for the - // next request in the same thread - verify(messageDigest, times(1)).reset(); - } + assertThrows(IOException.class, () -> putObject(CONTENT).close()); + + // Verify that the message digest is reset so that the instance can be reused for the + // next request in the same thread + verify(messageDigest, times(1)).reset(); } } @Test - void testCopyObject() throws IOException, OS3Exception { + void testCopyObject() throws Exception { // Put object in to source bucket - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); // Add some custom metadata + Map customMetadata = ImmutableMap.of( + "custom-key-1", "custom-value-1", + "custom-key-2", "custom-value-2"); MultivaluedMap metadataHeaders = new MultivaluedHashMap<>(); - metadataHeaders.putSingle(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-1", "custom-value-1"); - metadataHeaders.putSingle(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-2", "custom-value-2"); + customMetadata.forEach((k, v) -> metadataHeaders.putSingle(CUSTOM_METADATA_HEADER_PREFIX + k, v)); when(headers.getRequestHeaders()).thenReturn(metadataHeaders); // Add COPY metadata directive (default) when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("COPY"); - Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, - CONTENT.length(), 1, null, null, null, body); - - OzoneInputStream ozoneInputStream = clientStub.getObjectStore() - .getS3Bucket(BUCKET_NAME) - .readKey(KEY_NAME); + assertSucceeds(() -> putObject(CONTENT)); - String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); - OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); - - assertEquals(200, response.getStatus()); - assertEquals(CONTENT, keyContent); + OzoneKeyDetails keyDetails = assertKeyContent(bucket, KEY_NAME, CONTENT); assertNotNull(keyDetails.getMetadata()); - assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); - assertThat(keyDetails.getMetadata().get("custom-key-1")).isEqualTo("custom-value-1"); - assertThat(keyDetails.getMetadata().get("custom-key-2")).isEqualTo("custom-value-2"); - String sourceETag = keyDetails.getMetadata().get(OzoneConsts.ETAG); + assertThat(sourceETag).isNotEmpty(); + assertThat(keyDetails.getMetadata()).containsAllEntriesOf(customMetadata); // This will be ignored since the copy directive is COPY metadataHeaders.putSingle(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-3", "custom-value-3"); @@ -436,126 +289,79 @@ void testCopyObject() throws IOException, OS3Exception { when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( BUCKET_NAME + "/" + urlEncode(KEY_NAME)); - response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, - null, null, null, body); + assertSucceeds(() -> put(objectEndpoint, DEST_BUCKET_NAME, DEST_KEY, CONTENT)); // Check destination key and response - ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) - .readKey(DEST_KEY); - - keyContent = IOUtils.toString(ozoneInputStream, UTF_8); - OzoneKeyDetails sourceKeyDetails = clientStub.getObjectStore() - .getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); - OzoneKeyDetails destKeyDetails = clientStub.getObjectStore() - .getS3Bucket(DEST_BUCKET_NAME).getKey(DEST_KEY); - - assertEquals(200, response.getStatus()); - assertEquals(CONTENT, keyContent); - assertNotNull(keyDetails.getMetadata()); - 
assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); + OzoneKeyDetails destKeyDetails = assertKeyContent(destBucket, DEST_KEY, CONTENT); + keyDetails = bucket.getKey(KEY_NAME); // Source key eTag should remain unchanged and the dest key should have // the same Etag since the key content is the same - assertEquals(sourceETag, sourceKeyDetails.getMetadata().get(OzoneConsts.ETAG)); + assertEquals(sourceETag, keyDetails.getMetadata().get(OzoneConsts.ETAG)); assertEquals(sourceETag, destKeyDetails.getMetadata().get(OzoneConsts.ETAG)); - assertThat(destKeyDetails.getMetadata().get("custom-key-1")).isEqualTo("custom-value-1"); - assertThat(destKeyDetails.getMetadata().get("custom-key-2")).isEqualTo("custom-value-2"); - assertThat(destKeyDetails.getMetadata().containsKey("custom-key-3")).isFalse(); + assertThat(destKeyDetails.getMetadata()) + .containsAllEntriesOf(customMetadata) + .doesNotContainKey("custom-key-3"); // Now use REPLACE metadata directive (default) and remove some custom metadata used in the source key when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("REPLACE"); metadataHeaders.remove(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-1"); metadataHeaders.remove(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-2"); - response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, - null, null, null, body); - - ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) - .readKey(DEST_KEY); + assertSucceeds(() -> put(objectEndpoint, DEST_BUCKET_NAME, DEST_KEY, CONTENT)); - keyContent = IOUtils.toString(ozoneInputStream, UTF_8); - sourceKeyDetails = clientStub.getObjectStore() - .getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); - destKeyDetails = clientStub.getObjectStore() - .getS3Bucket(DEST_BUCKET_NAME).getKey(DEST_KEY); - - assertEquals(200, response.getStatus()); - assertEquals(CONTENT, keyContent); + destKeyDetails = assertKeyContent(destBucket, DEST_KEY, CONTENT); assertNotNull(keyDetails.getMetadata()); - assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); // Source key eTag should remain unchanged and the dest key should have // the same Etag since the key content is the same - assertEquals(sourceETag, sourceKeyDetails.getMetadata().get(OzoneConsts.ETAG)); + assertEquals(sourceETag, keyDetails.getMetadata().get(OzoneConsts.ETAG)); assertEquals(sourceETag, destKeyDetails.getMetadata().get(OzoneConsts.ETAG)); - assertThat(destKeyDetails.getMetadata().containsKey("custom-key-1")).isFalse(); - assertThat(destKeyDetails.getMetadata().containsKey("custom-key-2")).isFalse(); - assertThat(destKeyDetails.getMetadata().get("custom-key-3")).isEqualTo("custom-value-3"); + assertThat(destKeyDetails.getMetadata()) + .doesNotContainKeys("custom-key-1", "custom-key-2") + .containsEntry("custom-key-3", "custom-value-3"); // wrong copy metadata directive when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("INVALID"); - OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, null, null, body), - "test copy object failed"); - assertThat(e.getHttpCode()).isEqualTo(400); - assertThat(e.getCode()).isEqualTo("InvalidArgument"); + OS3Exception e = assertErrorResponse(INVALID_ARGUMENT, + () -> put(objectEndpoint, DEST_BUCKET_NAME, DEST_KEY, CONTENT)); assertThat(e.getErrorMessage()).contains("The metadata copy directive specified is invalid"); 
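The try/fail/catch blocks these tests previously used are replaced by the assertErrorResponse helper, which verifies both the S3 error code and the HTTP status of the thrown OS3Exception and returns the exception for message assertions. The helper itself is defined outside this hunk; a plausible shape using JUnit 5's Executable, shown purely as an assumption:

    // Assumed shape of the shared helper; the actual definition lives elsewhere in the test sources.
    static OS3Exception assertErrorResponse(OS3Exception expected, Executable request) {
      OS3Exception actual = assertThrows(OS3Exception.class, request);
      assertEquals(expected.getCode(), actual.getCode());          // e.g. "InvalidTag"
      assertEquals(expected.getHttpCode(), actual.getHttpCode());  // e.g. 400
      return actual;                                               // callers then check the message
    }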
when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("COPY"); // source and dest same - e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, null, null, body), - "test copy object failed"); + e = assertErrorResponse(INVALID_REQUEST, () -> putObject(CONTENT)); assertThat(e.getErrorMessage()).contains("This copy request is illegal"); // source bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); - e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(DEST_BUCKET_NAME, - DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); - assertThat(e.getCode()).contains("NoSuchBucket"); + NONEXISTENT_BUCKET + "/" + urlEncode(KEY_NAME)); + assertErrorResponse(NO_SUCH_BUCKET, + () -> put(objectEndpoint, DEST_BUCKET_NAME, DEST_KEY, CONTENT)); // dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( BUCKET_NAME + "/" + urlEncode(KEY_NAME)); - e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, - DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); - assertThat(e.getCode()).contains("NoSuchBucket"); + assertErrorResponse(NO_SUCH_BUCKET, + () -> put(objectEndpoint, NONEXISTENT_BUCKET, DEST_KEY, CONTENT)); //Both source and dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); - e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, - DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); - assertThat(e.getCode()).contains("NoSuchBucket"); + NONEXISTENT_BUCKET + "/" + urlEncode(KEY_NAME)); + assertErrorResponse(NO_SUCH_BUCKET, + () -> put(objectEndpoint, NONEXISTENT_BUCKET, DEST_KEY, CONTENT)); // source key not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - BUCKET_NAME + "/" + urlEncode(NO_SUCH_BUCKET)); - e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - "nonexistent", KEY_NAME, CONTENT.length(), 1, null, null, null, body), - "test copy object failed"); - assertThat(e.getCode()).contains("NoSuchBucket"); + BUCKET_NAME + "/" + urlEncode(NONEXISTENT_BUCKET)); + assertErrorResponse(NO_SUCH_BUCKET, + () -> put(objectEndpoint, "nonexistent", KEY_NAME, CONTENT)); } @Test - public void testCopyObjectMessageDigestResetDuringException() throws IOException, OS3Exception { - // Put object in to source bucket - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - - Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, - CONTENT.length(), 1, null, null, null, body); - - OzoneInputStream ozoneInputStream = clientStub.getObjectStore() - .getS3Bucket(BUCKET_NAME) - .readKey(KEY_NAME); - - String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); - OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); + public void testCopyObjectMessageDigestResetDuringException() throws Exception { + assertSucceeds(() -> putObject(CONTENT)); - assertEquals(200, response.getStatus()); - assertEquals(CONTENT, keyContent); + OzoneKeyDetails keyDetails = assertKeyContent(bucket, KEY_NAME, CONTENT); assertNotNull(keyDetails.getMetadata()); assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); @@ -571,55 +377,40 @@ public void testCopyObjectMessageDigestResetDuringException() throws IOException 
when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( BUCKET_NAME + "/" + urlEncode(KEY_NAME)); - try { - objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, - null, null, null, body); - fail("Should throw IOException"); - } catch (IOException ignored) { - // Verify that the message digest is reset so that the instance can be reused for the - // next request in the same thread - verify(messageDigest, times(1)).reset(); - } + assertThrows(IOException.class, () -> putObject(DEST_BUCKET_NAME, DEST_KEY).close()); + // Verify that the message digest is reset so that the instance can be reused for the + // next request in the same thread + verify(messageDigest, times(1)).reset(); } } @Test - public void testCopyObjectWithTags() throws IOException, OS3Exception { + public void testCopyObjectWithTags() throws Exception { // Put object in to source bucket - HttpHeaders headersForPut = Mockito.mock(HttpHeaders.class); - when(headersForPut.getHeaderString(X_AMZ_CONTENT_SHA256)).thenReturn("mockSignature"); + HttpHeaders headersForPut = newMockHttpHeaders(); when(headersForPut.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); objectEndpoint.setHeaders(headersForPut); String sourceKeyName = "sourceKey"; - Response putResponse = objectEndpoint.put(BUCKET_NAME, sourceKeyName, - CONTENT.length(), 1, null, null, null, body); - OzoneKeyDetails keyDetails = - clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(sourceKeyName); + assertSucceeds(() -> putObject(BUCKET_NAME, sourceKeyName)); - assertEquals(200, putResponse.getStatus()); - Map tags = keyDetails.getTags(); + Map tags = bucket.getKey(sourceKeyName).getTags(); assertEquals(2, tags.size()); assertEquals("value1", tags.get("tag1")); assertEquals("value2", tags.get("tag2")); // Copy object without x-amz-tagging-directive (default to COPY) String destKey = "key=value/2"; - HttpHeaders headersForCopy = Mockito.mock(HttpHeaders.class); - when(headersForCopy.getHeaderString(X_AMZ_CONTENT_SHA256)).thenReturn("mockSignature"); + HttpHeaders headersForCopy = newMockHttpHeaders(); when(headersForCopy.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( BUCKET_NAME + "/" + urlEncode(sourceKeyName)); - objectEndpoint.setHeaders(headersForCopy); - Response copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, null, body); - OzoneKeyDetails destKeyDetails = clientStub.getObjectStore() - .getS3Bucket(DEST_BUCKET_NAME).getKey(destKey); + assertSucceeds(() -> putObject(DEST_BUCKET_NAME, destKey)); + + OzoneKeyDetails destKeyDetails = destBucket.getKey(destKey); - assertEquals(200, copyResponse.getStatus()); Map destKeyTags = destKeyDetails.getTags(); // Since the default directive is COPY, it will copy the source key's tags @@ -633,11 +424,9 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { // With x-amz-tagging-directive = COPY with a different x-amz-tagging when(headersForCopy.getHeaderString(TAG_HEADER)).thenReturn("tag3=value3"); - copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, null, body); - assertEquals(200, copyResponse.getStatus()); + assertSucceeds(() -> putObject(DEST_BUCKET_NAME, destKey)); - destKeyDetails = clientStub.getObjectStore() - .getS3Bucket(DEST_BUCKET_NAME).getKey(destKey); + destKeyDetails = destBucket.getKey(destKey); destKeyTags = destKeyDetails.getTags(); // Since the x-amz-tagging-directive is COPY, we 
ignore the x-amz-tagging @@ -648,11 +437,9 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { // Copy object with x-amz-tagging-directive = REPLACE when(headersForCopy.getHeaderString(TAG_DIRECTIVE_HEADER)).thenReturn("REPLACE"); - copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, null, body); - assertEquals(200, copyResponse.getStatus()); + assertSucceeds(() -> putObject(DEST_BUCKET_NAME, destKey)); - destKeyDetails = clientStub.getObjectStore() - .getS3Bucket(DEST_BUCKET_NAME).getKey(destKey); + destKeyDetails = destBucket.getKey(destKey); destKeyTags = destKeyDetails.getTags(); // Since the x-amz-tagging-directive is REPLACE, we replace the source key @@ -660,66 +447,41 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { assertEquals(1, destKeyTags.size()); assertEquals("value3", destKeyTags.get("tag3")); assertThat(destKeyTags).doesNotContainKeys("tag1", "tag2"); - } - @Test - public void testCopyObjectWithInvalidTagCopyDirective() throws Exception { - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); // Copy object with invalid x-amz-tagging-directive - HttpHeaders headersForCopy = Mockito.mock(HttpHeaders.class); when(headersForCopy.getHeaderString(TAG_DIRECTIVE_HEADER)).thenReturn("INVALID"); - try { - objectEndpoint.put(DEST_BUCKET_NAME, "somekey", CONTENT.length(), 1, null, null, null, body); - } catch (OS3Exception ex) { - assertEquals(INVALID_ARGUMENT.getCode(), ex.getCode()); - assertThat(ex.getErrorMessage()).contains("The tagging copy directive specified is invalid"); - assertEquals(INVALID_ARGUMENT.getHttpCode(), ex.getHttpCode()); - } + OS3Exception e = assertErrorResponse(INVALID_ARGUMENT, + () -> put(objectEndpoint, DEST_BUCKET_NAME, "somekey", CONTENT)); + assertThat(e.getErrorMessage()).contains("The tagging copy directive specified is invalid"); } @Test void testInvalidStorageType() { - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("random"); - OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, null, null, body)); - assertEquals(S3ErrorTable.INVALID_STORAGE_CLASS.getErrorMessage(), - e.getErrorMessage()); + OS3Exception e = assertErrorResponse(S3ErrorTable.INVALID_STORAGE_CLASS, () -> putObject(CONTENT)); assertEquals("random", e.getResource()); } @Test - void testEmptyStorageType() throws IOException, OS3Exception { - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + void testEmptyStorageType() throws Exception { when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(""); - objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT - .length(), 1, null, null, null, body); - OzoneKeyDetails key = - clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) - .getKey(KEY_NAME); + assertSucceeds(() -> putObject(CONTENT)); //default type is set assertEquals( RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), - key.getReplicationConfig()); + bucket.getKey(KEY_NAME).getReplicationConfig()); } @Test - void testDirectoryCreation() throws IOException, - OS3Exception { + void testDirectoryCreation() throws Exception { // GIVEN final String path = "dir/"; // WHEN - try (Response response = objectEndpoint.put(fsoBucket.getName(), path, - 0L, 0, "", null, null, null)) { - assertEquals(HttpStatus.SC_OK, response.getStatus()); - } + 
assertSucceeds(() -> putDir(objectEndpoint, fsoBucket.getName(), path)); // THEN OzoneKeyDetails key = fsoBucket.getKey(path); @@ -727,35 +489,34 @@ void testDirectoryCreation() throws IOException, } @Test - void testDirectoryCreationOverFile() throws IOException, OS3Exception { + void testDirectoryCreationOverFile() throws Exception { // GIVEN final String path = "key"; - final ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.put(FSO_BUCKET_NAME, path, CONTENT.length(), 0, "", null, null, body); - - // WHEN - final OS3Exception exception = assertThrows(OS3Exception.class, - () -> objectEndpoint - .put(FSO_BUCKET_NAME, path + "/", 0, 0, "", null, null, null) - .close()); + assertSucceeds(() -> putObject(FSO_BUCKET_NAME, path)); - // THEN - assertEquals(S3ErrorTable.NO_OVERWRITE.getCode(), exception.getCode()); - assertEquals(S3ErrorTable.NO_OVERWRITE.getHttpCode(), exception.getHttpCode()); + assertErrorResponse(S3ErrorTable.NO_OVERWRITE, + () -> putDir(objectEndpoint, FSO_BUCKET_NAME, path + "/")); } @Test - public void testPutEmptyObject() throws IOException, OS3Exception { - HttpHeaders headersWithTags = Mockito.mock(HttpHeaders.class); - when(headersWithTags.getHeaderString(X_AMZ_CONTENT_SHA256)).thenReturn("mockSignature"); - String emptyString = ""; - ByteArrayInputStream body = new ByteArrayInputStream(emptyString.getBytes(UTF_8)); - objectEndpoint.setHeaders(headersWithTags); - - Response putResponse = objectEndpoint.put(BUCKET_NAME, KEY_NAME, emptyString.length(), 1, null, null, null, body); - assertEquals(200, putResponse.getStatus()); - OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); - assertEquals(0, keyDetails.getDataSize()); + public void testPutEmptyObject() throws Exception { + assertSucceeds(() -> putObject("")); + assertEquals(0, bucket.getKey(KEY_NAME).getDataSize()); + } + + private HttpHeaders newMockHttpHeaders() { + HttpHeaders httpHeaders = mock(HttpHeaders.class); + when(httpHeaders.getHeaderString(X_AMZ_CONTENT_SHA256)).thenReturn("mockSignature"); + return httpHeaders; + } + + /** Put object at {@code bucketName}/{@code keyName} with pre-defined {@link #CONTENT}. */ + private Response putObject(String bucketName, String keyName) throws IOException, OS3Exception { + return put(objectEndpoint, bucketName, keyName, CONTENT); + } + + /** Put object at {@link #BUCKET_NAME}/{@link #KEY_NAME} with the specified content. */ + private Response putObject(String content) throws IOException, OS3Exception { + return put(objectEndpoint, BUCKET_NAME, KEY_NAME, content); } } diff --git a/pom.xml b/pom.xml index 9fcee28d5af3..73ed23ae01ff 100644 --- a/pom.xml +++ b/pom.xml @@ -1129,6 +1129,12 @@ ozone-client ${ozone.version} + + org.apache.ozone + ozone-client + ${ozone.version} + test-jar + org.apache.ozone ozone-common From a0cd761d004e5762bb00dd4b03be9143f735e6f8 Mon Sep 17 00:00:00 2001 From: "Eric C. Ho" Date: Wed, 24 Dec 2025 01:35:43 +0800 Subject: [PATCH 28/36] HDDS-14231. 
Reduce copying in OMFileRequest.getOmKeyInfo (#9547) --- .../ozone/om/helpers/OmDirectoryInfo.java | 4 ++++ .../hadoop/ozone/om/helpers/OmKeyInfo.java | 20 +++++++++++++++++++ .../ozone/om/request/file/OMFileRequest.java | 20 ++----------------- 3 files changed, 26 insertions(+), 18 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java index 13a09fd0d946..3657d5ee68b6 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java @@ -79,6 +79,10 @@ public static Builder builderFromOmKeyInfo(OmKeyInfo keyInfo) { return new Builder(keyInfo); } + public OmKeyInfo.Builder toKeyInfoBuilder() { + return OmKeyInfo.builderFromOmDirectoryInfo(this); + } + /** * Builder for Directory Info. */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index e0e900ccf2df..b0e26c49d695 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -32,7 +32,9 @@ import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.CopyObject; import org.apache.hadoop.hdds.utils.db.DelegatedCodec; @@ -520,6 +522,20 @@ public Builder(OmKeyInfo obj) { keyLocationVersion.isMultipartKey()))); } + private Builder(OmDirectoryInfo dirInfo) { + super(dirInfo); + this.acls = AclListBuilder.of(dirInfo.getAcls()); + this.keyName = dirInfo.getName(); + this.creationTime = dirInfo.getCreationTime(); + this.modificationTime = dirInfo.getModificationTime(); + this.ownerName = dirInfo.getOwner(); + this.tags = MapBuilder.empty(); + this.replicationConfig = RatisReplicationConfig + .getInstance(ReplicationFactor.ONE); + this.omKeyLocationInfoGroups.add( + new OmKeyLocationInfoGroup(0, new ArrayList<>())); + } + public Builder setVolumeName(String volume) { this.volumeName = volume; return this; @@ -930,6 +946,10 @@ public Builder toBuilder() { return new Builder(this); } + public static Builder builderFromOmDirectoryInfo(OmDirectoryInfo dirInfo) { + return new Builder(dirInfo); + } + public OmDirectoryInfo.Builder toDirectoryInfoBuilder() { return OmDirectoryInfo.builderFromOmKeyInfo(this); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java index 6a23ab21537a..8403828f395a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java @@ -30,16 +30,13 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; -import java.util.Collections; import java.util.Iterator; import 
java.util.List; import java.util.Map; import java.util.Optional; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; @@ -54,7 +51,6 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.slf4j.Logger; @@ -744,22 +740,10 @@ public static OmKeyInfo getKeyInfoWithFullPath(OmKeyInfo parentInfo, OmKeyInfo o public static OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, OmDirectoryInfo dirInfo, String keyName) { - return new OmKeyInfo.Builder() - .setParentObjectID(dirInfo.getParentObjectID()) - .setKeyName(keyName) - .setAcls(dirInfo.getAcls()) - .addAllMetadata(dirInfo.getMetadata()) + return dirInfo.toKeyInfoBuilder() .setVolumeName(volumeName) .setBucketName(bucketName) - .setCreationTime(dirInfo.getCreationTime()) - .setModificationTime(dirInfo.getModificationTime()) - .setObjectID(dirInfo.getObjectID()) - .setUpdateID(dirInfo.getUpdateID()) - .setReplicationConfig(RatisReplicationConfig - .getInstance(HddsProtos.ReplicationFactor.ONE)) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, new ArrayList<>()))) - .setOwnerName(dirInfo.getOwner()) + .setKeyName(keyName) .build(); } From fcbf2b57cfefb4dd169ad6032ed9911bbb0e1891 Mon Sep 17 00:00:00 2001 From: echonesis Date: Mon, 15 Dec 2025 01:04:33 +0800 Subject: [PATCH 29/36] HDDS-14123. Refactor BucketEndpoint#Put method --- .../ozone/s3/endpoint/BucketEndpoint.java | 182 +++++++++++++++++- 1 file changed, 179 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index 4e7aff26e9c1..b7fb4beaf4c3 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -31,6 +31,7 @@ import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; +import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -94,6 +95,12 @@ public class BucketEndpoint extends EndpointBase { private boolean listKeysShallowEnabled; private int maxKeysLimit = 1000; + private static final BucketOperationHandlerFactory HANDLER_FACTORY = + new BucketOperationHandlerFactory(); + + @Context + private HttpHeaders headers; + private BucketEndpointContext context; public BucketEndpoint() { @@ -101,11 +108,10 @@ public BucketEndpoint() { this.context = new BucketEndpointContext(this); } - private BucketEndpointContext getBucketContext() { + private BucketEndpointContext getContext() { return context; } - - + /** * Rest endpoint to list objects in a specific bucket. *
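The HDDS-14231 change above replaces the field-by-field copy in OMFileRequest.getOmKeyInfo with a builder conversion owned by OmDirectoryInfo. A hedged usage sketch, with placeholder volume, bucket and key names and an assumed existing dirInfo:

    // Converting a directory entry into an OmKeyInfo for FSO path handling.
    OmKeyInfo keyInfo = dirInfo.toKeyInfoBuilder()   // copies ACLs, timestamps, owner and object IDs
        .setVolumeName("vol1")                       // placeholder values
        .setBucketName("bucket1")
        .setKeyName("dir1/")
        .build();
    // As before the refactor, replication defaults to RATIS/ONE and the key gets an empty location group.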

@@ -595,6 +601,176 @@ public S3BucketAcl getAcl(String bucketName) } } +<<<<<<< HEAD +======= + /** + * Implement acl put. + *

+ * see: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAcl.html + */ + public Response putAcl(String bucketName, + InputStream body) throws IOException, OS3Exception { + long startNanos = Time.monotonicNowNanos(); + String grantReads = getHeaders().getHeaderString(S3Acl.GRANT_READ); + String grantWrites = getHeaders().getHeaderString(S3Acl.GRANT_WRITE); + String grantReadACP = getHeaders().getHeaderString(S3Acl.GRANT_READ_ACP); + String grantWriteACP = getHeaders().getHeaderString(S3Acl.GRANT_WRITE_ACP); + String grantFull = getHeaders().getHeaderString(S3Acl.GRANT_FULL_CONTROL); + + try { + OzoneBucket bucket = getBucket(bucketName); + S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner()); + OzoneVolume volume = getVolume(); + + List ozoneAclListOnBucket = new ArrayList<>(); + List ozoneAclListOnVolume = new ArrayList<>(); + + if (grantReads == null && grantWrites == null && grantReadACP == null + && grantWriteACP == null && grantFull == null) { + S3BucketAcl putBucketAclRequest = + new PutBucketAclRequestUnmarshaller().readFrom(body); + // Handle grants in body + ozoneAclListOnBucket.addAll( + S3Acl.s3AclToOzoneNativeAclOnBucket(putBucketAclRequest)); + ozoneAclListOnVolume.addAll( + S3Acl.s3AclToOzoneNativeAclOnVolume(putBucketAclRequest)); + } else { + + // Handle grants in headers + if (grantReads != null) { + ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantReads, + S3Acl.ACLType.READ.getValue())); + ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantReads, + S3Acl.ACLType.READ.getValue())); + } + if (grantWrites != null) { + ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantWrites, + S3Acl.ACLType.WRITE.getValue())); + ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantWrites, + S3Acl.ACLType.WRITE.getValue())); + } + if (grantReadACP != null) { + ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantReadACP, + S3Acl.ACLType.READ_ACP.getValue())); + ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantReadACP, + S3Acl.ACLType.READ_ACP.getValue())); + } + if (grantWriteACP != null) { + ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantWriteACP, + S3Acl.ACLType.WRITE_ACP.getValue())); + ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantWriteACP, + S3Acl.ACLType.WRITE_ACP.getValue())); + } + if (grantFull != null) { + ozoneAclListOnBucket.addAll(getAndConvertAclOnBucket(grantFull, + S3Acl.ACLType.FULL_CONTROL.getValue())); + ozoneAclListOnVolume.addAll(getAndConvertAclOnVolume(grantFull, + S3Acl.ACLType.FULL_CONTROL.getValue())); + } + } + // A put request will reset all previous ACLs on bucket + bucket.setAcl(ozoneAclListOnBucket); + // A put request will reset input user/group's permission on volume + List acls = bucket.getAcls(); + List aclsToRemoveOnVolume = new ArrayList<>(); + List currentAclsOnVolume = volume.getAcls(); + // Remove input user/group's permission from Volume first + if (!currentAclsOnVolume.isEmpty()) { + for (OzoneAcl acl : acls) { + if (acl.getAclScope() == ACCESS) { + aclsToRemoveOnVolume.addAll(OzoneAclUtil.filterAclList( + acl.getName(), acl.getType(), currentAclsOnVolume)); + } + } + for (OzoneAcl acl : aclsToRemoveOnVolume) { + volume.removeAcl(acl); + } + } + // Add new permission on Volume + for (OzoneAcl acl : ozoneAclListOnVolume) { + volume.addAcl(acl); + } + } catch (OMException exception) { + getMetrics().updatePutAclFailureStats(startNanos); + auditWriteFailure(S3GAction.PUT_ACL, exception); + if (exception.getResult() == ResultCodes.BUCKET_NOT_FOUND) { 
+ throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, exception); + } else if (isAccessDenied(exception)) { + throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, exception); + } + throw exception; + } catch (OS3Exception ex) { + getMetrics().updatePutAclFailureStats(startNanos); + throw ex; + } + getMetrics().updatePutAclSuccessStats(startNanos); + return Response.status(HttpStatus.SC_OK).build(); + } + + /** + * Example: x-amz-grant-write: \ + * uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", \ + * id="555566667777". + */ + private List getAndConvertAclOnBucket(String value, + String permission) + throws OS3Exception { + List ozoneAclList = new ArrayList<>(); + if (StringUtils.isEmpty(value)) { + return ozoneAclList; + } + String[] subValues = value.split(","); + for (String acl : subValues) { + String[] part = acl.split("="); + if (part.length != 2) { + throw newError(S3ErrorTable.INVALID_ARGUMENT, acl); + } + S3Acl.ACLIdentityType type = + S3Acl.ACLIdentityType.getTypeFromHeaderType(part[0]); + if (type == null || !type.isSupported()) { + LOG.warn("S3 grantee {} is null or not supported", part[0]); + throw newError(NOT_IMPLEMENTED, part[0]); + } + // Build ACL on Bucket + EnumSet aclsOnBucket = S3Acl.getOzoneAclOnBucketFromS3Permission(permission); + OzoneAcl defaultOzoneAcl = OzoneAcl.of( + IAccessAuthorizer.ACLIdentityType.USER, part[1], OzoneAcl.AclScope.DEFAULT, aclsOnBucket + ); + OzoneAcl accessOzoneAcl = OzoneAcl.of(IAccessAuthorizer.ACLIdentityType.USER, part[1], ACCESS, aclsOnBucket); + ozoneAclList.add(defaultOzoneAcl); + ozoneAclList.add(accessOzoneAcl); + } + return ozoneAclList; + } + + private List getAndConvertAclOnVolume(String value, + String permission) + throws OS3Exception { + List ozoneAclList = new ArrayList<>(); + if (StringUtils.isEmpty(value)) { + return ozoneAclList; + } + String[] subValues = value.split(","); + for (String acl : subValues) { + String[] part = acl.split("="); + if (part.length != 2) { + throw newError(S3ErrorTable.INVALID_ARGUMENT, acl); + } + S3Acl.ACLIdentityType type = + S3Acl.ACLIdentityType.getTypeFromHeaderType(part[0]); + if (type == null || !type.isSupported()) { + LOG.warn("S3 grantee {} is null or not supported", part[0]); + throw newError(NOT_IMPLEMENTED, part[0]); + } + // Build ACL on Volume + EnumSet aclsOnVolume = + S3Acl.getOzoneAclOnVolumeFromS3Permission(permission); + OzoneAcl accessOzoneAcl = OzoneAcl.of(IAccessAuthorizer.ACLIdentityType.USER, part[1], ACCESS, aclsOnVolume); + ozoneAclList.add(accessOzoneAcl); + } + return ozoneAclList; + } +>>>>>>> 210aa3e9a3 (HDDS-14123. 
Refactor BucketEndpoint#Put method) private void addKey(ListObjectResponse response, OzoneKey next) { KeyMetadata keyMetadata = new KeyMetadata(); From 1ce6982af128999faaf1ef53af998e09e2f2a4a3 Mon Sep 17 00:00:00 2001 From: echonesis Date: Wed, 24 Dec 2025 14:21:07 +0800 Subject: [PATCH 30/36] fix: update CR --- .../hadoop/ozone/s3/endpoint/AclHandler.java | 76 ++++--- .../ozone/s3/endpoint/BucketEndpoint.java | 65 ++---- .../s3/endpoint/BucketEndpointContext.java | 108 --------- .../s3/endpoint/BucketOperationHandler.java | 32 +-- .../BucketOperationHandlerFactory.java | 124 ---------- .../ozone/s3/endpoint/EndpointBuilder.java | 4 + .../ozone/s3/endpoint/TestAclHandler.java | 131 ++++++----- .../endpoint/TestBucketEndpointContext.java | 211 ------------------ .../TestBucketOperationHandlerFactory.java | 175 --------------- 9 files changed, 149 insertions(+), 777 deletions(-) delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java index 9d1e05eaa6b9..499d355e831f 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java @@ -26,7 +26,6 @@ import java.util.ArrayList; import java.util.EnumSet; import java.util.List; -import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.ozone.OzoneAcl; @@ -38,47 +37,59 @@ import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.util.Time; import org.apache.http.HttpStatus; +import javax.annotation.PostConstruct; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Handler for bucket ACL operations (?acl query parameter). * Implements PUT operations for bucket Access Control Lists. + * + * This handler extends EndpointBase to inherit all required functionality + * (configuration, headers, request context, audit logging, metrics, etc.). */ -public class AclHandler implements BucketOperationHandler { - +public class AclHandler extends EndpointBase implements BucketOperationHandler { + private static final Logger LOG = LoggerFactory.getLogger(AclHandler.class); - - @Override - public String getQueryParamName() { - return "acl"; + + /** + * Determine if this handler should handle the current request. + * @return true if the request has the "acl" query parameter + */ + private boolean shouldHandle() { + return queryParams().get(QueryParams.ACL) != null; } - + /** * Implement acl put. *

* see: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAcl.html */ @Override - public Response handlePutRequest( - String bucketName, - InputStream body, - HttpHeaders headers, - BucketEndpointContext context, - long startNanos) throws IOException, OS3Exception { + public Response handlePutRequest(String bucketName, InputStream body) + throws IOException, OS3Exception { - String grantReads = headers.getHeaderString(S3Acl.GRANT_READ); - String grantWrites = headers.getHeaderString(S3Acl.GRANT_WRITE); - String grantReadACP = headers.getHeaderString(S3Acl.GRANT_READ_ACP); - String grantWriteACP = headers.getHeaderString(S3Acl.GRANT_WRITE_ACP); - String grantFull = headers.getHeaderString(S3Acl.GRANT_FULL_CONTROL); + if (!shouldHandle()) { + return null; // Not responsible for this request + } + + long startNanos = Time.monotonicNowNanos(); + S3GAction s3GAction = S3GAction.PUT_ACL; + + String grantReads = getHeaders().getHeaderString(S3Acl.GRANT_READ); + String grantWrites = getHeaders().getHeaderString(S3Acl.GRANT_WRITE); + String grantReadACP = getHeaders().getHeaderString(S3Acl.GRANT_READ_ACP); + String grantWriteACP = getHeaders().getHeaderString(S3Acl.GRANT_WRITE_ACP); + String grantFull = getHeaders().getHeaderString(S3Acl.GRANT_FULL_CONTROL); try { - OzoneBucket bucket = context.getBucket(bucketName); - S3Owner.verifyBucketOwnerCondition(headers, bucketName, bucket.getOwner()); - OzoneVolume volume = context.getVolume(); + OzoneBucket bucket = getBucket(bucketName); + S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner()); + OzoneVolume volume = getVolume(); List ozoneAclListOnBucket = new ArrayList<>(); List ozoneAclListOnVolume = new ArrayList<>(); @@ -152,25 +163,26 @@ public Response handlePutRequest( volume.addAcl(acl); } - context.getEndpoint().getMetrics().updatePutAclSuccessStats(startNanos); + getMetrics().updatePutAclSuccessStats(startNanos); + auditWriteSuccess(s3GAction); return Response.status(HttpStatus.SC_OK).build(); } catch (OMException exception) { - context.getEndpoint().getMetrics().updatePutAclFailureStats(startNanos); - context.auditWriteFailure(S3GAction.PUT_ACL, exception); + getMetrics().updatePutAclFailureStats(startNanos); + auditWriteFailure(s3GAction, exception); if (exception.getResult() == ResultCodes.BUCKET_NOT_FOUND) { throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, exception); - } else if (context.isAccessDenied(exception)) { + } else if (isAccessDenied(exception)) { throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, exception); } throw exception; } catch (OS3Exception ex) { - context.getEndpoint().getMetrics().updatePutAclFailureStats(startNanos); - context.auditWriteFailure(S3GAction.PUT_ACL, ex); + getMetrics().updatePutAclFailureStats(startNanos); + auditWriteFailure(s3GAction, ex); throw ex; } } - + /** * Convert ACL string to Ozone ACL on bucket. 
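The grant headers accepted here follow the format documented on getAndConvertAclOnBucket / parseAndConvertAcl: a comma-separated list of grantee entries, each of the form type=value. A small illustration of what a single grant expands to, built from the calls shown in this diff; the grantee name is made up and the EnumSet element type is inferred, since generics were stripped from the flattened diff text:

    // x-amz-grant-read: id=testuser  ->  two bucket ACL entries for "testuser"
    EnumSet<IAccessAuthorizer.ACLType> rights =
        S3Acl.getOzoneAclOnBucketFromS3Permission(S3Acl.ACLType.READ.getValue());
    OzoneAcl defaultAcl = OzoneAcl.of(
        IAccessAuthorizer.ACLIdentityType.USER, "testuser", OzoneAcl.AclScope.DEFAULT, rights);
    OzoneAcl accessAcl = OzoneAcl.of(
        IAccessAuthorizer.ACLIdentityType.USER, "testuser", OzoneAcl.AclScope.ACCESS, rights);
    // bucket.setAcl(...) then replaces the bucket's previous ACL list with the collected entries,
    // while the volume-scoped variant (ACCESS only) is applied to the owning volume.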
* @@ -258,4 +270,10 @@ private List parseAndConvertAcl(String value, String permission, return ozoneAclList; } + + @Override + @PostConstruct + public void init() { + // No initialization needed for AclHandler + } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index b7fb4beaf4c3..25c040fed12b 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -31,8 +31,8 @@ import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; -import java.util.EnumSet; -import java.util.HashMap; +import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -95,23 +95,11 @@ public class BucketEndpoint extends EndpointBase { private boolean listKeysShallowEnabled; private int maxKeysLimit = 1000; - private static final BucketOperationHandlerFactory HANDLER_FACTORY = - new BucketOperationHandlerFactory(); - - @Context - private HttpHeaders headers; - - private BucketEndpointContext context; + private static final List PUT_HANDLERS = + Collections.unmodifiableList(Arrays.asList( + new AclHandler() + )); - public BucketEndpoint() { - super(); - this.context = new BucketEndpointContext(this); - } - - private BucketEndpointContext getContext() { - return context; - } - /** * Rest endpoint to list objects in a specific bucket. *

@@ -325,18 +313,28 @@ public Response put( @PathParam(BUCKET) String bucketName, InputStream body ) throws IOException, OS3Exception { + + // Chain of responsibility: let each handler try to handle the request + for (BucketOperationHandler handler : PUT_HANDLERS) { + Response response = handler.handlePutRequest(bucketName, body); + if (response != null) { + return response; // Handler handled the request + } + } + + // No handler handled the request, execute default operation: create bucket + return handleCreateBucket(bucketName); + } + + /** + * Default PUT bucket operation (create bucket). + */ + private Response handleCreateBucket(String bucketName) + throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.CREATE_BUCKET; try { - final String aclMarker = queryParams().get(QueryParams.ACL); - if (aclMarker != null) { - s3GAction = S3GAction.PUT_ACL; - Response response = putAcl(bucketName, body); - auditWriteSuccess(s3GAction); - return response; - } - String location = createS3Bucket(bucketName); auditWriteSuccess(s3GAction); getMetrics().updateCreateBucketSuccessStats(startNanos); @@ -355,18 +353,6 @@ public Response put( } } - /** - * Map query parameter to corresponding S3GAction for audit logging. - */ - private S3GAction getActionForQueryParam(String queryParam) { - switch (queryParam) { - case "acl": - return S3GAction.PUT_ACL; - default: - return S3GAction.GET_BUCKET; - } - } - public Response listMultipartUploads( String bucketName, String prefix, @@ -601,8 +587,6 @@ public S3BucketAcl getAcl(String bucketName) } } -<<<<<<< HEAD -======= /** * Implement acl put. *

@@ -770,7 +754,6 @@ private List getAndConvertAclOnVolume(String value, } return ozoneAclList; } ->>>>>>> 210aa3e9a3 (HDDS-14123. Refactor BucketEndpoint#Put method) private void addKey(ListObjectResponse response, OzoneKey next) { KeyMetadata keyMetadata = new KeyMetadata(); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java deleted file mode 100644 index 0cefb0300b26..000000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import java.io.IOException; -import org.apache.hadoop.ozone.audit.AuditAction; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; - -/** - * Context object that provides access to BucketEndpoint resources. - * This allows handlers to access endpoint functionality without - * tight coupling to the BucketEndpoint class. - * - * Since BucketEndpoint extends EndpointBase, handlers can access: - * - Bucket and Volume operations - * - Methods inherited from EndpointBase - */ -public class BucketEndpointContext { - - private final BucketEndpoint endpoint; - - public BucketEndpointContext(BucketEndpoint endpoint) { - this.endpoint = endpoint; - } - - /** - * Get the bucket object. - * Delegates to BucketEndpoint's inherited getBucket() from EndpointBase. - * - * @param bucketName the bucket name - * @return OzoneBucket instance - * @throws IOException if bucket cannot be retrieved - * @throws OS3Exception if S3-specific error occurs - */ - public OzoneBucket getBucket(String bucketName) - throws IOException, OS3Exception { - return endpoint.getBucket(bucketName); - } - - /** - * Get the volume object. - * Delegates to BucketEndpoint's inherited getVolume() from EndpointBase. - * - * @return OzoneVolume instance - * @throws IOException if volume cannot be retrieved - * @throws OS3Exception if S3-specific error occurs - */ - public OzoneVolume getVolume() throws IOException, OS3Exception { - return endpoint.getVolume(); - } - - /** - * Check if an exception indicates access denied. - * This checks for OMException.ResultCodes that indicate permission issues. 
- * - * @param ex the exception to check - * @return true if access is denied - */ - public boolean isAccessDenied(Exception ex) { - // Check if it's an OMException with ACCESS_DENIED result code - if (ex instanceof OMException) { - OMException omEx = (OMException) ex; - return omEx.getResult() == OMException.ResultCodes.PERMISSION_DENIED || - omEx.getResult() == OMException.ResultCodes.ACCESS_DENIED; - } - return false; - } - - /** - * Audit a write operation failure. - * Delegates to BucketEndpoint's inherited auditWriteFailure() from EndpointBase. - * - * @param action the audit action being performed - * @param ex the exception that occurred - */ - public void auditWriteFailure(AuditAction action, Throwable ex) { - endpoint.auditWriteFailure(action, ex); - } - - /** - * Get reference to the endpoint for accessing other methods. - * Use with caution - prefer adding specific methods to this context - * rather than exposing the entire endpoint. - * - * @return BucketEndpoint instance - */ - protected BucketEndpoint getEndpoint() { - return endpoint; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java index b42c59b257c1..31d771750bae 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java @@ -19,42 +19,30 @@ import java.io.IOException; import java.io.InputStream; -import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import org.apache.hadoop.ozone.s3.exception.OS3Exception; /** - * Interface for handling bucket operations based on query parameters. + * Interface for handling bucket operations using chain of responsibility pattern. * Each implementation handles a specific S3 bucket subresource operation * (e.g., ?acl, ?lifecycle, ?notification). + * + * Implementations should extend EndpointBase to inherit all required functionality + * (configuration, headers, request context, audit logging, metrics, etc.). */ public interface BucketOperationHandler { /** - * Handle the bucket operation. + * Handle the bucket PUT operation if this handler is responsible for it. + * The handler inspects the request (query parameters, headers, etc.) to determine + * if it should handle the request. * * @param bucketName the name of the bucket * @param body the request body stream - * @param headers the HTTP headers - * @param context the endpoint context containing shared dependencies - * @param startNanos the start time in nanoseconds for metrics tracking - * @return HTTP response + * @return Response if this handler handles the request, null otherwise * @throws IOException if an I/O error occurs * @throws OS3Exception if an S3-specific error occurs */ - Response handlePutRequest( - String bucketName, - InputStream body, - HttpHeaders headers, - BucketEndpointContext context, - long startNanos - ) throws IOException, OS3Exception; - - /** - * Get the query parameter name this handler is responsible for. 
- * For example: "acl", "lifecycle", "notification" - * - * @return the query parameter name - */ - String getQueryParamName(); + Response handlePutRequest(String bucketName, InputStream body) + throws IOException, OS3Exception; } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java deleted file mode 100644 index 1edb19165ded..000000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import com.google.common.annotations.VisibleForTesting; -import java.util.HashMap; -import java.util.Map; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Factory class that manages all bucket operation handlers. - * Provides a registry pattern for looking up handlers based on query parameters. - */ -public class BucketOperationHandlerFactory { - - private static final Logger LOG = - LoggerFactory.getLogger(BucketOperationHandlerFactory.class); - - private final Map handlers = new HashMap<>(); - - /** - * Register all available bucket operation handlers. - */ - public BucketOperationHandlerFactory() { - registerDefaultHandlers(); - } - - /** - * Register default handlers for S3 bucket operations. - */ - private void registerDefaultHandlers() { - register(new AclHandler()); - } - - /** - * Register a bucket operation handler. - * - * @param handler the handler to register - */ - @VisibleForTesting - public void register(BucketOperationHandler handler) { - String queryParam = handler.getQueryParamName(); - if (handlers.containsKey(queryParam)) { - LOG.warn("Overwriting existing handler for query parameter: {}", - queryParam); - } - handlers.put(queryParam, handler); - LOG.debug("Registered handler for query parameter: {}", queryParam); - } - - /** - * Get a handler for the specified query parameter. - * - * @param queryParam the query parameter name - * @return the corresponding handler, or null if not found - */ - public BucketOperationHandler getHandler(String queryParam) { - return handlers.get(queryParam); - } - - /** - * Check if a handler exists for the specified query parameter. - * - * @param queryParam the query parameter name - * @return true if a handler exists - */ - public boolean hasHandler(String queryParam) { - return handlers.containsKey(queryParam); - } - - /** - * Find the first supported query parameter that has a non-null value. 
- * - * This method iterates through all registered handlers and checks if the - * corresponding query parameter has a non-null value in the provided map. - * - * @param queryParams map of query parameter names to their values - * @return the name of the first query parameter that has both a non-null value - * and a registered handler, or null if none found - */ - public String findFirstSupportedQueryParam(Map queryParams) { - if (queryParams == null || queryParams.isEmpty()) { - return null; - } - - // Iterate through registered handlers and find the first one with a value - for (Map.Entry entry : handlers.entrySet()) { - String paramName = entry.getKey(); - String paramValue = queryParams.get(paramName); - - if (paramValue != null) { - return paramName; - } - } - - return null; - } - - /** - * Get all registered query parameter names. - * - * @return set of query parameter names - */ - @VisibleForTesting - public java.util.Set getRegisteredQueryParams() { - return handlers.keySet(); - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java index 13db3962a89b..07132839a6fb 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java @@ -150,6 +150,10 @@ public static EndpointBuilder newBucketEndpointBuilder() { return new EndpointBuilder<>(BucketEndpoint::new); } + public static EndpointBuilder newAclHandlerBuilder() { + return new EndpointBuilder<>(AclHandler::new); + } + public static EndpointBuilder newObjectEndpointBuilder() { return new EndpointBuilder<>(ObjectEndpoint::new); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java index 9e7cc3f3280d..f59c2d80e3f5 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java @@ -20,6 +20,8 @@ import static java.net.HttpURLConnection.HTTP_NOT_IMPLEMENTED; import static java.net.HttpURLConnection.HTTP_OK; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.Mockito.any; import static org.mockito.Mockito.eq; @@ -51,7 +53,6 @@ public class TestAclHandler { private static final String BUCKET_NAME = OzoneConsts.S3_BUCKET; private OzoneClient client; - private BucketEndpointContext context; private AclHandler aclHandler; private HttpHeaders headers; @@ -62,13 +63,11 @@ public void setup() throws IOException { headers = mock(HttpHeaders.class); - BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder() + // Build AclHandler using EndpointBuilder since it extends EndpointBase + aclHandler = EndpointBuilder.newAclHandlerBuilder() .setClient(client) .setHeaders(headers) .build(); - - context = new BucketEndpointContext(bucketEndpoint); - aclHandler = new AclHandler(); } @AfterEach @@ -79,19 +78,37 @@ public void clean() throws IOException { } @Test - public void testGetQueryParamName() { - assertEquals("acl", 
aclHandler.getQueryParamName(), - "Query param name should be 'acl'"); + public void testHandlePutRequestWithAclQueryParam() throws Exception { + // Set up query parameter to indicate ACL operation + aclHandler.queryParamsForTest().set("acl", ""); + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("id=\"testuser\""); + + Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); + + assertNotNull(response, "Handler should handle request with ?acl param"); + assertEquals(HTTP_OK, response.getStatus(), + "PUT ACL should return 200 OK"); + } + + @Test + public void testHandlePutRequestWithoutAclQueryParam() throws Exception { + // No "acl" query parameter - handler should not handle request + when(headers.getHeaderString(S3Acl.GRANT_READ)) + .thenReturn("id=\"testuser\""); + + Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); + + assertNull(response, "Handler should return null without ?acl param"); } @Test public void testHandlePutRequestWithReadHeader() throws Exception { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"testuser\""); - long startNanos = System.nanoTime(); - Response response = aclHandler.handlePutRequest( - BUCKET_NAME, null, headers, context, startNanos); + Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL should return 200 OK"); @@ -99,12 +116,11 @@ public void testHandlePutRequestWithReadHeader() throws Exception { @Test public void testHandlePutRequestWithWriteHeader() throws Exception { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_WRITE)) .thenReturn("id=\"testuser\""); - long startNanos = System.nanoTime(); - Response response = aclHandler.handlePutRequest( - BUCKET_NAME, null, headers, context, startNanos); + Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL should return 200 OK"); @@ -112,12 +128,11 @@ public void testHandlePutRequestWithWriteHeader() throws Exception { @Test public void testHandlePutRequestWithReadAcpHeader() throws Exception { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ_ACP)) .thenReturn("id=\"testuser\""); - long startNanos = System.nanoTime(); - Response response = aclHandler.handlePutRequest( - BUCKET_NAME, null, headers, context, startNanos); + Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL should return 200 OK"); @@ -125,12 +140,11 @@ public void testHandlePutRequestWithReadAcpHeader() throws Exception { @Test public void testHandlePutRequestWithWriteAcpHeader() throws Exception { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_WRITE_ACP)) .thenReturn("id=\"testuser\""); - long startNanos = System.nanoTime(); - Response response = aclHandler.handlePutRequest( - BUCKET_NAME, null, headers, context, startNanos); + Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL should return 200 OK"); @@ -138,12 +152,11 @@ public void testHandlePutRequestWithWriteAcpHeader() throws Exception { @Test public void testHandlePutRequestWithFullControlHeader() throws Exception { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_FULL_CONTROL)) .thenReturn("id=\"testuser\""); - long startNanos = 
System.nanoTime(); - Response response = aclHandler.handlePutRequest( - BUCKET_NAME, null, headers, context, startNanos); + Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL should return 200 OK"); @@ -151,14 +164,13 @@ public void testHandlePutRequestWithFullControlHeader() throws Exception { @Test public void testHandlePutRequestWithMultipleHeaders() throws Exception { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"testuser1\""); when(headers.getHeaderString(S3Acl.GRANT_WRITE)) .thenReturn("id=\"testuser2\""); - long startNanos = System.nanoTime(); - Response response = aclHandler.handlePutRequest( - BUCKET_NAME, null, headers, context, startNanos); + Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL with multiple headers should return 200 OK"); @@ -166,13 +178,12 @@ public void testHandlePutRequestWithMultipleHeaders() throws Exception { @Test public void testHandlePutRequestWithUnsupportedGranteeType() { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("uri=\"http://example.com\""); - long startNanos = System.nanoTime(); OS3Exception exception = assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, - startNanos); + aclHandler.handlePutRequest(BUCKET_NAME, null); }, "Should throw OS3Exception for unsupported grantee type"); assertEquals(HTTP_NOT_IMPLEMENTED, exception.getHttpCode(), @@ -181,13 +192,12 @@ public void testHandlePutRequestWithUnsupportedGranteeType() { @Test public void testHandlePutRequestWithEmailAddressType() { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("emailAddress=\"test@example.com\""); - long startNanos = System.nanoTime(); OS3Exception exception = assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, - startNanos); + aclHandler.handlePutRequest(BUCKET_NAME, null); }, "Should throw OS3Exception for email address grantee type"); assertEquals(HTTP_NOT_IMPLEMENTED, exception.getHttpCode(), @@ -196,18 +206,18 @@ public void testHandlePutRequestWithEmailAddressType() { @Test public void testHandlePutRequestBucketNotFound() { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"testuser\""); - long startNanos = System.nanoTime(); assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest("nonexistent-bucket", null, headers, - context, startNanos); + aclHandler.handlePutRequest("nonexistent-bucket", null); }, "Should throw OS3Exception for non-existent bucket"); } @Test public void testHandlePutRequestWithBody() throws Exception { + aclHandler.queryParamsForTest().set("acl", ""); String aclXml = "\n" + "\n" + " \n" + @@ -228,9 +238,7 @@ public void testHandlePutRequestWithBody() throws Exception { InputStream body = new ByteArrayInputStream( aclXml.getBytes(StandardCharsets.UTF_8)); - long startNanos = System.nanoTime(); - Response response = aclHandler.handlePutRequest( - BUCKET_NAME, body, headers, context, startNanos); + Response response = aclHandler.handlePutRequest(BUCKET_NAME, body); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL with body should return 200 OK"); @@ -238,24 +246,22 @@ public void testHandlePutRequestWithBody() throws Exception { 
@Test public void testHandlePutRequestWithInvalidHeaderFormat() { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("invalid-format"); - long startNanos = System.nanoTime(); assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, - startNanos); + aclHandler.handlePutRequest(BUCKET_NAME, null); }, "Should throw OS3Exception for invalid header format"); } @Test public void testHandlePutRequestWithMultipleGrantees() throws Exception { + aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"user1\",id=\"user2\""); - long startNanos = System.nanoTime(); - Response response = aclHandler.handlePutRequest( - BUCKET_NAME, null, headers, context, startNanos); + Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL with multiple grantees should return 200 OK"); @@ -263,15 +269,15 @@ public void testHandlePutRequestWithMultipleGrantees() throws Exception { @Test public void testPutAclReplacesExistingAcls() throws Exception { + aclHandler.queryParamsForTest().set("acl", ""); + // Set initial ACL when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"user1\""); when(headers.getHeaderString(S3Acl.GRANT_WRITE)) .thenReturn(null); - long startNanos = System.nanoTime(); - aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, - startNanos); + aclHandler.handlePutRequest(BUCKET_NAME, null); // Replace with new ACL when(headers.getHeaderString(S3Acl.GRANT_READ)) @@ -279,8 +285,7 @@ public void testPutAclReplacesExistingAcls() throws Exception { when(headers.getHeaderString(S3Acl.GRANT_WRITE)) .thenReturn("id=\"user2\""); - Response response = aclHandler.handlePutRequest( - BUCKET_NAME, null, headers, context, startNanos); + Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL should replace existing ACLs"); @@ -288,55 +293,47 @@ public void testPutAclReplacesExistingAcls() throws Exception { @Test public void testAuditLoggingOnBucketNotFound() throws Exception { - // Create a spy of BucketEndpoint to verify audit logging - BucketEndpoint spyEndpoint = spy(EndpointBuilder.newBucketEndpointBuilder() + // Create a spy of AclHandler to verify audit logging + AclHandler spyHandler = spy(EndpointBuilder.newAclHandlerBuilder() .setClient(client) .setHeaders(headers) .build()); - BucketEndpointContext spyContext = new BucketEndpointContext(spyEndpoint); - + spyHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"testuser\""); - long startNanos = System.nanoTime(); - // This should throw exception for non-existent bucket assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest("nonexistent-bucket", null, headers, - spyContext, startNanos); + spyHandler.handlePutRequest("nonexistent-bucket", null); }); // Verify that auditWriteFailure was called with PUT_ACL action - // Note: getBucket() wraps OMException as OS3Exception, so we catch OS3Exception - verify(spyEndpoint, times(1)).auditWriteFailure( + verify(spyHandler, times(1)).auditWriteFailure( eq(S3GAction.PUT_ACL), any(OS3Exception.class)); } @Test public void testAuditLoggingOnInvalidArgument() throws Exception { - // Create a spy of BucketEndpoint to verify audit logging - BucketEndpoint spyEndpoint = spy(EndpointBuilder.newBucketEndpointBuilder() + // Create a spy of 
AclHandler to verify audit logging + AclHandler spyHandler = spy(EndpointBuilder.newAclHandlerBuilder() .setClient(client) .setHeaders(headers) .build()); - BucketEndpointContext spyContext = new BucketEndpointContext(spyEndpoint); + spyHandler.queryParamsForTest().set("acl", ""); // Invalid format will trigger OS3Exception when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("invalid-format"); - long startNanos = System.nanoTime(); - assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest(BUCKET_NAME, null, headers, - spyContext, startNanos); + spyHandler.handlePutRequest(BUCKET_NAME, null); }); // Verify that auditWriteFailure was called with PUT_ACL action - verify(spyEndpoint, times(1)).auditWriteFailure( + verify(spyHandler, times(1)).auditWriteFailure( eq(S3GAction.PUT_ACL), any(OS3Exception.class)); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java deleted file mode 100644 index 7d27ae23d664..000000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.mock; - -import java.io.IOException; -import javax.ws.rs.core.HttpHeaders; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; - -/** - * Test class for BucketEndpointContext. 
- */ -public class TestBucketEndpointContext { - - private static final String BUCKET_NAME = OzoneConsts.S3_BUCKET; - private OzoneClient client; - private BucketEndpointContext context; - - @BeforeEach - public void setup() throws IOException { - client = new OzoneClientStub(); - client.getObjectStore().createS3Bucket(BUCKET_NAME); - - HttpHeaders headers = mock(HttpHeaders.class); - - BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder() - .setClient(client) - .setHeaders(headers) - .build(); - - context = new BucketEndpointContext(bucketEndpoint); - } - - @AfterEach - public void clean() throws IOException { - if (client != null) { - client.close(); - } - } - - @Test - public void testGetBucket() throws IOException, OS3Exception { - OzoneBucket bucket = context.getBucket(BUCKET_NAME); - assertNotNull(bucket, "Bucket should not be null"); - assertEquals(BUCKET_NAME, bucket.getName(), - "Bucket name should match"); - } - - @Test - public void testGetBucketNotFound() { - assertThrows(OS3Exception.class, () -> { - context.getBucket("nonexistent-bucket"); - }, "Should throw OS3Exception for non-existent bucket"); - } - - @Test - public void testGetVolume() throws IOException, OS3Exception { - OzoneVolume volume = context.getVolume(); - assertNotNull(volume, "Volume should not be null"); - } - - @Test - public void testIsAccessDeniedWithPermissionDenied() { - OMException exception = new OMException("Access denied", - OMException.ResultCodes.PERMISSION_DENIED); - - assertTrue(context.isAccessDenied(exception), - "Should return true for PERMISSION_DENIED"); - } - - @Test - public void testIsAccessDeniedWithAccessDenied() { - OMException exception = new OMException("Access denied", - OMException.ResultCodes.ACCESS_DENIED); - - assertTrue(context.isAccessDenied(exception), - "Should return true for ACCESS_DENIED"); - } - - @Test - public void testIsAccessDeniedWithBucketNotFound() { - OMException exception = new OMException("Bucket not found", - OMException.ResultCodes.BUCKET_NOT_FOUND); - - assertFalse(context.isAccessDenied(exception), - "Should return false for BUCKET_NOT_FOUND"); - } - - @Test - public void testIsAccessDeniedWithKeyNotFound() { - OMException exception = new OMException("Key not found", - OMException.ResultCodes.KEY_NOT_FOUND); - - assertFalse(context.isAccessDenied(exception), - "Should return false for KEY_NOT_FOUND"); - } - - @Test - public void testIsAccessDeniedWithIOException() { - IOException exception = new IOException("I/O error"); - - assertFalse(context.isAccessDenied(exception), - "Should return false for non-OMException"); - } - - @Test - public void testIsAccessDeniedWithNullException() { - assertFalse(context.isAccessDenied(null), - "Should return false for null exception"); - } - - @Test - public void testIsAccessDeniedWithRuntimeException() { - RuntimeException exception = new RuntimeException("Runtime error"); - - assertFalse(context.isAccessDenied(exception), - "Should return false for RuntimeException"); - } - - @Test - public void testGetEndpoint() { - BucketEndpoint endpoint = context.getEndpoint(); - assertNotNull(endpoint, "Endpoint should not be null"); - } - - @Test - public void testContextDelegatesCorrectly() throws IOException, OS3Exception { - // Test that context properly delegates to endpoint methods - OzoneBucket bucket = context.getBucket(BUCKET_NAME); - OzoneVolume volume = context.getVolume(); - - assertNotNull(bucket, "Delegated getBucket should work"); - assertNotNull(volume, "Delegated getVolume should work"); - } - - 
@Test - public void testIsAccessDeniedWithMultipleResultCodes() { - // Test all OMException result codes to ensure only access-related ones - // return true - - OMException[] accessDeniedExceptions = { - new OMException("", OMException.ResultCodes.PERMISSION_DENIED), - new OMException("", OMException.ResultCodes.ACCESS_DENIED) - }; - - for (OMException ex : accessDeniedExceptions) { - assertTrue(context.isAccessDenied(ex), - "Should return true for " + ex.getResult()); - } - - OMException[] otherExceptions = { - new OMException("", OMException.ResultCodes.BUCKET_NOT_FOUND), - new OMException("", OMException.ResultCodes.KEY_NOT_FOUND), - new OMException("", OMException.ResultCodes.VOLUME_NOT_FOUND), - new OMException("", OMException.ResultCodes.INTERNAL_ERROR) - }; - - for (OMException ex : otherExceptions) { - assertFalse(context.isAccessDenied(ex), - "Should return false for " + ex.getResult()); - } - } - - @Test - public void testBucketOperationsWithContext() throws Exception { - // Create a second bucket to test multiple operations - String secondBucket = "test-bucket-2"; - client.getObjectStore().createS3Bucket(secondBucket); - - // Test getting different buckets through context - OzoneBucket bucket1 = context.getBucket(BUCKET_NAME); - OzoneBucket bucket2 = context.getBucket(secondBucket); - - assertNotNull(bucket1, "First bucket should not be null"); - assertNotNull(bucket2, "Second bucket should not be null"); - assertEquals(BUCKET_NAME, bucket1.getName(), - "First bucket name should match"); - assertEquals(secondBucket, bucket2.getName(), - "Second bucket name should match"); - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java deleted file mode 100644 index 1aeb8dc85fb4..000000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertTrue; - -import java.io.IOException; -import java.io.InputStream; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Response; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; - -/** - * Test class for BucketOperationHandlerFactory. - */ -public class TestBucketOperationHandlerFactory { - - private BucketOperationHandlerFactory factory; - - @BeforeEach - public void setup() { - factory = new BucketOperationHandlerFactory(); - } - - @Test - public void testDefaultHandlersRegistered() { - // Verify that the default ACL handler is registered - assertTrue(factory.hasHandler("acl"), - "ACL handler should be registered by default"); - assertNotNull(factory.getHandler("acl"), - "ACL handler should not be null"); - } - - @Test - public void testGetHandlerForAcl() { - BucketOperationHandler handler = factory.getHandler("acl"); - assertNotNull(handler, "ACL handler should exist"); - assertTrue(handler instanceof AclHandler, - "Handler should be an instance of AclHandler"); - assertEquals("acl", handler.getQueryParamName(), - "Handler query param name should be 'acl'"); - } - - @Test - public void testGetHandlerForNonExistentParam() { - BucketOperationHandler handler = factory.getHandler("nonexistent"); - assertNull(handler, "Handler for non-existent param should be null"); - } - - @Test - public void testHasHandlerReturnsTrueForExisting() { - assertTrue(factory.hasHandler("acl"), - "Should return true for existing handler"); - } - - @Test - public void testHasHandlerReturnsFalseForNonExisting() { - assertFalse(factory.hasHandler("nonexistent"), - "Should return false for non-existing handler"); - } - - @Test - public void testRegisterNewHandler() { - // Create a mock handler - BucketOperationHandler mockHandler = new MockBucketOperationHandler("test"); - - // Register the handler - factory.register(mockHandler); - - // Verify registration - assertTrue(factory.hasHandler("test"), - "Newly registered handler should exist"); - assertEquals(mockHandler, factory.getHandler("test"), - "Retrieved handler should be the same instance"); - } - - @Test - public void testRegisterOverwritesExistingHandler() { - // Register a new handler with the same query param as ACL - BucketOperationHandler mockHandler = new MockBucketOperationHandler("acl"); - - factory.register(mockHandler); - - // Verify the handler was overwritten - BucketOperationHandler handler = factory.getHandler("acl"); - assertEquals(mockHandler, handler, - "Handler should be the newly registered one"); - assertTrue(handler instanceof MockBucketOperationHandler, - "Handler should be an instance of MockBucketOperationHandler"); - } - - @Test - public void testGetRegisteredQueryParams() { - // Default should have at least "acl" - assertTrue(factory.getRegisteredQueryParams().contains("acl"), - "Registered query params should contain 'acl'"); - - // Register additional handlers - factory.register(new MockBucketOperationHandler("lifecycle")); - factory.register(new MockBucketOperationHandler("notification")); - - // Verify all are present - assertEquals(3, factory.getRegisteredQueryParams().size(), - 
"Should have 3 registered handlers"); - assertTrue(factory.getRegisteredQueryParams().contains("lifecycle"), - "Should contain 'lifecycle'"); - assertTrue(factory.getRegisteredQueryParams().contains("notification"), - "Should contain 'notification'"); - } - - @Test - public void testMultipleHandlerRegistration() { - BucketOperationHandler handler1 = new MockBucketOperationHandler("test1"); - BucketOperationHandler handler2 = new MockBucketOperationHandler("test2"); - BucketOperationHandler handler3 = new MockBucketOperationHandler("test3"); - - factory.register(handler1); - factory.register(handler2); - factory.register(handler3); - - assertTrue(factory.hasHandler("test1"), "Handler test1 should exist"); - assertTrue(factory.hasHandler("test2"), "Handler test2 should exist"); - assertTrue(factory.hasHandler("test3"), "Handler test3 should exist"); - - assertEquals(handler1, factory.getHandler("test1")); - assertEquals(handler2, factory.getHandler("test2")); - assertEquals(handler3, factory.getHandler("test3")); - } - - /** - * Mock implementation of BucketOperationHandler for testing. - */ - private static class MockBucketOperationHandler implements BucketOperationHandler { - private final String queryParamName; - - MockBucketOperationHandler(String queryParamName) { - this.queryParamName = queryParamName; - } - - @Override - public Response handlePutRequest(String bucketName, InputStream body, - HttpHeaders headers, - BucketEndpointContext context, - long startNanos) - throws IOException, OS3Exception { - return Response.ok().build(); - } - - @Override - public String getQueryParamName() { - return queryParamName; - } - } -} From 5cb12fac26e23faffeb8e65aa9c48db6969071e5 Mon Sep 17 00:00:00 2001 From: echonesis Date: Wed, 24 Dec 2025 14:45:28 +0800 Subject: [PATCH 31/36] fix: checkstyle fix --- .../java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java index 499d355e831f..1252df4db5e8 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java @@ -26,6 +26,7 @@ import java.util.ArrayList; import java.util.EnumSet; import java.util.List; +import javax.annotation.PostConstruct; import javax.ws.rs.core.Response; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.ozone.OzoneAcl; @@ -41,7 +42,6 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.util.Time; import org.apache.http.HttpStatus; -import javax.annotation.PostConstruct; import org.slf4j.Logger; import org.slf4j.LoggerFactory; From b9b8977d4e5344bc77000b6cb6c526860ab893df Mon Sep 17 00:00:00 2001 From: echonesis Date: Wed, 24 Dec 2025 16:36:38 +0800 Subject: [PATCH 32/36] fix: test update --- .../ozone/s3/endpoint/BucketEndpoint.java | 17 ++++++++++------- .../hadoop/ozone/s3/endpoint/EndpointBase.java | 13 +++++++++++++ 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index 25c040fed12b..0b950c8e8fe8 100644 --- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -31,8 +31,6 @@ import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -95,10 +93,7 @@ public class BucketEndpoint extends EndpointBase { private boolean listKeysShallowEnabled; private int maxKeysLimit = 1000; - private static final List PUT_HANDLERS = - Collections.unmodifiableList(Arrays.asList( - new AclHandler() - )); + private List putHandlers; /** * Rest endpoint to list objects in a specific bucket. @@ -315,7 +310,7 @@ public Response put( ) throws IOException, OS3Exception { // Chain of responsibility: let each handler try to handle the request - for (BucketOperationHandler handler : PUT_HANDLERS) { + for (BucketOperationHandler handler : putHandlers) { Response response = handler.handlePutRequest(bucketName, body); if (response != null) { return response; // Handler handled the request @@ -781,5 +776,13 @@ public void init() { maxKeysLimit = getOzoneConfiguration().getInt( OZONE_S3G_LIST_MAX_KEYS_LIMIT, OZONE_S3G_LIST_MAX_KEYS_LIMIT_DEFAULT); + + // Initialize PUT handlers + AclHandler aclHandler = new AclHandler(); + copyDependenciesTo(aclHandler); + aclHandler.initialization(); + + putHandlers = new ArrayList<>(); + putHandlers.add(aclHandler); } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java index dbc91c1e55e7..ce13f7545443 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -543,6 +543,19 @@ void setOzoneConfiguration(OzoneConfiguration conf) { ozoneConfiguration = conf; } + /** + * Copy dependencies from this endpoint to another endpoint. + * Used for initializing handler instances. + */ + protected void copyDependenciesTo(EndpointBase target) { + target.setClient(this.client); + target.setOzoneConfiguration(this.ozoneConfiguration); + target.setContext(this.context); + target.setHeaders(this.headers); + target.setRequestIdentifier(this.requestIdentifier); + target.setSignatureInfo(this.signatureInfo); + } + protected OzoneConfiguration getOzoneConfiguration() { return ozoneConfiguration; } From bfe8295e90c69b2997014ddbbf0e7769cb4e7541 Mon Sep 17 00:00:00 2001 From: echonesis Date: Mon, 15 Dec 2025 01:04:33 +0800 Subject: [PATCH 33/36] HDDS-14123. 
Refactor BucketEndpoint#Put method --- .../hadoop/ozone/s3/endpoint/AclHandler.java | 76 +++---- .../ozone/s3/endpoint/BucketEndpoint.java | 49 +++- .../s3/endpoint/BucketEndpointContext.java | 108 +++++++++ .../s3/endpoint/BucketOperationHandler.java | 32 ++- .../BucketOperationHandlerFactory.java | 124 ++++++++++ .../ozone/s3/endpoint/TestAclHandler.java | 131 +++++------ .../endpoint/TestBucketEndpointContext.java | 211 ++++++++++++++++++ .../TestBucketOperationHandlerFactory.java | 175 +++++++++++++++ 8 files changed, 784 insertions(+), 122 deletions(-) create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java create mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java create mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java index 1252df4db5e8..9d1e05eaa6b9 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java @@ -26,7 +26,7 @@ import java.util.ArrayList; import java.util.EnumSet; import java.util.List; -import javax.annotation.PostConstruct; +import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.ozone.OzoneAcl; @@ -38,9 +38,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; -import org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.util.Time; import org.apache.http.HttpStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -48,48 +46,39 @@ /** * Handler for bucket ACL operations (?acl query parameter). * Implements PUT operations for bucket Access Control Lists. - * - * This handler extends EndpointBase to inherit all required functionality - * (configuration, headers, request context, audit logging, metrics, etc.). */ -public class AclHandler extends EndpointBase implements BucketOperationHandler { - +public class AclHandler implements BucketOperationHandler { + private static final Logger LOG = LoggerFactory.getLogger(AclHandler.class); - - /** - * Determine if this handler should handle the current request. - * @return true if the request has the "acl" query parameter - */ - private boolean shouldHandle() { - return queryParams().get(QueryParams.ACL) != null; + + @Override + public String getQueryParamName() { + return "acl"; } - + /** * Implement acl put. *

* see: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAcl.html */ @Override - public Response handlePutRequest(String bucketName, InputStream body) - throws IOException, OS3Exception { + public Response handlePutRequest( + String bucketName, + InputStream body, + HttpHeaders headers, + BucketEndpointContext context, + long startNanos) throws IOException, OS3Exception { - if (!shouldHandle()) { - return null; // Not responsible for this request - } - - long startNanos = Time.monotonicNowNanos(); - S3GAction s3GAction = S3GAction.PUT_ACL; - - String grantReads = getHeaders().getHeaderString(S3Acl.GRANT_READ); - String grantWrites = getHeaders().getHeaderString(S3Acl.GRANT_WRITE); - String grantReadACP = getHeaders().getHeaderString(S3Acl.GRANT_READ_ACP); - String grantWriteACP = getHeaders().getHeaderString(S3Acl.GRANT_WRITE_ACP); - String grantFull = getHeaders().getHeaderString(S3Acl.GRANT_FULL_CONTROL); + String grantReads = headers.getHeaderString(S3Acl.GRANT_READ); + String grantWrites = headers.getHeaderString(S3Acl.GRANT_WRITE); + String grantReadACP = headers.getHeaderString(S3Acl.GRANT_READ_ACP); + String grantWriteACP = headers.getHeaderString(S3Acl.GRANT_WRITE_ACP); + String grantFull = headers.getHeaderString(S3Acl.GRANT_FULL_CONTROL); try { - OzoneBucket bucket = getBucket(bucketName); - S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner()); - OzoneVolume volume = getVolume(); + OzoneBucket bucket = context.getBucket(bucketName); + S3Owner.verifyBucketOwnerCondition(headers, bucketName, bucket.getOwner()); + OzoneVolume volume = context.getVolume(); List ozoneAclListOnBucket = new ArrayList<>(); List ozoneAclListOnVolume = new ArrayList<>(); @@ -163,26 +152,25 @@ public Response handlePutRequest(String bucketName, InputStream body) volume.addAcl(acl); } - getMetrics().updatePutAclSuccessStats(startNanos); - auditWriteSuccess(s3GAction); + context.getEndpoint().getMetrics().updatePutAclSuccessStats(startNanos); return Response.status(HttpStatus.SC_OK).build(); } catch (OMException exception) { - getMetrics().updatePutAclFailureStats(startNanos); - auditWriteFailure(s3GAction, exception); + context.getEndpoint().getMetrics().updatePutAclFailureStats(startNanos); + context.auditWriteFailure(S3GAction.PUT_ACL, exception); if (exception.getResult() == ResultCodes.BUCKET_NOT_FOUND) { throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, exception); - } else if (isAccessDenied(exception)) { + } else if (context.isAccessDenied(exception)) { throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, exception); } throw exception; } catch (OS3Exception ex) { - getMetrics().updatePutAclFailureStats(startNanos); - auditWriteFailure(s3GAction, ex); + context.getEndpoint().getMetrics().updatePutAclFailureStats(startNanos); + context.auditWriteFailure(S3GAction.PUT_ACL, ex); throw ex; } } - + /** * Convert ACL string to Ozone ACL on bucket. 
* @@ -270,10 +258,4 @@ private List parseAndConvertAcl(String value, String permission, return ozoneAclList; } - - @Override - @PostConstruct - public void init() { - // No initialization needed for AclHandler - } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index 0b950c8e8fe8..16e2bf86a21c 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -31,6 +31,7 @@ import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -90,10 +91,25 @@ public class BucketEndpoint extends EndpointBase { private static final BucketOperationHandlerFactory HANDLER_FACTORY = new BucketOperationHandlerFactory(); + @Context + private HttpHeaders headers; + private boolean listKeysShallowEnabled; private int maxKeysLimit = 1000; - private List putHandlers; + private BucketEndpointContext context; + + @Inject + private OzoneConfiguration ozoneConfiguration; + + public BucketEndpoint() { + super(); + this.context = new BucketEndpointContext(this); + } + + private BucketEndpointContext getContext() { + return context; + } /** * Rest endpoint to list objects in a specific bucket. @@ -330,6 +346,25 @@ private Response handleCreateBucket(String bucketName) S3GAction s3GAction = S3GAction.CREATE_BUCKET; try { + // Build map of query parameters + Map queryParams = new HashMap<>(); + queryParams.put("acl", aclMarker); + // Future handlers: queryParams.put("lifecycle", lifecycleMarker); + + // Check for subresource operations using handlers + String queryParam = HANDLER_FACTORY.findFirstSupportedQueryParam(queryParams); + + if (queryParam != null) { + BucketOperationHandler handler = HANDLER_FACTORY.getHandler(queryParam); + // Delegate to specific handler + s3GAction = getActionForQueryParam(queryParam); + Response response = handler.handlePutRequest( + bucketName, body, headers, getContext(), startNanos); + AUDIT.logWriteSuccess( + buildAuditMessageForSuccess(s3GAction, getAuditParameters())); + return response; + } + String location = createS3Bucket(bucketName); auditWriteSuccess(s3GAction); getMetrics().updateCreateBucketSuccessStats(startNanos); @@ -348,6 +383,18 @@ private Response handleCreateBucket(String bucketName) } } + /** + * Map query parameter to corresponding S3GAction for audit logging. + */ + private S3GAction getActionForQueryParam(String queryParam) { + switch (queryParam) { + case "acl": + return S3GAction.PUT_ACL; + default: + return S3GAction.GET_BUCKET; + } + } + public Response listMultipartUploads( String bucketName, String prefix, diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java new file mode 100644 index 000000000000..0cefb0300b26 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import java.io.IOException; +import org.apache.hadoop.ozone.audit.AuditAction; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; + +/** + * Context object that provides access to BucketEndpoint resources. + * This allows handlers to access endpoint functionality without + * tight coupling to the BucketEndpoint class. + * + * Since BucketEndpoint extends EndpointBase, handlers can access: + * - Bucket and Volume operations + * - Methods inherited from EndpointBase + */ +public class BucketEndpointContext { + + private final BucketEndpoint endpoint; + + public BucketEndpointContext(BucketEndpoint endpoint) { + this.endpoint = endpoint; + } + + /** + * Get the bucket object. + * Delegates to BucketEndpoint's inherited getBucket() from EndpointBase. + * + * @param bucketName the bucket name + * @return OzoneBucket instance + * @throws IOException if bucket cannot be retrieved + * @throws OS3Exception if S3-specific error occurs + */ + public OzoneBucket getBucket(String bucketName) + throws IOException, OS3Exception { + return endpoint.getBucket(bucketName); + } + + /** + * Get the volume object. + * Delegates to BucketEndpoint's inherited getVolume() from EndpointBase. + * + * @return OzoneVolume instance + * @throws IOException if volume cannot be retrieved + * @throws OS3Exception if S3-specific error occurs + */ + public OzoneVolume getVolume() throws IOException, OS3Exception { + return endpoint.getVolume(); + } + + /** + * Check if an exception indicates access denied. + * This checks for OMException.ResultCodes that indicate permission issues. + * + * @param ex the exception to check + * @return true if access is denied + */ + public boolean isAccessDenied(Exception ex) { + // Check if it's an OMException with ACCESS_DENIED result code + if (ex instanceof OMException) { + OMException omEx = (OMException) ex; + return omEx.getResult() == OMException.ResultCodes.PERMISSION_DENIED || + omEx.getResult() == OMException.ResultCodes.ACCESS_DENIED; + } + return false; + } + + /** + * Audit a write operation failure. + * Delegates to BucketEndpoint's inherited auditWriteFailure() from EndpointBase. + * + * @param action the audit action being performed + * @param ex the exception that occurred + */ + public void auditWriteFailure(AuditAction action, Throwable ex) { + endpoint.auditWriteFailure(action, ex); + } + + /** + * Get reference to the endpoint for accessing other methods. + * Use with caution - prefer adding specific methods to this context + * rather than exposing the entire endpoint. 
+ * + * @return BucketEndpoint instance + */ + protected BucketEndpoint getEndpoint() { + return endpoint; + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java index 31d771750bae..b42c59b257c1 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java @@ -19,30 +19,42 @@ import java.io.IOException; import java.io.InputStream; +import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import org.apache.hadoop.ozone.s3.exception.OS3Exception; /** - * Interface for handling bucket operations using chain of responsibility pattern. + * Interface for handling bucket operations based on query parameters. * Each implementation handles a specific S3 bucket subresource operation * (e.g., ?acl, ?lifecycle, ?notification). - * - * Implementations should extend EndpointBase to inherit all required functionality - * (configuration, headers, request context, audit logging, metrics, etc.). */ public interface BucketOperationHandler { /** - * Handle the bucket PUT operation if this handler is responsible for it. - * The handler inspects the request (query parameters, headers, etc.) to determine - * if it should handle the request. + * Handle the bucket operation. * * @param bucketName the name of the bucket * @param body the request body stream - * @return Response if this handler handles the request, null otherwise + * @param headers the HTTP headers + * @param context the endpoint context containing shared dependencies + * @param startNanos the start time in nanoseconds for metrics tracking + * @return HTTP response * @throws IOException if an I/O error occurs * @throws OS3Exception if an S3-specific error occurs */ - Response handlePutRequest(String bucketName, InputStream body) - throws IOException, OS3Exception; + Response handlePutRequest( + String bucketName, + InputStream body, + HttpHeaders headers, + BucketEndpointContext context, + long startNanos + ) throws IOException, OS3Exception; + + /** + * Get the query parameter name this handler is responsible for. + * For example: "acl", "lifecycle", "notification" + * + * @return the query parameter name + */ + String getQueryParamName(); } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java new file mode 100644 index 000000000000..1edb19165ded --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import com.google.common.annotations.VisibleForTesting; +import java.util.HashMap; +import java.util.Map; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Factory class that manages all bucket operation handlers. + * Provides a registry pattern for looking up handlers based on query parameters. + */ +public class BucketOperationHandlerFactory { + + private static final Logger LOG = + LoggerFactory.getLogger(BucketOperationHandlerFactory.class); + + private final Map handlers = new HashMap<>(); + + /** + * Register all available bucket operation handlers. + */ + public BucketOperationHandlerFactory() { + registerDefaultHandlers(); + } + + /** + * Register default handlers for S3 bucket operations. + */ + private void registerDefaultHandlers() { + register(new AclHandler()); + } + + /** + * Register a bucket operation handler. + * + * @param handler the handler to register + */ + @VisibleForTesting + public void register(BucketOperationHandler handler) { + String queryParam = handler.getQueryParamName(); + if (handlers.containsKey(queryParam)) { + LOG.warn("Overwriting existing handler for query parameter: {}", + queryParam); + } + handlers.put(queryParam, handler); + LOG.debug("Registered handler for query parameter: {}", queryParam); + } + + /** + * Get a handler for the specified query parameter. + * + * @param queryParam the query parameter name + * @return the corresponding handler, or null if not found + */ + public BucketOperationHandler getHandler(String queryParam) { + return handlers.get(queryParam); + } + + /** + * Check if a handler exists for the specified query parameter. + * + * @param queryParam the query parameter name + * @return true if a handler exists + */ + public boolean hasHandler(String queryParam) { + return handlers.containsKey(queryParam); + } + + /** + * Find the first supported query parameter that has a non-null value. + * + * This method iterates through all registered handlers and checks if the + * corresponding query parameter has a non-null value in the provided map. + * + * @param queryParams map of query parameter names to their values + * @return the name of the first query parameter that has both a non-null value + * and a registered handler, or null if none found + */ + public String findFirstSupportedQueryParam(Map queryParams) { + if (queryParams == null || queryParams.isEmpty()) { + return null; + } + + // Iterate through registered handlers and find the first one with a value + for (Map.Entry entry : handlers.entrySet()) { + String paramName = entry.getKey(); + String paramValue = queryParams.get(paramName); + + if (paramValue != null) { + return paramName; + } + } + + return null; + } + + /** + * Get all registered query parameter names. 
+ * + * @return set of query parameter names + */ + @VisibleForTesting + public java.util.Set getRegisteredQueryParams() { + return handlers.keySet(); + } +} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java index f59c2d80e3f5..9e7cc3f3280d 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java @@ -20,8 +20,6 @@ import static java.net.HttpURLConnection.HTTP_NOT_IMPLEMENTED; import static java.net.HttpURLConnection.HTTP_OK; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.Mockito.any; import static org.mockito.Mockito.eq; @@ -53,6 +51,7 @@ public class TestAclHandler { private static final String BUCKET_NAME = OzoneConsts.S3_BUCKET; private OzoneClient client; + private BucketEndpointContext context; private AclHandler aclHandler; private HttpHeaders headers; @@ -63,11 +62,13 @@ public void setup() throws IOException { headers = mock(HttpHeaders.class); - // Build AclHandler using EndpointBuilder since it extends EndpointBase - aclHandler = EndpointBuilder.newAclHandlerBuilder() + BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder() .setClient(client) .setHeaders(headers) .build(); + + context = new BucketEndpointContext(bucketEndpoint); + aclHandler = new AclHandler(); } @AfterEach @@ -78,37 +79,19 @@ public void clean() throws IOException { } @Test - public void testHandlePutRequestWithAclQueryParam() throws Exception { - // Set up query parameter to indicate ACL operation - aclHandler.queryParamsForTest().set("acl", ""); - when(headers.getHeaderString(S3Acl.GRANT_READ)) - .thenReturn("id=\"testuser\""); - - Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); - - assertNotNull(response, "Handler should handle request with ?acl param"); - assertEquals(HTTP_OK, response.getStatus(), - "PUT ACL should return 200 OK"); - } - - @Test - public void testHandlePutRequestWithoutAclQueryParam() throws Exception { - // No "acl" query parameter - handler should not handle request - when(headers.getHeaderString(S3Acl.GRANT_READ)) - .thenReturn("id=\"testuser\""); - - Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); - - assertNull(response, "Handler should return null without ?acl param"); + public void testGetQueryParamName() { + assertEquals("acl", aclHandler.getQueryParamName(), + "Query param name should be 'acl'"); } @Test public void testHandlePutRequestWithReadHeader() throws Exception { - aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"testuser\""); - Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL should return 200 OK"); @@ -116,11 +99,12 @@ public void testHandlePutRequestWithReadHeader() throws Exception { @Test public void testHandlePutRequestWithWriteHeader() throws Exception { - aclHandler.queryParamsForTest().set("acl", ""); 
when(headers.getHeaderString(S3Acl.GRANT_WRITE)) .thenReturn("id=\"testuser\""); - Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL should return 200 OK"); @@ -128,11 +112,12 @@ public void testHandlePutRequestWithWriteHeader() throws Exception { @Test public void testHandlePutRequestWithReadAcpHeader() throws Exception { - aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ_ACP)) .thenReturn("id=\"testuser\""); - Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL should return 200 OK"); @@ -140,11 +125,12 @@ public void testHandlePutRequestWithReadAcpHeader() throws Exception { @Test public void testHandlePutRequestWithWriteAcpHeader() throws Exception { - aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_WRITE_ACP)) .thenReturn("id=\"testuser\""); - Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL should return 200 OK"); @@ -152,11 +138,12 @@ public void testHandlePutRequestWithWriteAcpHeader() throws Exception { @Test public void testHandlePutRequestWithFullControlHeader() throws Exception { - aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_FULL_CONTROL)) .thenReturn("id=\"testuser\""); - Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL should return 200 OK"); @@ -164,13 +151,14 @@ public void testHandlePutRequestWithFullControlHeader() throws Exception { @Test public void testHandlePutRequestWithMultipleHeaders() throws Exception { - aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"testuser1\""); when(headers.getHeaderString(S3Acl.GRANT_WRITE)) .thenReturn("id=\"testuser2\""); - Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL with multiple headers should return 200 OK"); @@ -178,12 +166,13 @@ public void testHandlePutRequestWithMultipleHeaders() throws Exception { @Test public void testHandlePutRequestWithUnsupportedGranteeType() { - aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("uri=\"http://example.com\""); + long startNanos = System.nanoTime(); OS3Exception exception = assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest(BUCKET_NAME, null); + aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, + startNanos); }, "Should throw OS3Exception for unsupported grantee type"); assertEquals(HTTP_NOT_IMPLEMENTED, exception.getHttpCode(), @@ -192,12 +181,13 @@ public 
void testHandlePutRequestWithUnsupportedGranteeType() { @Test public void testHandlePutRequestWithEmailAddressType() { - aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("emailAddress=\"test@example.com\""); + long startNanos = System.nanoTime(); OS3Exception exception = assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest(BUCKET_NAME, null); + aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, + startNanos); }, "Should throw OS3Exception for email address grantee type"); assertEquals(HTTP_NOT_IMPLEMENTED, exception.getHttpCode(), @@ -206,18 +196,18 @@ public void testHandlePutRequestWithEmailAddressType() { @Test public void testHandlePutRequestBucketNotFound() { - aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"testuser\""); + long startNanos = System.nanoTime(); assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest("nonexistent-bucket", null); + aclHandler.handlePutRequest("nonexistent-bucket", null, headers, + context, startNanos); }, "Should throw OS3Exception for non-existent bucket"); } @Test public void testHandlePutRequestWithBody() throws Exception { - aclHandler.queryParamsForTest().set("acl", ""); String aclXml = "\n" + "\n" + " \n" + @@ -238,7 +228,9 @@ public void testHandlePutRequestWithBody() throws Exception { InputStream body = new ByteArrayInputStream( aclXml.getBytes(StandardCharsets.UTF_8)); - Response response = aclHandler.handlePutRequest(BUCKET_NAME, body); + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, body, headers, context, startNanos); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL with body should return 200 OK"); @@ -246,22 +238,24 @@ public void testHandlePutRequestWithBody() throws Exception { @Test public void testHandlePutRequestWithInvalidHeaderFormat() { - aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("invalid-format"); + long startNanos = System.nanoTime(); assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest(BUCKET_NAME, null); + aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, + startNanos); }, "Should throw OS3Exception for invalid header format"); } @Test public void testHandlePutRequestWithMultipleGrantees() throws Exception { - aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"user1\",id=\"user2\""); - Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); + long startNanos = System.nanoTime(); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL with multiple grantees should return 200 OK"); @@ -269,15 +263,15 @@ public void testHandlePutRequestWithMultipleGrantees() throws Exception { @Test public void testPutAclReplacesExistingAcls() throws Exception { - aclHandler.queryParamsForTest().set("acl", ""); - // Set initial ACL when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"user1\""); when(headers.getHeaderString(S3Acl.GRANT_WRITE)) .thenReturn(null); - aclHandler.handlePutRequest(BUCKET_NAME, null); + long startNanos = System.nanoTime(); + aclHandler.handlePutRequest(BUCKET_NAME, null, headers, context, + startNanos); // Replace with new ACL when(headers.getHeaderString(S3Acl.GRANT_READ)) @@ -285,7 +279,8 @@ public void 
testPutAclReplacesExistingAcls() throws Exception { when(headers.getHeaderString(S3Acl.GRANT_WRITE)) .thenReturn("id=\"user2\""); - Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); + Response response = aclHandler.handlePutRequest( + BUCKET_NAME, null, headers, context, startNanos); assertEquals(HTTP_OK, response.getStatus(), "PUT ACL should replace existing ACLs"); @@ -293,47 +288,55 @@ public void testPutAclReplacesExistingAcls() throws Exception { @Test public void testAuditLoggingOnBucketNotFound() throws Exception { - // Create a spy of AclHandler to verify audit logging - AclHandler spyHandler = spy(EndpointBuilder.newAclHandlerBuilder() + // Create a spy of BucketEndpoint to verify audit logging + BucketEndpoint spyEndpoint = spy(EndpointBuilder.newBucketEndpointBuilder() .setClient(client) .setHeaders(headers) .build()); - spyHandler.queryParamsForTest().set("acl", ""); + BucketEndpointContext spyContext = new BucketEndpointContext(spyEndpoint); + when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"testuser\""); + long startNanos = System.nanoTime(); + // This should throw exception for non-existent bucket assertThrows(OS3Exception.class, () -> { - spyHandler.handlePutRequest("nonexistent-bucket", null); + aclHandler.handlePutRequest("nonexistent-bucket", null, headers, + spyContext, startNanos); }); // Verify that auditWriteFailure was called with PUT_ACL action - verify(spyHandler, times(1)).auditWriteFailure( + // Note: getBucket() wraps OMException as OS3Exception, so we catch OS3Exception + verify(spyEndpoint, times(1)).auditWriteFailure( eq(S3GAction.PUT_ACL), any(OS3Exception.class)); } @Test public void testAuditLoggingOnInvalidArgument() throws Exception { - // Create a spy of AclHandler to verify audit logging - AclHandler spyHandler = spy(EndpointBuilder.newAclHandlerBuilder() + // Create a spy of BucketEndpoint to verify audit logging + BucketEndpoint spyEndpoint = spy(EndpointBuilder.newBucketEndpointBuilder() .setClient(client) .setHeaders(headers) .build()); - spyHandler.queryParamsForTest().set("acl", ""); + BucketEndpointContext spyContext = new BucketEndpointContext(spyEndpoint); // Invalid format will trigger OS3Exception when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("invalid-format"); + long startNanos = System.nanoTime(); + assertThrows(OS3Exception.class, () -> { - spyHandler.handlePutRequest(BUCKET_NAME, null); + aclHandler.handlePutRequest(BUCKET_NAME, null, headers, + spyContext, startNanos); }); // Verify that auditWriteFailure was called with PUT_ACL action - verify(spyHandler, times(1)).auditWriteFailure( + verify(spyEndpoint, times(1)).auditWriteFailure( eq(S3GAction.PUT_ACL), any(OS3Exception.class)); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java new file mode 100644 index 000000000000..7d27ae23d664 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java @@ -0,0 +1,211 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; + +import java.io.IOException; +import javax.ws.rs.core.HttpHeaders; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +/** + * Test class for BucketEndpointContext. + */ +public class TestBucketEndpointContext { + + private static final String BUCKET_NAME = OzoneConsts.S3_BUCKET; + private OzoneClient client; + private BucketEndpointContext context; + + @BeforeEach + public void setup() throws IOException { + client = new OzoneClientStub(); + client.getObjectStore().createS3Bucket(BUCKET_NAME); + + HttpHeaders headers = mock(HttpHeaders.class); + + BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder() + .setClient(client) + .setHeaders(headers) + .build(); + + context = new BucketEndpointContext(bucketEndpoint); + } + + @AfterEach + public void clean() throws IOException { + if (client != null) { + client.close(); + } + } + + @Test + public void testGetBucket() throws IOException, OS3Exception { + OzoneBucket bucket = context.getBucket(BUCKET_NAME); + assertNotNull(bucket, "Bucket should not be null"); + assertEquals(BUCKET_NAME, bucket.getName(), + "Bucket name should match"); + } + + @Test + public void testGetBucketNotFound() { + assertThrows(OS3Exception.class, () -> { + context.getBucket("nonexistent-bucket"); + }, "Should throw OS3Exception for non-existent bucket"); + } + + @Test + public void testGetVolume() throws IOException, OS3Exception { + OzoneVolume volume = context.getVolume(); + assertNotNull(volume, "Volume should not be null"); + } + + @Test + public void testIsAccessDeniedWithPermissionDenied() { + OMException exception = new OMException("Access denied", + OMException.ResultCodes.PERMISSION_DENIED); + + assertTrue(context.isAccessDenied(exception), + "Should return true for PERMISSION_DENIED"); + } + + @Test + public void testIsAccessDeniedWithAccessDenied() { + OMException exception = new OMException("Access denied", + OMException.ResultCodes.ACCESS_DENIED); + + assertTrue(context.isAccessDenied(exception), + "Should return true for ACCESS_DENIED"); + } + + @Test + public void testIsAccessDeniedWithBucketNotFound() { + OMException exception = new OMException("Bucket not found", + OMException.ResultCodes.BUCKET_NOT_FOUND); + + assertFalse(context.isAccessDenied(exception), + "Should return false for 
BUCKET_NOT_FOUND"); + } + + @Test + public void testIsAccessDeniedWithKeyNotFound() { + OMException exception = new OMException("Key not found", + OMException.ResultCodes.KEY_NOT_FOUND); + + assertFalse(context.isAccessDenied(exception), + "Should return false for KEY_NOT_FOUND"); + } + + @Test + public void testIsAccessDeniedWithIOException() { + IOException exception = new IOException("I/O error"); + + assertFalse(context.isAccessDenied(exception), + "Should return false for non-OMException"); + } + + @Test + public void testIsAccessDeniedWithNullException() { + assertFalse(context.isAccessDenied(null), + "Should return false for null exception"); + } + + @Test + public void testIsAccessDeniedWithRuntimeException() { + RuntimeException exception = new RuntimeException("Runtime error"); + + assertFalse(context.isAccessDenied(exception), + "Should return false for RuntimeException"); + } + + @Test + public void testGetEndpoint() { + BucketEndpoint endpoint = context.getEndpoint(); + assertNotNull(endpoint, "Endpoint should not be null"); + } + + @Test + public void testContextDelegatesCorrectly() throws IOException, OS3Exception { + // Test that context properly delegates to endpoint methods + OzoneBucket bucket = context.getBucket(BUCKET_NAME); + OzoneVolume volume = context.getVolume(); + + assertNotNull(bucket, "Delegated getBucket should work"); + assertNotNull(volume, "Delegated getVolume should work"); + } + + @Test + public void testIsAccessDeniedWithMultipleResultCodes() { + // Test all OMException result codes to ensure only access-related ones + // return true + + OMException[] accessDeniedExceptions = { + new OMException("", OMException.ResultCodes.PERMISSION_DENIED), + new OMException("", OMException.ResultCodes.ACCESS_DENIED) + }; + + for (OMException ex : accessDeniedExceptions) { + assertTrue(context.isAccessDenied(ex), + "Should return true for " + ex.getResult()); + } + + OMException[] otherExceptions = { + new OMException("", OMException.ResultCodes.BUCKET_NOT_FOUND), + new OMException("", OMException.ResultCodes.KEY_NOT_FOUND), + new OMException("", OMException.ResultCodes.VOLUME_NOT_FOUND), + new OMException("", OMException.ResultCodes.INTERNAL_ERROR) + }; + + for (OMException ex : otherExceptions) { + assertFalse(context.isAccessDenied(ex), + "Should return false for " + ex.getResult()); + } + } + + @Test + public void testBucketOperationsWithContext() throws Exception { + // Create a second bucket to test multiple operations + String secondBucket = "test-bucket-2"; + client.getObjectStore().createS3Bucket(secondBucket); + + // Test getting different buckets through context + OzoneBucket bucket1 = context.getBucket(BUCKET_NAME); + OzoneBucket bucket2 = context.getBucket(secondBucket); + + assertNotNull(bucket1, "First bucket should not be null"); + assertNotNull(bucket2, "Second bucket should not be null"); + assertEquals(BUCKET_NAME, bucket1.getName(), + "First bucket name should match"); + assertEquals(secondBucket, bucket2.getName(), + "Second bucket name should match"); + } +} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java new file mode 100644 index 000000000000..1aeb8dc85fb4 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation 
(ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.io.IOException; +import java.io.InputStream; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.Response; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +/** + * Test class for BucketOperationHandlerFactory. + */ +public class TestBucketOperationHandlerFactory { + + private BucketOperationHandlerFactory factory; + + @BeforeEach + public void setup() { + factory = new BucketOperationHandlerFactory(); + } + + @Test + public void testDefaultHandlersRegistered() { + // Verify that the default ACL handler is registered + assertTrue(factory.hasHandler("acl"), + "ACL handler should be registered by default"); + assertNotNull(factory.getHandler("acl"), + "ACL handler should not be null"); + } + + @Test + public void testGetHandlerForAcl() { + BucketOperationHandler handler = factory.getHandler("acl"); + assertNotNull(handler, "ACL handler should exist"); + assertTrue(handler instanceof AclHandler, + "Handler should be an instance of AclHandler"); + assertEquals("acl", handler.getQueryParamName(), + "Handler query param name should be 'acl'"); + } + + @Test + public void testGetHandlerForNonExistentParam() { + BucketOperationHandler handler = factory.getHandler("nonexistent"); + assertNull(handler, "Handler for non-existent param should be null"); + } + + @Test + public void testHasHandlerReturnsTrueForExisting() { + assertTrue(factory.hasHandler("acl"), + "Should return true for existing handler"); + } + + @Test + public void testHasHandlerReturnsFalseForNonExisting() { + assertFalse(factory.hasHandler("nonexistent"), + "Should return false for non-existing handler"); + } + + @Test + public void testRegisterNewHandler() { + // Create a mock handler + BucketOperationHandler mockHandler = new MockBucketOperationHandler("test"); + + // Register the handler + factory.register(mockHandler); + + // Verify registration + assertTrue(factory.hasHandler("test"), + "Newly registered handler should exist"); + assertEquals(mockHandler, factory.getHandler("test"), + "Retrieved handler should be the same instance"); + } + + @Test + public void testRegisterOverwritesExistingHandler() { + // Register a new handler with the same query param as ACL + BucketOperationHandler mockHandler = new MockBucketOperationHandler("acl"); + + factory.register(mockHandler); + + // Verify the handler was overwritten + 
BucketOperationHandler handler = factory.getHandler("acl"); + assertEquals(mockHandler, handler, + "Handler should be the newly registered one"); + assertTrue(handler instanceof MockBucketOperationHandler, + "Handler should be an instance of MockBucketOperationHandler"); + } + + @Test + public void testGetRegisteredQueryParams() { + // Default should have at least "acl" + assertTrue(factory.getRegisteredQueryParams().contains("acl"), + "Registered query params should contain 'acl'"); + + // Register additional handlers + factory.register(new MockBucketOperationHandler("lifecycle")); + factory.register(new MockBucketOperationHandler("notification")); + + // Verify all are present + assertEquals(3, factory.getRegisteredQueryParams().size(), + "Should have 3 registered handlers"); + assertTrue(factory.getRegisteredQueryParams().contains("lifecycle"), + "Should contain 'lifecycle'"); + assertTrue(factory.getRegisteredQueryParams().contains("notification"), + "Should contain 'notification'"); + } + + @Test + public void testMultipleHandlerRegistration() { + BucketOperationHandler handler1 = new MockBucketOperationHandler("test1"); + BucketOperationHandler handler2 = new MockBucketOperationHandler("test2"); + BucketOperationHandler handler3 = new MockBucketOperationHandler("test3"); + + factory.register(handler1); + factory.register(handler2); + factory.register(handler3); + + assertTrue(factory.hasHandler("test1"), "Handler test1 should exist"); + assertTrue(factory.hasHandler("test2"), "Handler test2 should exist"); + assertTrue(factory.hasHandler("test3"), "Handler test3 should exist"); + + assertEquals(handler1, factory.getHandler("test1")); + assertEquals(handler2, factory.getHandler("test2")); + assertEquals(handler3, factory.getHandler("test3")); + } + + /** + * Mock implementation of BucketOperationHandler for testing. 
+ */ + private static class MockBucketOperationHandler implements BucketOperationHandler { + private final String queryParamName; + + MockBucketOperationHandler(String queryParamName) { + this.queryParamName = queryParamName; + } + + @Override + public Response handlePutRequest(String bucketName, InputStream body, + HttpHeaders headers, + BucketEndpointContext context, + long startNanos) + throws IOException, OS3Exception { + return Response.ok().build(); + } + + @Override + public String getQueryParamName() { + return queryParamName; + } + } +} From 3f20a42bfd7c5512e9d0998aa536983efd5dcc1b Mon Sep 17 00:00:00 2001 From: echonesis Date: Wed, 24 Dec 2025 22:29:03 +0800 Subject: [PATCH 34/36] fix: remove context --- .../s3/endpoint/BucketEndpointContext.java | 108 --------- .../BucketOperationHandlerFactory.java | 124 ---------- .../endpoint/TestBucketEndpointContext.java | 211 ------------------ .../TestBucketOperationHandlerFactory.java | 175 --------------- 4 files changed, 618 deletions(-) delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java deleted file mode 100644 index 0cefb0300b26..000000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpointContext.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import java.io.IOException; -import org.apache.hadoop.ozone.audit.AuditAction; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; - -/** - * Context object that provides access to BucketEndpoint resources. - * This allows handlers to access endpoint functionality without - * tight coupling to the BucketEndpoint class. 
- * - * Since BucketEndpoint extends EndpointBase, handlers can access: - * - Bucket and Volume operations - * - Methods inherited from EndpointBase - */ -public class BucketEndpointContext { - - private final BucketEndpoint endpoint; - - public BucketEndpointContext(BucketEndpoint endpoint) { - this.endpoint = endpoint; - } - - /** - * Get the bucket object. - * Delegates to BucketEndpoint's inherited getBucket() from EndpointBase. - * - * @param bucketName the bucket name - * @return OzoneBucket instance - * @throws IOException if bucket cannot be retrieved - * @throws OS3Exception if S3-specific error occurs - */ - public OzoneBucket getBucket(String bucketName) - throws IOException, OS3Exception { - return endpoint.getBucket(bucketName); - } - - /** - * Get the volume object. - * Delegates to BucketEndpoint's inherited getVolume() from EndpointBase. - * - * @return OzoneVolume instance - * @throws IOException if volume cannot be retrieved - * @throws OS3Exception if S3-specific error occurs - */ - public OzoneVolume getVolume() throws IOException, OS3Exception { - return endpoint.getVolume(); - } - - /** - * Check if an exception indicates access denied. - * This checks for OMException.ResultCodes that indicate permission issues. - * - * @param ex the exception to check - * @return true if access is denied - */ - public boolean isAccessDenied(Exception ex) { - // Check if it's an OMException with ACCESS_DENIED result code - if (ex instanceof OMException) { - OMException omEx = (OMException) ex; - return omEx.getResult() == OMException.ResultCodes.PERMISSION_DENIED || - omEx.getResult() == OMException.ResultCodes.ACCESS_DENIED; - } - return false; - } - - /** - * Audit a write operation failure. - * Delegates to BucketEndpoint's inherited auditWriteFailure() from EndpointBase. - * - * @param action the audit action being performed - * @param ex the exception that occurred - */ - public void auditWriteFailure(AuditAction action, Throwable ex) { - endpoint.auditWriteFailure(action, ex); - } - - /** - * Get reference to the endpoint for accessing other methods. - * Use with caution - prefer adding specific methods to this context - * rather than exposing the entire endpoint. - * - * @return BucketEndpoint instance - */ - protected BucketEndpoint getEndpoint() { - return endpoint; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java deleted file mode 100644 index 1edb19165ded..000000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandlerFactory.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import com.google.common.annotations.VisibleForTesting; -import java.util.HashMap; -import java.util.Map; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Factory class that manages all bucket operation handlers. - * Provides a registry pattern for looking up handlers based on query parameters. - */ -public class BucketOperationHandlerFactory { - - private static final Logger LOG = - LoggerFactory.getLogger(BucketOperationHandlerFactory.class); - - private final Map handlers = new HashMap<>(); - - /** - * Register all available bucket operation handlers. - */ - public BucketOperationHandlerFactory() { - registerDefaultHandlers(); - } - - /** - * Register default handlers for S3 bucket operations. - */ - private void registerDefaultHandlers() { - register(new AclHandler()); - } - - /** - * Register a bucket operation handler. - * - * @param handler the handler to register - */ - @VisibleForTesting - public void register(BucketOperationHandler handler) { - String queryParam = handler.getQueryParamName(); - if (handlers.containsKey(queryParam)) { - LOG.warn("Overwriting existing handler for query parameter: {}", - queryParam); - } - handlers.put(queryParam, handler); - LOG.debug("Registered handler for query parameter: {}", queryParam); - } - - /** - * Get a handler for the specified query parameter. - * - * @param queryParam the query parameter name - * @return the corresponding handler, or null if not found - */ - public BucketOperationHandler getHandler(String queryParam) { - return handlers.get(queryParam); - } - - /** - * Check if a handler exists for the specified query parameter. - * - * @param queryParam the query parameter name - * @return true if a handler exists - */ - public boolean hasHandler(String queryParam) { - return handlers.containsKey(queryParam); - } - - /** - * Find the first supported query parameter that has a non-null value. - * - * This method iterates through all registered handlers and checks if the - * corresponding query parameter has a non-null value in the provided map. - * - * @param queryParams map of query parameter names to their values - * @return the name of the first query parameter that has both a non-null value - * and a registered handler, or null if none found - */ - public String findFirstSupportedQueryParam(Map queryParams) { - if (queryParams == null || queryParams.isEmpty()) { - return null; - } - - // Iterate through registered handlers and find the first one with a value - for (Map.Entry entry : handlers.entrySet()) { - String paramName = entry.getKey(); - String paramValue = queryParams.get(paramName); - - if (paramValue != null) { - return paramName; - } - } - - return null; - } - - /** - * Get all registered query parameter names. 
- * - * @return set of query parameter names - */ - @VisibleForTesting - public java.util.Set getRegisteredQueryParams() { - return handlers.keySet(); - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java deleted file mode 100644 index 7d27ae23d664..000000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketEndpointContext.java +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.mock; - -import java.io.IOException; -import javax.ws.rs.core.HttpHeaders; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; - -/** - * Test class for BucketEndpointContext. 
- */ -public class TestBucketEndpointContext { - - private static final String BUCKET_NAME = OzoneConsts.S3_BUCKET; - private OzoneClient client; - private BucketEndpointContext context; - - @BeforeEach - public void setup() throws IOException { - client = new OzoneClientStub(); - client.getObjectStore().createS3Bucket(BUCKET_NAME); - - HttpHeaders headers = mock(HttpHeaders.class); - - BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder() - .setClient(client) - .setHeaders(headers) - .build(); - - context = new BucketEndpointContext(bucketEndpoint); - } - - @AfterEach - public void clean() throws IOException { - if (client != null) { - client.close(); - } - } - - @Test - public void testGetBucket() throws IOException, OS3Exception { - OzoneBucket bucket = context.getBucket(BUCKET_NAME); - assertNotNull(bucket, "Bucket should not be null"); - assertEquals(BUCKET_NAME, bucket.getName(), - "Bucket name should match"); - } - - @Test - public void testGetBucketNotFound() { - assertThrows(OS3Exception.class, () -> { - context.getBucket("nonexistent-bucket"); - }, "Should throw OS3Exception for non-existent bucket"); - } - - @Test - public void testGetVolume() throws IOException, OS3Exception { - OzoneVolume volume = context.getVolume(); - assertNotNull(volume, "Volume should not be null"); - } - - @Test - public void testIsAccessDeniedWithPermissionDenied() { - OMException exception = new OMException("Access denied", - OMException.ResultCodes.PERMISSION_DENIED); - - assertTrue(context.isAccessDenied(exception), - "Should return true for PERMISSION_DENIED"); - } - - @Test - public void testIsAccessDeniedWithAccessDenied() { - OMException exception = new OMException("Access denied", - OMException.ResultCodes.ACCESS_DENIED); - - assertTrue(context.isAccessDenied(exception), - "Should return true for ACCESS_DENIED"); - } - - @Test - public void testIsAccessDeniedWithBucketNotFound() { - OMException exception = new OMException("Bucket not found", - OMException.ResultCodes.BUCKET_NOT_FOUND); - - assertFalse(context.isAccessDenied(exception), - "Should return false for BUCKET_NOT_FOUND"); - } - - @Test - public void testIsAccessDeniedWithKeyNotFound() { - OMException exception = new OMException("Key not found", - OMException.ResultCodes.KEY_NOT_FOUND); - - assertFalse(context.isAccessDenied(exception), - "Should return false for KEY_NOT_FOUND"); - } - - @Test - public void testIsAccessDeniedWithIOException() { - IOException exception = new IOException("I/O error"); - - assertFalse(context.isAccessDenied(exception), - "Should return false for non-OMException"); - } - - @Test - public void testIsAccessDeniedWithNullException() { - assertFalse(context.isAccessDenied(null), - "Should return false for null exception"); - } - - @Test - public void testIsAccessDeniedWithRuntimeException() { - RuntimeException exception = new RuntimeException("Runtime error"); - - assertFalse(context.isAccessDenied(exception), - "Should return false for RuntimeException"); - } - - @Test - public void testGetEndpoint() { - BucketEndpoint endpoint = context.getEndpoint(); - assertNotNull(endpoint, "Endpoint should not be null"); - } - - @Test - public void testContextDelegatesCorrectly() throws IOException, OS3Exception { - // Test that context properly delegates to endpoint methods - OzoneBucket bucket = context.getBucket(BUCKET_NAME); - OzoneVolume volume = context.getVolume(); - - assertNotNull(bucket, "Delegated getBucket should work"); - assertNotNull(volume, "Delegated getVolume should work"); - } - - 
@Test - public void testIsAccessDeniedWithMultipleResultCodes() { - // Test all OMException result codes to ensure only access-related ones - // return true - - OMException[] accessDeniedExceptions = { - new OMException("", OMException.ResultCodes.PERMISSION_DENIED), - new OMException("", OMException.ResultCodes.ACCESS_DENIED) - }; - - for (OMException ex : accessDeniedExceptions) { - assertTrue(context.isAccessDenied(ex), - "Should return true for " + ex.getResult()); - } - - OMException[] otherExceptions = { - new OMException("", OMException.ResultCodes.BUCKET_NOT_FOUND), - new OMException("", OMException.ResultCodes.KEY_NOT_FOUND), - new OMException("", OMException.ResultCodes.VOLUME_NOT_FOUND), - new OMException("", OMException.ResultCodes.INTERNAL_ERROR) - }; - - for (OMException ex : otherExceptions) { - assertFalse(context.isAccessDenied(ex), - "Should return false for " + ex.getResult()); - } - } - - @Test - public void testBucketOperationsWithContext() throws Exception { - // Create a second bucket to test multiple operations - String secondBucket = "test-bucket-2"; - client.getObjectStore().createS3Bucket(secondBucket); - - // Test getting different buckets through context - OzoneBucket bucket1 = context.getBucket(BUCKET_NAME); - OzoneBucket bucket2 = context.getBucket(secondBucket); - - assertNotNull(bucket1, "First bucket should not be null"); - assertNotNull(bucket2, "Second bucket should not be null"); - assertEquals(BUCKET_NAME, bucket1.getName(), - "First bucket name should match"); - assertEquals(secondBucket, bucket2.getName(), - "Second bucket name should match"); - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java deleted file mode 100644 index 1aeb8dc85fb4..000000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketOperationHandlerFactory.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertTrue; - -import java.io.IOException; -import java.io.InputStream; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Response; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; - -/** - * Test class for BucketOperationHandlerFactory. - */ -public class TestBucketOperationHandlerFactory { - - private BucketOperationHandlerFactory factory; - - @BeforeEach - public void setup() { - factory = new BucketOperationHandlerFactory(); - } - - @Test - public void testDefaultHandlersRegistered() { - // Verify that the default ACL handler is registered - assertTrue(factory.hasHandler("acl"), - "ACL handler should be registered by default"); - assertNotNull(factory.getHandler("acl"), - "ACL handler should not be null"); - } - - @Test - public void testGetHandlerForAcl() { - BucketOperationHandler handler = factory.getHandler("acl"); - assertNotNull(handler, "ACL handler should exist"); - assertTrue(handler instanceof AclHandler, - "Handler should be an instance of AclHandler"); - assertEquals("acl", handler.getQueryParamName(), - "Handler query param name should be 'acl'"); - } - - @Test - public void testGetHandlerForNonExistentParam() { - BucketOperationHandler handler = factory.getHandler("nonexistent"); - assertNull(handler, "Handler for non-existent param should be null"); - } - - @Test - public void testHasHandlerReturnsTrueForExisting() { - assertTrue(factory.hasHandler("acl"), - "Should return true for existing handler"); - } - - @Test - public void testHasHandlerReturnsFalseForNonExisting() { - assertFalse(factory.hasHandler("nonexistent"), - "Should return false for non-existing handler"); - } - - @Test - public void testRegisterNewHandler() { - // Create a mock handler - BucketOperationHandler mockHandler = new MockBucketOperationHandler("test"); - - // Register the handler - factory.register(mockHandler); - - // Verify registration - assertTrue(factory.hasHandler("test"), - "Newly registered handler should exist"); - assertEquals(mockHandler, factory.getHandler("test"), - "Retrieved handler should be the same instance"); - } - - @Test - public void testRegisterOverwritesExistingHandler() { - // Register a new handler with the same query param as ACL - BucketOperationHandler mockHandler = new MockBucketOperationHandler("acl"); - - factory.register(mockHandler); - - // Verify the handler was overwritten - BucketOperationHandler handler = factory.getHandler("acl"); - assertEquals(mockHandler, handler, - "Handler should be the newly registered one"); - assertTrue(handler instanceof MockBucketOperationHandler, - "Handler should be an instance of MockBucketOperationHandler"); - } - - @Test - public void testGetRegisteredQueryParams() { - // Default should have at least "acl" - assertTrue(factory.getRegisteredQueryParams().contains("acl"), - "Registered query params should contain 'acl'"); - - // Register additional handlers - factory.register(new MockBucketOperationHandler("lifecycle")); - factory.register(new MockBucketOperationHandler("notification")); - - // Verify all are present - assertEquals(3, factory.getRegisteredQueryParams().size(), - 
"Should have 3 registered handlers"); - assertTrue(factory.getRegisteredQueryParams().contains("lifecycle"), - "Should contain 'lifecycle'"); - assertTrue(factory.getRegisteredQueryParams().contains("notification"), - "Should contain 'notification'"); - } - - @Test - public void testMultipleHandlerRegistration() { - BucketOperationHandler handler1 = new MockBucketOperationHandler("test1"); - BucketOperationHandler handler2 = new MockBucketOperationHandler("test2"); - BucketOperationHandler handler3 = new MockBucketOperationHandler("test3"); - - factory.register(handler1); - factory.register(handler2); - factory.register(handler3); - - assertTrue(factory.hasHandler("test1"), "Handler test1 should exist"); - assertTrue(factory.hasHandler("test2"), "Handler test2 should exist"); - assertTrue(factory.hasHandler("test3"), "Handler test3 should exist"); - - assertEquals(handler1, factory.getHandler("test1")); - assertEquals(handler2, factory.getHandler("test2")); - assertEquals(handler3, factory.getHandler("test3")); - } - - /** - * Mock implementation of BucketOperationHandler for testing. - */ - private static class MockBucketOperationHandler implements BucketOperationHandler { - private final String queryParamName; - - MockBucketOperationHandler(String queryParamName) { - this.queryParamName = queryParamName; - } - - @Override - public Response handlePutRequest(String bucketName, InputStream body, - HttpHeaders headers, - BucketEndpointContext context, - long startNanos) - throws IOException, OS3Exception { - return Response.ok().build(); - } - - @Override - public String getQueryParamName() { - return queryParamName; - } - } -} From eaa5c43065015850cfcd7bd77d9f86392651cad4 Mon Sep 17 00:00:00 2001 From: echonesis Date: Wed, 24 Dec 2025 23:26:38 +0800 Subject: [PATCH 35/36] fix: check files --- .../ozone/s3/endpoint/BucketEndpoint.java | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index 8312d25f5785..d7a583323987 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -87,12 +87,6 @@ public class BucketEndpoint extends EndpointBase { private static final Logger LOG = LoggerFactory.getLogger(BucketEndpoint.class); - private static final BucketOperationHandlerFactory HANDLER_FACTORY = - new BucketOperationHandlerFactory(); - - @Context - private HttpHeaders headers; - private boolean listKeysShallowEnabled; private int maxKeysLimit = 1000; @@ -351,18 +345,6 @@ private Response handleCreateBucket(String bucketName) } } - /** - * Map query parameter to corresponding S3GAction for audit logging. 
- */ - private S3GAction getActionForQueryParam(String queryParam) { - switch (queryParam) { - case "acl": - return S3GAction.PUT_ACL; - default: - return S3GAction.GET_BUCKET; - } - } - public Response listMultipartUploads( String bucketName, String prefix, From 313776be56bd22d4b48f631f1f921127adfb0484 Mon Sep 17 00:00:00 2001 From: echonesis Date: Tue, 6 Jan 2026 23:06:19 +0800 Subject: [PATCH 36/36] fix: CR update --- ...{AclHandler.java => BucketAclHandler.java} | 44 ++--- .../ozone/s3/endpoint/BucketEndpoint.java | 10 +- .../s3/endpoint/BucketOperationHandler.java | 6 +- .../ozone/s3/endpoint/EndpointBuilder.java | 4 +- ...Handler.java => TestBucketAclHandler.java} | 183 +++++------------- 5 files changed, 76 insertions(+), 171 deletions(-) rename hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/{AclHandler.java => BucketAclHandler.java} (88%) rename hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/{TestAclHandler.java => TestBucketAclHandler.java} (53%) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketAclHandler.java similarity index 88% rename from hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java rename to hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketAclHandler.java index 1252df4db5e8..eb91935016a4 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/AclHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketAclHandler.java @@ -18,8 +18,10 @@ package org.apache.hadoop.ozone.s3.endpoint; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; import java.io.IOException; import java.io.InputStream; @@ -52,9 +54,9 @@ * This handler extends EndpointBase to inherit all required functionality * (configuration, headers, request context, audit logging, metrics, etc.). */ -public class AclHandler extends EndpointBase implements BucketOperationHandler { +public class BucketAclHandler extends EndpointBase implements BucketOperationHandler { - private static final Logger LOG = LoggerFactory.getLogger(AclHandler.class); + private static final Logger LOG = LoggerFactory.getLogger(BucketAclHandler.class); /** * Determine if this handler should handle the current request. @@ -188,18 +190,16 @@ public Response handlePutRequest(String bucketName, InputStream body) * * Example: x-amz-grant-write: id="111122223333", id="555566667777" */ - private List getAndConvertAclOnBucket(String value, - String permission) - throws OS3Exception { + private List getAndConvertAclOnBucket( + String value, String permission) throws OS3Exception { return parseAndConvertAcl(value, permission, true); } /** * Convert ACL string to Ozone ACL on volume. 
*/ - private List<OzoneAcl> getAndConvertAclOnVolume(String value, - String permission) - throws OS3Exception { + private List<OzoneAcl> getAndConvertAclOnVolume( + String value, String permission) throws OS3Exception { return parseAndConvertAcl(value, permission, false); } @@ -215,9 +215,8 @@ private List<OzoneAcl> getAndConvertAclOnVolume(String value, * @return list of OzoneAcl objects * @throws OS3Exception if parsing fails or grantee type is not supported */ - private List<OzoneAcl> parseAndConvertAcl(String value, String permission, - boolean isBucket) - throws OS3Exception { + private List<OzoneAcl> parseAndConvertAcl( + String value, String permission, boolean isBucket) throws OS3Exception { List<OzoneAcl> ozoneAclList = new ArrayList<>(); if (StringUtils.isEmpty(value)) { return ozoneAclList; @@ -243,28 +242,13 @@ private List<OzoneAcl> parseAndConvertAcl(String value, String permission, // Build ACL on Bucket EnumSet<IAccessAuthorizer.ACLType> aclsOnBucket = S3Acl.getOzoneAclOnBucketFromS3Permission(permission); - ozoneAclList.add(OzoneAcl.of( - IAccessAuthorizer.ACLIdentityType.USER, - userId, - OzoneAcl.AclScope.DEFAULT, - aclsOnBucket - )); - ozoneAclList.add(OzoneAcl.of( - IAccessAuthorizer.ACLIdentityType.USER, - userId, - ACCESS, - aclsOnBucket - )); + ozoneAclList.add(OzoneAcl.of(USER, userId, DEFAULT, aclsOnBucket)); + ozoneAclList.add(OzoneAcl.of(USER, userId, ACCESS, aclsOnBucket)); } else { // Build ACL on Volume EnumSet<IAccessAuthorizer.ACLType> aclsOnVolume = S3Acl.getOzoneAclOnVolumeFromS3Permission(permission); - ozoneAclList.add(OzoneAcl.of( - IAccessAuthorizer.ACLIdentityType.USER, - userId, - ACCESS, - aclsOnVolume - )); + ozoneAclList.add(OzoneAcl.of(USER, userId, ACCESS, aclsOnVolume)); } } @@ -274,6 +258,6 @@ private List<OzoneAcl> parseAndConvertAcl(String value, String permission, @Override @PostConstruct public void init() { - // No initialization needed for AclHandler + // No initialization needed for BucketAclHandler } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index d7a583323987..93b525186df3 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -90,7 +90,7 @@ public class BucketEndpoint extends EndpointBase { private boolean listKeysShallowEnabled; private int maxKeysLimit = 1000; - private List<BucketOperationHandler> putHandlers; + private List<BucketOperationHandler> handlers; /** * Rest endpoint to list objects in a specific bucket.
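A minimal, self-contained sketch of the chain-of-responsibility dispatch that the next hunk wires into BucketEndpoint#put. The names PutDispatchSketch, PutHandler and AclPutHandler are illustrative stand-ins only, not Ozone classes; the real BucketOperationHandler takes an InputStream body plus HttpHeaders and may throw OS3Exception, and BucketAclHandler claims a request based on the ?acl query parameter:

  import java.util.ArrayList;
  import java.util.List;

  public final class PutDispatchSketch {

    // Simplified stand-in for BucketOperationHandler.
    interface PutHandler {
      // Return a response if this handler owns the request, otherwise null.
      String handlePut(String bucketName, String queryParam);
    }

    // Stand-in for BucketAclHandler: only reacts to the "acl" query parameter.
    static final class AclPutHandler implements PutHandler {
      @Override
      public String handlePut(String bucketName, String queryParam) {
        if ("acl".equals(queryParam)) {
          return "200 OK - ACL updated on " + bucketName;
        }
        return null; // not an ACL request; let the chain continue
      }
    }

    // Walk the chain; fall back to the default PUT action (create bucket).
    static String put(List<PutHandler> handlers, String bucket, String queryParam) {
      for (PutHandler handler : handlers) {
        String response = handler.handlePut(bucket, queryParam);
        if (response != null) {
          return response; // handler handled the request
        }
      }
      return "200 OK - bucket " + bucket + " created";
    }

    public static void main(String[] args) {
      List<PutHandler> handlers = new ArrayList<>();
      handlers.add(new AclPutHandler()); // mirrors the registration done in init()

      System.out.println(put(handlers, "bucket1", "acl"));  // handled by the ACL stand-in
      System.out.println(put(handlers, "bucket1", null));   // falls through to the default
    }
  }

In the patch itself the registration happens once in BucketEndpoint#init() (handlers.add(aclHandler)), and a handler that does not recognise the request returns null so the next handler or the endpoint's default PUT behaviour runs, as the following hunks show.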
@@ -307,7 +307,7 @@ public Response put( ) throws IOException, OS3Exception { // Chain of responsibility: let each handler try to handle the request - for (BucketOperationHandler handler : putHandlers) { + for (BucketOperationHandler handler : handlers) { Response response = handler.handlePutRequest(bucketName, body); if (response != null) { return response; // Handler handled the request @@ -607,11 +607,11 @@ public void init() { OZONE_S3G_LIST_MAX_KEYS_LIMIT_DEFAULT); // Initialize PUT handlers - AclHandler aclHandler = new AclHandler(); + BucketAclHandler aclHandler = new BucketAclHandler(); copyDependenciesTo(aclHandler); aclHandler.initialization(); - putHandlers = new ArrayList<>(); - putHandlers.add(aclHandler); + handlers = new ArrayList<>(); + handlers.add(aclHandler); } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java index 31d771750bae..745353a99f7a 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketOperationHandler.java @@ -43,6 +43,8 @@ public interface BucketOperationHandler { * @throws IOException if an I/O error occurs * @throws OS3Exception if an S3-specific error occurs */ - Response handlePutRequest(String bucketName, InputStream body) - throws IOException, OS3Exception; + default Response handlePutRequest(String bucketName, InputStream body) + throws IOException, OS3Exception { + return null; + } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java index 07132839a6fb..1323c13fc0ca 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBuilder.java @@ -150,8 +150,8 @@ public static EndpointBuilder newBucketEndpointBuilder() { return new EndpointBuilder<>(BucketEndpoint::new); } - public static EndpointBuilder newAclHandlerBuilder() { - return new EndpointBuilder<>(AclHandler::new); + public static EndpointBuilder newBucketAclHandlerBuilder() { + return new EndpointBuilder<>(BucketAclHandler::new); } public static EndpointBuilder newObjectEndpointBuilder() { diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAclHandler.java similarity index 53% rename from hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java rename to hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAclHandler.java index f59c2d80e3f5..1cf37c6b5e3a 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAclHandler.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAclHandler.java @@ -17,9 +17,9 @@ package org.apache.hadoop.ozone.s3.endpoint; -import static java.net.HttpURLConnection.HTTP_NOT_IMPLEMENTED; -import static java.net.HttpURLConnection.HTTP_OK; -import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertErrorResponse; +import static 
org.apache.hadoop.ozone.s3.endpoint.EndpointTestUtils.assertSucceeds; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -35,6 +35,7 @@ import java.io.IOException; import java.io.InputStream; import java.nio.charset.StandardCharsets; +import java.util.stream.Stream; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import org.apache.hadoop.ozone.OzoneConsts; @@ -45,15 +46,17 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; /** - * Test class for AclHandler. + * Test class for BucketAclHandler. */ -public class TestAclHandler { +public class TestBucketAclHandler { private static final String BUCKET_NAME = OzoneConsts.S3_BUCKET; private OzoneClient client; - private AclHandler aclHandler; + private BucketAclHandler aclHandler; private HttpHeaders headers; @BeforeEach @@ -63,11 +66,14 @@ public void setup() throws IOException { headers = mock(HttpHeaders.class); - // Build AclHandler using EndpointBuilder since it extends EndpointBase - aclHandler = EndpointBuilder.newAclHandlerBuilder() + // Build BucketAclHandler using EndpointBuilder since it extends EndpointBase + aclHandler = EndpointBuilder.newBucketAclHandlerBuilder() .setClient(client) .setHeaders(headers) .build(); + + // Set up query parameter for ACL operation (default for most tests) + aclHandler.queryParamsForTest().set("acl", ""); } @AfterEach @@ -79,21 +85,17 @@ public void clean() throws IOException { @Test public void testHandlePutRequestWithAclQueryParam() throws Exception { - // Set up query parameter to indicate ACL operation - aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"testuser\""); - Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); - - assertNotNull(response, "Handler should handle request with ?acl param"); - assertEquals(HTTP_OK, response.getStatus(), - "PUT ACL should return 200 OK"); + assertNotNull(aclHandler.handlePutRequest(BUCKET_NAME, null), + "Handler should handle request with ?acl param"); } @Test public void testHandlePutRequestWithoutAclQueryParam() throws Exception { - // No "acl" query parameter - handler should not handle request + // Remove "acl" query parameter - handler should not handle request + aclHandler.queryParamsForTest().unset("acl"); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"testuser\""); @@ -102,122 +104,65 @@ public void testHandlePutRequestWithoutAclQueryParam() throws Exception { assertNull(response, "Handler should return null without ?acl param"); } - @Test - public void testHandlePutRequestWithReadHeader() throws Exception { - aclHandler.queryParamsForTest().set("acl", ""); - when(headers.getHeaderString(S3Acl.GRANT_READ)) - .thenReturn("id=\"testuser\""); - - Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); - - assertEquals(HTTP_OK, response.getStatus(), - "PUT ACL should return 200 OK"); - } - - @Test - public void testHandlePutRequestWithWriteHeader() throws Exception { - aclHandler.queryParamsForTest().set("acl", ""); - when(headers.getHeaderString(S3Acl.GRANT_WRITE)) - .thenReturn("id=\"testuser\""); - - Response response = 
aclHandler.handlePutRequest(BUCKET_NAME, null); - - assertEquals(HTTP_OK, response.getStatus(), - "PUT ACL should return 200 OK"); - } - - @Test - public void testHandlePutRequestWithReadAcpHeader() throws Exception { - aclHandler.queryParamsForTest().set("acl", ""); - when(headers.getHeaderString(S3Acl.GRANT_READ_ACP)) - .thenReturn("id=\"testuser\""); - - Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); - - assertEquals(HTTP_OK, response.getStatus(), - "PUT ACL should return 200 OK"); - } - - @Test - public void testHandlePutRequestWithWriteAcpHeader() throws Exception { - aclHandler.queryParamsForTest().set("acl", ""); - when(headers.getHeaderString(S3Acl.GRANT_WRITE_ACP)) - .thenReturn("id=\"testuser\""); - - Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); - - assertEquals(HTTP_OK, response.getStatus(), - "PUT ACL should return 200 OK"); + private static Stream grantHeaderNames() { + return Stream.of( + S3Acl.GRANT_READ, + S3Acl.GRANT_WRITE, + S3Acl.GRANT_READ_ACP, + S3Acl.GRANT_WRITE_ACP, + S3Acl.GRANT_FULL_CONTROL + ); } - @Test - public void testHandlePutRequestWithFullControlHeader() throws Exception { - aclHandler.queryParamsForTest().set("acl", ""); - when(headers.getHeaderString(S3Acl.GRANT_FULL_CONTROL)) + @ParameterizedTest + @MethodSource("grantHeaderNames") + public void testHandlePutRequestWithGrantHeaders(String headerName) throws Exception { + when(headers.getHeaderString(headerName)) .thenReturn("id=\"testuser\""); - Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); - - assertEquals(HTTP_OK, response.getStatus(), - "PUT ACL should return 200 OK"); + assertSucceeds(() -> aclHandler.handlePutRequest(BUCKET_NAME, null)); } @Test public void testHandlePutRequestWithMultipleHeaders() throws Exception { - aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"testuser1\""); when(headers.getHeaderString(S3Acl.GRANT_WRITE)) .thenReturn("id=\"testuser2\""); - Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); - - assertEquals(HTTP_OK, response.getStatus(), - "PUT ACL with multiple headers should return 200 OK"); + assertSucceeds(() -> aclHandler.handlePutRequest(BUCKET_NAME, null)); } @Test public void testHandlePutRequestWithUnsupportedGranteeType() { - aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("uri=\"http://example.com\""); - OS3Exception exception = assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest(BUCKET_NAME, null); - }, "Should throw OS3Exception for unsupported grantee type"); - - assertEquals(HTTP_NOT_IMPLEMENTED, exception.getHttpCode(), - "Should return NOT_IMPLEMENTED for unsupported grantee type"); + assertErrorResponse(NOT_IMPLEMENTED, + () -> aclHandler.handlePutRequest(BUCKET_NAME, null)); } @Test public void testHandlePutRequestWithEmailAddressType() { - aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("emailAddress=\"test@example.com\""); - OS3Exception exception = assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest(BUCKET_NAME, null); - }, "Should throw OS3Exception for email address grantee type"); - - assertEquals(HTTP_NOT_IMPLEMENTED, exception.getHttpCode(), - "Should return NOT_IMPLEMENTED for email address grantee type"); + assertErrorResponse(NOT_IMPLEMENTED, + () -> aclHandler.handlePutRequest(BUCKET_NAME, null)); } @Test public void 
testHandlePutRequestBucketNotFound() { - aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"testuser\""); - assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest("nonexistent-bucket", null); - }, "Should throw OS3Exception for non-existent bucket"); + assertThrows(OS3Exception.class, + () -> aclHandler.handlePutRequest("nonexistent-bucket", null), + "Should throw OS3Exception for non-existent bucket"); } @Test public void testHandlePutRequestWithBody() throws Exception { - aclHandler.queryParamsForTest().set("acl", ""); String aclXml = "\n" + "\n" + " \n" + @@ -238,39 +183,29 @@ public void testHandlePutRequestWithBody() throws Exception { InputStream body = new ByteArrayInputStream( aclXml.getBytes(StandardCharsets.UTF_8)); - Response response = aclHandler.handlePutRequest(BUCKET_NAME, body); - - assertEquals(HTTP_OK, response.getStatus(), - "PUT ACL with body should return 200 OK"); + assertSucceeds(() -> aclHandler.handlePutRequest(BUCKET_NAME, body)); } @Test public void testHandlePutRequestWithInvalidHeaderFormat() { - aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("invalid-format"); - assertThrows(OS3Exception.class, () -> { - aclHandler.handlePutRequest(BUCKET_NAME, null); - }, "Should throw OS3Exception for invalid header format"); + assertThrows(OS3Exception.class, + () -> aclHandler.handlePutRequest(BUCKET_NAME, null), + "Should throw OS3Exception for invalid header format"); } @Test public void testHandlePutRequestWithMultipleGrantees() throws Exception { - aclHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"user1\",id=\"user2\""); - Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); - - assertEquals(HTTP_OK, response.getStatus(), - "PUT ACL with multiple grantees should return 200 OK"); + assertSucceeds(() -> aclHandler.handlePutRequest(BUCKET_NAME, null)); } @Test public void testPutAclReplacesExistingAcls() throws Exception { - aclHandler.queryParamsForTest().set("acl", ""); - // Set initial ACL when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"user1\""); @@ -285,28 +220,19 @@ public void testPutAclReplacesExistingAcls() throws Exception { when(headers.getHeaderString(S3Acl.GRANT_WRITE)) .thenReturn("id=\"user2\""); - Response response = aclHandler.handlePutRequest(BUCKET_NAME, null); - - assertEquals(HTTP_OK, response.getStatus(), - "PUT ACL should replace existing ACLs"); + assertSucceeds(() -> aclHandler.handlePutRequest(BUCKET_NAME, null)); } @Test public void testAuditLoggingOnBucketNotFound() throws Exception { - // Create a spy of AclHandler to verify audit logging - AclHandler spyHandler = spy(EndpointBuilder.newAclHandlerBuilder() - .setClient(client) - .setHeaders(headers) - .build()); + BucketAclHandler spyHandler = spy(aclHandler); - spyHandler.queryParamsForTest().set("acl", ""); when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("id=\"testuser\""); // This should throw exception for non-existent bucket - assertThrows(OS3Exception.class, () -> { - spyHandler.handlePutRequest("nonexistent-bucket", null); - }); + assertThrows(OS3Exception.class, + () -> spyHandler.handlePutRequest("nonexistent-bucket", null)); // Verify that auditWriteFailure was called with PUT_ACL action verify(spyHandler, times(1)).auditWriteFailure( @@ -316,21 +242,14 @@ public void testAuditLoggingOnBucketNotFound() throws Exception { @Test public void 
testAuditLoggingOnInvalidArgument() throws Exception { - // Create a spy of AclHandler to verify audit logging - AclHandler spyHandler = spy(EndpointBuilder.newAclHandlerBuilder() - .setClient(client) - .setHeaders(headers) - .build()); - - spyHandler.queryParamsForTest().set("acl", ""); + BucketAclHandler spyHandler = spy(aclHandler); // Invalid format will trigger OS3Exception when(headers.getHeaderString(S3Acl.GRANT_READ)) .thenReturn("invalid-format"); - assertThrows(OS3Exception.class, () -> { - spyHandler.handlePutRequest(BUCKET_NAME, null); - }); + assertThrows(OS3Exception.class, + () -> spyHandler.handlePutRequest(BUCKET_NAME, null)); // Verify that auditWriteFailure was called with PUT_ACL action verify(spyHandler, times(1)).auditWriteFailure(