HADOOP-18975. AWS SDK v2: extend support for FIPS endpoints #6277

Merged
@@ -1335,6 +1335,15 @@ private Constants() {
*/
public static final String AWS_S3_DEFAULT_REGION = "us-east-2";

/**
* Is the endpoint a FIPS endpoint?
* Can be queried as a path capability.
* Value {@value}.
*/
public static final String FIPS_ENDPOINT = "fs.s3a.endpoint.fips";

public static final boolean ENDPOINT_FIPS_DEFAULT = false;

/**
* Require that all S3 access is made through Access Points.
*/
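As a usage note: the new option is picked up through the normal Hadoop configuration mechanism. A minimal sketch, assuming an org.apache.hadoop.conf.Configuration instance is already in hand (nothing here beyond the option added above is new API):

    Configuration conf = new Configuration();
    // Constants.FIPS_ENDPOINT == "fs.s3a.endpoint.fips"
    conf.setBoolean("fs.s3a.endpoint.fips", true);
    // do not also set fs.s3a.endpoint: the client factory below rejects that combination
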
@@ -22,6 +22,7 @@
import java.net.URI;
import java.net.URISyntaxException;

import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.fs.s3a.impl.AWSClientConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -54,6 +55,7 @@
import static org.apache.hadoop.fs.s3a.Constants.AWS_REGION;
import static org.apache.hadoop.fs.s3a.Constants.AWS_S3_DEFAULT_REGION;
import static org.apache.hadoop.fs.s3a.Constants.CENTRAL_ENDPOINT;
import static org.apache.hadoop.fs.s3a.Constants.FIPS_ENDPOINT;
import static org.apache.hadoop.fs.s3a.Constants.HTTP_SIGNER_CLASS_NAME;
import static org.apache.hadoop.fs.s3a.Constants.HTTP_SIGNER_ENABLED;
import static org.apache.hadoop.fs.s3a.Constants.HTTP_SIGNER_ENABLED_DEFAULT;
@@ -63,6 +65,7 @@
import static org.apache.hadoop.fs.s3a.auth.SignerFactory.createHttpSigner;
import static org.apache.hadoop.fs.s3a.impl.AWSHeaders.REQUESTER_PAYS_HEADER;
import static org.apache.hadoop.fs.s3a.impl.InternalConstants.AUTH_SCHEME_AWS_SIGV_4;
import static org.apache.hadoop.util.Preconditions.checkArgument;


/**
@@ -102,6 +105,13 @@ public class DefaultS3ClientFactory extends Configured
/** Exactly once log to inform about ignoring the AWS-SDK Warnings for CSE. */
private static final LogExactlyOnce IGNORE_CSE_WARN = new LogExactlyOnce(LOG);

/**
* Error message when an endpoint is set with FIPS enabled: {@value}.
*/
@VisibleForTesting
public static final String ERROR_ENDPOINT_WITH_FIPS =
"An endpoint cannot set when " + FIPS_ENDPOINT + " is true";

@Override
public S3Client createS3Client(
final URI uri,
@@ -248,6 +258,7 @@ protected ClientOverrideConfiguration createClientOverrideConfiguration(
* @param conf conf configuration object
* @param <BuilderT> S3 client builder type
* @param <ClientT> S3 client type
* @throws IllegalArgumentException if endpoint is set when FIPS is enabled.
*/
private <BuilderT extends S3BaseClientBuilder<BuilderT, ClientT>, ClientT> void configureEndpointAndRegion(
BuilderT builder, S3ClientCreationParameters parameters, Configuration conf) {
@@ -263,7 +274,18 @@ private <BuilderT extends S3BaseClientBuilder<BuilderT, ClientT>, ClientT> void
region = Region.of(configuredRegion);
}

// FIPS? Log it, then reject any attempt to set an endpoint
final boolean fipsEnabled = parameters.isFipsEnabled();
if (fipsEnabled) {
LOG.debug("Enabling FIPS mode");
}
// always setting it guarantees the value is non-null,
// which tests expect.
builder.fipsEnabled(fipsEnabled);

if (endpoint != null) {
checkArgument(!fipsEnabled,
"%s : %s", ERROR_ENDPOINT_WITH_FIPS, endpoint);
builder.endpointOverride(endpoint);
// No region was configured, try to determine it from the endpoint.
if (region == null) {
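For context, this mirrors the AWS SDK v2 builder contract: fipsEnabled(...) asks the SDK to resolve the FIPS endpoint for the configured region, which is why an explicit endpointOverride(...) is rejected above. A minimal sketch of the equivalent raw SDK call, with an illustrative region that is not taken from this patch:

    S3Client s3 = S3Client.builder()
        .region(Region.US_GOV_WEST_1)
        .fipsEnabled(true)   // SDK resolves the region's FIPS endpoint; no endpointOverride()
        .build();
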
@@ -461,6 +461,11 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
*/
private boolean isMultipartCopyEnabled;

/**
* Is FIPS enabled?
*/
private boolean fipsEnabled;

/**
* A cache of files that should be deleted when the FileSystem is closed
* or the JVM is exited.
@@ -614,6 +619,8 @@ public void initialize(URI name, Configuration originalConf)
? conf.getTrimmed(AWS_REGION)
: accessPoint.getRegion();

fipsEnabled = conf.getBoolean(FIPS_ENDPOINT, ENDPOINT_FIPS_DEFAULT);

// is this an S3Express store?
s3ExpressStore = isS3ExpressStore(bucket, endpoint);

@@ -1046,6 +1053,7 @@ private void bindAWSClient(URI name, boolean dtEnabled) throws IOException {
.withMultipartThreshold(multiPartThreshold)
.withTransferManagerExecutor(unboundedThreadPool)
.withRegion(configuredRegion)
.withFipsEnabled(fipsEnabled)
.withExpressCreateSession(
conf.getBoolean(S3EXPRESS_CREATE_SESSION, S3EXPRESS_CREATE_SESSION_DEFAULT));

@@ -5521,6 +5529,10 @@ public boolean hasPathCapability(final Path path, final String capability)
case OPTIMIZED_COPY_FROM_LOCAL:
return optimizedCopyFromLocal;

// probe for a fips endpoint
case FIPS_ENDPOINT:
return fipsEnabled;

default:
return super.hasPathCapability(p, cap);
}
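Because fs.s3a.endpoint.fips is now exposed as a path capability, callers can probe it without inspecting configuration. A minimal sketch, where conf and the bucket URI are illustrative:

    FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
    // true when the S3A client was built against a FIPS endpoint
    boolean fips = fs.hasPathCapability(new Path("/"), "fs.s3a.endpoint.fips");
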
@@ -176,6 +176,11 @@ final class S3ClientCreationParameters {
*/
private boolean expressCreateSession = S3EXPRESS_CREATE_SESSION_DEFAULT;

/**
* Is FIPS enabled?
*/
private boolean fipsEnabled;

/**
* List of execution interceptors to include in the chain
* of interceptors in the SDK.
@@ -461,5 +466,23 @@ public String toString() {
", expressCreateSession=" + expressCreateSession +
'}';
}

/**
* Get the FIPS flag.
* @return is fips enabled
*/
public boolean isFipsEnabled() {
return fipsEnabled;
}

/**
* Set builder value.
* @param value new value
* @return the builder
*/
public S3ClientCreationParameters withFipsEnabled(final boolean value) {
fipsEnabled = value;
return this;
}
}
}
@@ -38,6 +38,7 @@
import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_STANDARD_OPTIONS;
import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_OPERATIONS_PURGE_UPLOADS;
import static org.apache.hadoop.fs.s3a.Constants.ENABLE_MULTI_DELETE;
import static org.apache.hadoop.fs.s3a.Constants.FIPS_ENDPOINT;
import static org.apache.hadoop.fs.s3a.Constants.FS_S3A_CREATE_PERFORMANCE;
import static org.apache.hadoop.fs.s3a.Constants.FS_S3A_CREATE_PERFORMANCE_ENABLED;
import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_AWS_V2;
@@ -272,6 +273,7 @@ private InternalConstants() {
FS_CHECKSUMS,
FS_MULTIPART_UPLOADER,
DIRECTORY_LISTING_INCONSISTENT,
FIPS_ENDPOINT,

// s3 specific
STORE_CAPABILITY_AWS_V2,
@@ -357,21 +357,19 @@ public static class BucketInfo extends S3GuardTool {
public static final String NAME = BUCKET_INFO;
public static final String GUARDED_FLAG = "guarded";
public static final String UNGUARDED_FLAG = "unguarded";
public static final String AUTH_FLAG = "auth";
public static final String NONAUTH_FLAG = "nonauth";
public static final String ENCRYPTION_FLAG = "encryption";
public static final String MAGIC_FLAG = "magic";
public static final String MARKERS_FLAG = "markers";
public static final String MARKERS_AWARE = "aware";
public static final String FIPS_FLAG = "fips";

public static final String PURPOSE = "provide/check information"
+ " about a specific bucket";

private static final String USAGE = NAME + " [OPTIONS] s3a://BUCKET\n"
+ "\t" + PURPOSE + "\n\n"
+ "Common options:\n"
+ " -" + AUTH_FLAG + " - Require the S3Guard mode to be \"authoritative\"\n"
+ " -" + NONAUTH_FLAG + " - Require the S3Guard mode to be \"non-authoritative\"\n"
+ " -" + FIPS_FLAG + " - Require the client is using a FIPS endpoint\n"
+ " -" + MAGIC_FLAG +
" - Require the S3 filesystem to be support the \"magic\" committer\n"
+ " -" + ENCRYPTION_FLAG
@@ -395,7 +393,7 @@ public static class BucketInfo extends S3GuardTool {
+ " directory markers are not deleted";

public BucketInfo(Configuration conf) {
super(conf, GUARDED_FLAG, UNGUARDED_FLAG, AUTH_FLAG, NONAUTH_FLAG, MAGIC_FLAG);
super(conf, GUARDED_FLAG, UNGUARDED_FLAG, FIPS_FLAG, MAGIC_FLAG);
CommandFormat format = getCommandFormat();
format.addOptionWithValue(ENCRYPTION_FLAG);
format.addOptionWithValue(MARKERS_FLAG);
@@ -462,6 +460,10 @@ public int run(String[] args, PrintStream out)
println(out, "\tEndpoint: %s=%s",
ENDPOINT,
StringUtils.isNotEmpty(endpoint) ? endpoint : "(unset)");
String region = conf.getTrimmed(AWS_REGION, "");
println(out, "\tRegion: %s=%s", AWS_REGION,
StringUtils.isNotEmpty(region) ? region : "(unset)");

String encryption =
printOption(out, "\tEncryption", Constants.S3_ENCRYPTION_ALGORITHM,
"none");
@@ -487,12 +489,12 @@ public int run(String[] args, PrintStream out)
FS_S3A_COMMITTER_NAME, COMMITTER_NAME_FILE);
switch (committer) {
case COMMITTER_NAME_FILE:
println(out, "The original 'file' commmitter is active"
println(out, "The original 'file' committer is active"
+ " -this is slow and potentially unsafe");
break;
case InternalCommitterConstants.COMMITTER_NAME_STAGING:
println(out, "The 'staging' committer is used "
+ "-prefer the 'directory' committer");
+ "-prefer the 'magic' committer");
// fall through
case COMMITTER_NAME_DIRECTORY:
// fall through
@@ -555,13 +557,17 @@ public int run(String[] args, PrintStream out)
processMarkerOption(out, fs,
getCommandFormat().getOptValue(MARKERS_FLAG));

// and check for capabilitities
// and check for capabilities
println(out, "%nStore Capabilities");
for (String capability : S3A_DYNAMIC_CAPABILITIES) {
out.printf("\t%s %s%n", capability,
fs.hasPathCapability(root, capability));
}
println(out, "");

if (commands.getOpt(FIPS_FLAG) && !fs.hasPathCapability(root, FIPS_ENDPOINT)) {
throw badState("FIPS endpoint was required but the filesystem is not using it");
}
// and finally flush the output and report a success.
out.flush();
return SUCCESS;
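Taken together with the flag wiring above, a hedged command-line example (the bucket name is illustrative): requiring a FIPS endpoint from the bucket-info probe, which fails with the badState error above when the filesystem is not using one:

    hadoop s3guard bucket-info -fips s3a://example-bucket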