Commit 00077f5
HADOOP-18975 S3A: Add option fs.s3a.endpoint.fips to use AWS FIPS endpoints
Adds a new option `fs.s3a.endpoint.fips` to switch the SDK client to use FIPS endpoints, as an alternative to explicitly declaring them. This is not a blocker for FIPS support.

* Provided as a path capability for probes.
* SDK v2 itself doesn't know that some regions don't have FIPS endpoints.
* SDK only fails with endpoint + fips flag as a retried exception; this PR fails fast.
* Adds a new "connecting.md" doc; moves existing docs there and restructures.
* New tests in ITestS3AEndpointRegion.

bucket-info command support:

* added to list of path capabilities
* added -fips flag and test for explicit probe
* also now prints bucket region
* and removed some of the obsolete s3guard options
* updated docs

Contributed by Steve Loughran
1 parent 2f1e155 commit 00077f5
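A minimal sketch of turning the new option on from client code. The configuration key and its fail-fast behaviour come from this commit; the wrapper class itself is illustrative:

```java
import org.apache.hadoop.conf.Configuration;

// Illustrative helper, not part of the commit.
public class S3AFipsExample {
  public static Configuration withFips() {
    Configuration conf = new Configuration();
    // Ask the S3A connector to use the region's FIPS endpoint.
    conf.setBoolean("fs.s3a.endpoint.fips", true);
    // Do not also set fs.s3a.endpoint: client creation now fails fast
    // with an IllegalArgumentException if both are configured.
    return conf;
  }
}
```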

File tree

14 files changed: +688 additions, -306 deletions


hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java

Lines changed: 9 additions & 0 deletions
```diff
@@ -1335,6 +1335,15 @@ private Constants() {
    */
   public static final String AWS_S3_DEFAULT_REGION = "us-east-2";
 
+  /**
+   * Is the endpoint a FIPS endpoint?
+   * Can be queried as a path capability.
+   * Value {@value}.
+   */
+  public static final String FIPS_ENDPOINT = "fs.s3a.endpoint.fips";
+
+  public static final boolean ENDPOINT_FIPS_DEFAULT = false;
+
   /**
    * Require that all S3 access is made through Access Points.
    */
```

hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java

Lines changed: 22 additions & 0 deletions
```diff
@@ -22,6 +22,7 @@
 import java.net.URI;
 import java.net.URISyntaxException;
 
+import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.fs.s3a.impl.AWSClientConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -54,6 +55,7 @@
 import static org.apache.hadoop.fs.s3a.Constants.AWS_REGION;
 import static org.apache.hadoop.fs.s3a.Constants.AWS_S3_DEFAULT_REGION;
 import static org.apache.hadoop.fs.s3a.Constants.CENTRAL_ENDPOINT;
+import static org.apache.hadoop.fs.s3a.Constants.FIPS_ENDPOINT;
 import static org.apache.hadoop.fs.s3a.Constants.HTTP_SIGNER_CLASS_NAME;
 import static org.apache.hadoop.fs.s3a.Constants.HTTP_SIGNER_ENABLED;
 import static org.apache.hadoop.fs.s3a.Constants.HTTP_SIGNER_ENABLED_DEFAULT;
@@ -63,6 +65,7 @@
 import static org.apache.hadoop.fs.s3a.auth.SignerFactory.createHttpSigner;
 import static org.apache.hadoop.fs.s3a.impl.AWSHeaders.REQUESTER_PAYS_HEADER;
 import static org.apache.hadoop.fs.s3a.impl.InternalConstants.AUTH_SCHEME_AWS_SIGV_4;
+import static org.apache.hadoop.util.Preconditions.checkArgument;
 
 
 /**
@@ -102,6 +105,13 @@ public class DefaultS3ClientFactory extends Configured
   /** Exactly once log to inform about ignoring the AWS-SDK Warnings for CSE. */
   private static final LogExactlyOnce IGNORE_CSE_WARN = new LogExactlyOnce(LOG);
 
+  /**
+   * Error message when an endpoint is set with FIPS enabled: {@value}.
+   */
+  @VisibleForTesting
+  public static final String ERROR_ENDPOINT_WITH_FIPS =
+      "An endpoint cannot set when " + FIPS_ENDPOINT + " is true";
+
   @Override
   public S3Client createS3Client(
       final URI uri,
@@ -248,6 +258,7 @@ protected ClientOverrideConfiguration createClientOverrideConfiguration(
    * @param conf conf configuration object
    * @param <BuilderT> S3 client builder type
    * @param <ClientT> S3 client type
+   * @throws IllegalArgumentException if endpoint is set when FIPS is enabled.
    */
   private <BuilderT extends S3BaseClientBuilder<BuilderT, ClientT>, ClientT> void configureEndpointAndRegion(
       BuilderT builder, S3ClientCreationParameters parameters, Configuration conf) {
@@ -263,7 +274,18 @@ private <BuilderT extends S3BaseClientBuilder<BuilderT, ClientT>, ClientT> void
       region = Region.of(configuredRegion);
     }
 
+    // FIPs? Log it, then reject any attempt to set an endpoint
+    final boolean fipsEnabled = parameters.isFipsEnabled();
+    if (fipsEnabled) {
+      LOG.debug("Enabling FIPS mode");
+    }
+    // always setting it guarantees the value is non-null,
+    // which tests expect.
+    builder.fipsEnabled(fipsEnabled);
+
     if (endpoint != null) {
+      checkArgument(!fipsEnabled,
+          "%s : %s", ERROR_ENDPOINT_WITH_FIPS, endpoint);
       builder.endpointOverride(endpoint);
       // No region was configured, try to determine it from the endpoint.
       if (region == null) {
```
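The `builder.fipsEnabled(...)` call above is the standard AWS SDK v2 setter on the client builder. A standalone sketch of the equivalent raw SDK usage (the region is a placeholder; credential resolution is left to the defaults) shows why an `endpointOverride` has to be rejected: with the FIPS flag set, the SDK derives the FIPS endpoint from the region itself.

```java
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;

// Standalone sketch, not part of the commit. With fipsEnabled(true) the
// SDK resolves the region's FIPS endpoint on its own, so supplying
// endpointOverride(...) as well would conflict -- the factory's
// checkArgument fails fast instead of leaving the SDK to fail (and
// retry) later.
public class RawSdkFipsExample {
  public static S3Client fipsClient() {
    return S3Client.builder()
        .region(Region.US_EAST_1)  // placeholder region
        .fipsEnabled(true)
        .build();
  }
}
```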

hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java

Lines changed: 12 additions & 0 deletions
```diff
@@ -461,6 +461,11 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
    */
   private boolean isMultipartCopyEnabled;
 
+  /**
+   * Is FIPS enabled?
+   */
+  private boolean fipsEnabled;
+
   /**
    * A cache of files that should be deleted when the FileSystem is closed
    * or the JVM is exited.
@@ -614,6 +619,8 @@ public void initialize(URI name, Configuration originalConf)
         ? conf.getTrimmed(AWS_REGION)
         : accessPoint.getRegion();
 
+    fipsEnabled = conf.getBoolean(FIPS_ENDPOINT, ENDPOINT_FIPS_DEFAULT);
+
     // is this an S3Express store?
     s3ExpressStore = isS3ExpressStore(bucket, endpoint);
 
@@ -1046,6 +1053,7 @@ private void bindAWSClient(URI name, boolean dtEnabled) throws IOException {
         .withMultipartThreshold(multiPartThreshold)
         .withTransferManagerExecutor(unboundedThreadPool)
         .withRegion(configuredRegion)
+        .withFipsEnabled(fipsEnabled)
         .withExpressCreateSession(
             conf.getBoolean(S3EXPRESS_CREATE_SESSION, S3EXPRESS_CREATE_SESSION_DEFAULT));
 
@@ -5521,6 +5529,10 @@ public boolean hasPathCapability(final Path path, final String capability)
     case OPTIMIZED_COPY_FROM_LOCAL:
       return optimizedCopyFromLocal;
 
+    // probe for a fips endpoint
+    case FIPS_ENDPOINT:
+      return fipsEnabled;
+
     default:
       return super.hasPathCapability(p, cap);
     }
```
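Because the flag is surfaced through `hasPathCapability()`, applications can probe it through the generic FileSystem API without casting to S3A-specific types. A short sketch (the bucket URI is a placeholder):

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative probe, not part of the commit.
public class FipsProbeExample {
  public static boolean usesFipsEndpoint(Configuration conf) throws IOException {
    Path root = new Path("s3a://example-bucket/");  // placeholder bucket
    FileSystem fs = root.getFileSystem(conf);
    return fs.hasPathCapability(root, "fs.s3a.endpoint.fips");
  }
}
```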

hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ClientFactory.java

Lines changed: 23 additions & 0 deletions
```diff
@@ -176,6 +176,11 @@ final class S3ClientCreationParameters {
    */
   private boolean expressCreateSession = S3EXPRESS_CREATE_SESSION_DEFAULT;
 
+  /**
+   * Is FIPS enabled?
+   */
+  private boolean fipsEnabled;
+
   /**
    * List of execution interceptors to include in the chain
    * of interceptors in the SDK.
@@ -461,5 +466,23 @@ public String toString() {
         ", expressCreateSession=" + expressCreateSession +
         '}';
     }
+
+    /**
+     * Get the FIPS flag.
+     * @return is fips enabled
+     */
+    public boolean isFipsEnabled() {
+      return fipsEnabled;
+    }
+
+    /**
+     * Set builder value.
+     * @param value new value
+     * @return the builder
+     */
+    public S3ClientCreationParameters withFipsEnabled(final boolean value) {
+      fipsEnabled = value;
+      return this;
+    }
   }
 }
```
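The new getter/setter pair follows the existing fluent style of `S3ClientCreationParameters`; a trimmed sketch of how `bindAWSClient()` chains it (other parameters elided):

```java
import org.apache.hadoop.fs.s3a.S3ClientFactory;

// Trimmed sketch; in S3AFileSystem.bindAWSClient() this sits in a
// longer chain of .with*() calls.
public class CreationParametersExample {
  public static S3ClientFactory.S3ClientCreationParameters fipsParameters() {
    return new S3ClientFactory.S3ClientCreationParameters()
        .withFipsEnabled(true);
  }
}
```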

hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java

Lines changed: 2 additions & 0 deletions
```diff
@@ -38,6 +38,7 @@
 import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_STANDARD_OPTIONS;
 import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_OPERATIONS_PURGE_UPLOADS;
 import static org.apache.hadoop.fs.s3a.Constants.ENABLE_MULTI_DELETE;
+import static org.apache.hadoop.fs.s3a.Constants.FIPS_ENDPOINT;
 import static org.apache.hadoop.fs.s3a.Constants.FS_S3A_CREATE_PERFORMANCE;
 import static org.apache.hadoop.fs.s3a.Constants.FS_S3A_CREATE_PERFORMANCE_ENABLED;
 import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_AWS_V2;
@@ -272,6 +273,7 @@ private InternalConstants() {
       FS_CHECKSUMS,
       FS_MULTIPART_UPLOADER,
       DIRECTORY_LISTING_INCONSISTENT,
+      FIPS_ENDPOINT,
 
       // s3 specific
       STORE_CAPABILITY_AWS_V2,
```

hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java

Lines changed: 14 additions & 8 deletions
```diff
@@ -357,21 +357,19 @@ public static class BucketInfo extends S3GuardTool {
   public static final String NAME = BUCKET_INFO;
   public static final String GUARDED_FLAG = "guarded";
   public static final String UNGUARDED_FLAG = "unguarded";
-  public static final String AUTH_FLAG = "auth";
-  public static final String NONAUTH_FLAG = "nonauth";
   public static final String ENCRYPTION_FLAG = "encryption";
   public static final String MAGIC_FLAG = "magic";
   public static final String MARKERS_FLAG = "markers";
   public static final String MARKERS_AWARE = "aware";
+  public static final String FIPS_FLAG = "fips";
 
   public static final String PURPOSE = "provide/check information"
       + " about a specific bucket";
 
   private static final String USAGE = NAME + " [OPTIONS] s3a://BUCKET\n"
       + "\t" + PURPOSE + "\n\n"
       + "Common options:\n"
-      + "  -" + AUTH_FLAG + " - Require the S3Guard mode to be \"authoritative\"\n"
-      + "  -" + NONAUTH_FLAG + " - Require the S3Guard mode to be \"non-authoritative\"\n"
+      + "  -" + FIPS_FLAG + " - Require the client is using a FIPS endpoint\n"
       + "  -" + MAGIC_FLAG +
           " - Require the S3 filesystem to be support the \"magic\" committer\n"
       + "  -" + ENCRYPTION_FLAG
@@ -395,7 +393,7 @@ public static class BucketInfo extends S3GuardTool {
       + " directory markers are not deleted";
 
   public BucketInfo(Configuration conf) {
-    super(conf, GUARDED_FLAG, UNGUARDED_FLAG, AUTH_FLAG, NONAUTH_FLAG, MAGIC_FLAG);
+    super(conf, GUARDED_FLAG, UNGUARDED_FLAG, FIPS_FLAG, MAGIC_FLAG);
     CommandFormat format = getCommandFormat();
     format.addOptionWithValue(ENCRYPTION_FLAG);
     format.addOptionWithValue(MARKERS_FLAG);
@@ -462,6 +460,10 @@ public int run(String[] args, PrintStream out)
       println(out, "\tEndpoint: %s=%s",
           ENDPOINT,
           StringUtils.isNotEmpty(endpoint) ? endpoint : "(unset)");
+      String region = conf.getTrimmed(AWS_REGION, "");
+      println(out, "\tRegion: %s=%s", AWS_REGION,
+          StringUtils.isNotEmpty(region) ? region : "(unset)");
+
       String encryption =
           printOption(out, "\tEncryption", Constants.S3_ENCRYPTION_ALGORITHM,
               "none");
@@ -487,12 +489,12 @@ public int run(String[] args, PrintStream out)
           FS_S3A_COMMITTER_NAME, COMMITTER_NAME_FILE);
       switch (committer) {
       case COMMITTER_NAME_FILE:
-        println(out, "The original 'file' commmitter is active"
+        println(out, "The original 'file' committer is active"
             + " -this is slow and potentially unsafe");
         break;
       case InternalCommitterConstants.COMMITTER_NAME_STAGING:
         println(out, "The 'staging' committer is used "
-            + "-prefer the 'directory' committer");
+            + "-prefer the 'magic' committer");
         // fall through
       case COMMITTER_NAME_DIRECTORY:
         // fall through
@@ -555,13 +557,17 @@ public int run(String[] args, PrintStream out)
       processMarkerOption(out, fs,
           getCommandFormat().getOptValue(MARKERS_FLAG));
 
-      // and check for capabilitities
+      // and check for capabilities
       println(out, "%nStore Capabilities");
       for (String capability : S3A_DYNAMIC_CAPABILITIES) {
         out.printf("\t%s %s%n", capability,
             fs.hasPathCapability(root, capability));
       }
       println(out, "");
+
+      if (commands.getOpt(FIPS_FLAG) && !fs.hasPathCapability(root, FIPS_ENDPOINT)) {
+        throw badState("FIPS endpoint was required but the filesystem is not using it");
+      }
       // and finally flush the output and report a success.
       out.flush();
       return SUCCESS;
```
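With the `-fips` flag wired in, a compliance check can be scripted against the tool's exit code; an illustrative invocation (the bucket name is a placeholder):

```
hadoop s3guard bucket-info -fips s3a://example-bucket/
```

If the store's client was not created with `fs.s3a.endpoint.fips` set, the probe above fails with the "FIPS endpoint was required" error rather than returning success.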
