@@ -22,6 +22,7 @@ import java.net.{Authenticator, PasswordAuthentication}
 import org.apache.hadoop.io.Text

 import org.apache.spark.deploy.SparkHadoopUtil
+import org.apache.spark.network.sasl.SecretKeyHolder

 /**
  * Spark class responsible for security.
@@ -84,7 +85,7 @@ import org.apache.spark.deploy.SparkHadoopUtil
  * Authenticator installed in the SecurityManager to how it does the authentication
  * and in this case gets the user name and password from the request.
  *
- *  - ConnectionManager -> The Spark ConnectionManager uses java nio to asynchronously
+ *  - BlockTransferService -> The Spark BlockTransferServices uses java nio to asynchronously
  *            exchange messages. For this we use the Java SASL
  *            (Simple Authentication and Security Layer) API and again use DIGEST-MD5
  *            as the authentication mechanism. This means the shared secret is not passed
@@ -98,7 +99,7 @@ import org.apache.spark.deploy.SparkHadoopUtil
  *            of protection they want. If we support those, the messages will also have to
  *            be wrapped and unwrapped via the SaslServer/SaslClient.wrap/unwrap API's.
  *
- *            Since the connectionManager does asynchronous messages passing, the SASL
+ *            Since the NioBlockTransferService does asynchronous messages passing, the SASL
  *            authentication is a bit more complex. A ConnectionManager can be both a client
  *            and a Server, so for a particular connection is has to determine what to do.
  *            A ConnectionId was added to be able to track connections and is used to
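The DIGEST-MD5 handshake referenced in this comment goes through the standard Java SASL API. The sketch below is a minimal, self-contained illustration of that API only, with placeholder user/secret/protocol values; it is not code from this patch and does not show Spark's actual handler wiring.

```scala
import javax.security.auth.callback.{Callback, CallbackHandler, NameCallback, PasswordCallback}
import javax.security.sasl.{RealmCallback, Sasl}

object DigestMd5Sketch {
  // Answers the DIGEST-MD5 prompts with a fixed user name and shared secret
  // (placeholders, not the values Spark derives from its configuration).
  class SecretCallbackHandler(user: String, secret: String) extends CallbackHandler {
    override def handle(callbacks: Array[Callback]): Unit = callbacks.foreach {
      case nc: NameCallback     => nc.setName(user)
      case pc: PasswordCallback => pc.setPassword(secret.toCharArray)
      case rc: RealmCallback    => rc.setText(rc.getDefaultText)
      case _                    => // other prompts are ignored in this sketch
    }
  }

  def main(args: Array[String]): Unit = {
    // Client half of the handshake; a matching Sasl.createSaslServer would sit on the
    // other end, and the challenge/response bytes would ride inside the connection's
    // handshake messages rather than stay local as they do here.
    val client = Sasl.createSaslClient(
      Array("DIGEST-MD5"), null, "spark", "default", null,
      new SecretCallbackHandler("sparkSaslUser", "shared-secret"))
    println(s"negotiating with mechanism: ${client.getMechanismName}")
  }
}
```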
@@ -107,6 +108,10 @@ import org.apache.spark.deploy.SparkHadoopUtil
  *            and waits for the response from the server and does the handshake before sending
  *            the real message.
  *
+ *            The NettyBlockTransferService ensures that SASL authentication is performed
+ *            synchronously prior to any other communication on a connection. This is done in
+ *            SaslClientBootstrap on the client side and SaslRpcHandler on the server side.
+ *
  *  - HTTP for the Spark UI -> the UI was changed to use servlets so that javax servlet filters
  *            can be used. Yarn requires a specific AmIpFilter be installed for security to work
  *            properly. For non-Yarn deployments, users can write a filter to go through a
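For reference, the shared-secret path described here is switched on through configuration. A spark-shell style fragment, with a placeholder app name and secret (outside YARN the secret must be supplied explicitly):

```scala
import org.apache.spark.SparkConf

// With authentication enabled, the SASL handshake described above runs
// before any block-transfer traffic on a connection.
val conf = new SparkConf()
  .setAppName("sasl-enabled-app")                        // placeholder
  .set("spark.authenticate", "true")
  .set("spark.authenticate.secret", "not-a-real-secret") // placeholder; required outside YARN
```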
@@ -139,7 +144,7 @@ import org.apache.spark.deploy.SparkHadoopUtil
  *  can take place.
  */

-private[spark] class SecurityManager(sparkConf: SparkConf) extends Logging {
+private[spark] class SecurityManager(sparkConf: SparkConf) extends Logging with SecretKeyHolder {

   // key used to store the spark secret in the Hadoop UGI
   private val sparkSecretLookupKey = "sparkCookie"
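The trait being mixed in here is small; its shape can be inferred from the two overrides added at the bottom of this diff. Roughly, and with an illustrative name (the real definition lives in org.apache.spark.network.sasl in the network-common module):

```scala
// Illustrative shape only; see org.apache.spark.network.sasl.SecretKeyHolder
// for the definition actually used by the SASL client/server bootstraps.
trait SecretKeyHolderShape {
  def getSaslUser(appId: String): String   // identity presented in the SASL handshake
  def getSecretKey(appId: String): String  // shared secret for that application
}
```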
@@ -337,4 +342,16 @@ private[spark] class SecurityManager(sparkConf: SparkConf) extends Logging {
    * @return the secret key as a String if authentication is enabled, otherwise returns null
    */
   def getSecretKey(): String = secretKey
+
+  override def getSaslUser(appId: String): String = {
+    val myAppId = sparkConf.getAppId
+    require(appId == myAppId, s"SASL appId $appId did not match my appId ${myAppId}")
+    getSaslUser()
+  }
+
+  override def getSecretKey(appId: String): String = {
+    val myAppId = sparkConf.getAppId
+    require(appId == myAppId, s"SASL appId $appId did not match my appId ${myAppId}")
+    getSecretKey()
+  }
 }
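The overrides above tie secret lookup to a single application id. A self-contained toy (class name and ids made up) showing the same guard in isolation:

```scala
// Toy holder keyed to one application: lookups for any other appId fail fast,
// mirroring the require(...) checks in the overrides above.
class SingleAppSecretHolder(myAppId: String, secret: String) {
  def getSecretKey(appId: String): String = {
    require(appId == myAppId, s"SASL appId $appId did not match my appId $myAppId")
    secret
  }
}

object SingleAppSecretHolderDemo extends App {
  val holder = new SingleAppSecretHolder("app-20141104000000-0001", "s3cret")
  println(holder.getSecretKey("app-20141104000000-0001"))  // prints the secret
  // holder.getSecretKey("app-20141104000000-0002")        // would throw IllegalArgumentException
}
```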