@@ -191,36 +191,22 @@ The following code illustrates how to load a sample dataset and use logistic reg

 {% highlight scala %}

-import scala.collection.mutable
-import scala.language.reflectiveCalls
-
-import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.ml.{Pipeline, PipelineStage}
-import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel}
-import org.apache.spark.ml.feature.StringIndexer
+import org.apache.spark.ml.classification.LogisticRegression
 import org.apache.spark.mllib.util.MLUtils
-import org.apache.spark.sql.DataFrame
-
-val regParam = 0.3
-val elasticNetParam = 0.8
-val tol = 1E-6
-val dataPath = "data/mllib/sample_libsvm_data.txt"
-
-println(s"LogisticRegressionExample with regParam $regParam and elasticNetParam $elasticNetParam")

 // Load training and test data and cache it.
-val training = MLUtils.loadLibSVMFile(sc, dataPath).toDF()
+val training = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt").toDF()

 val lor = new LogisticRegression()
-  .setRegParam(regParam)
-  .setElasticNetParam(elasticNetParam)
-  .setTol(tol)
+  .setRegParam(0.3)
+  .setElasticNetParam(0.8)
+  .setTol(1e-6)

 // Fit the model
-val lirModel = lor.fit(training)
+val lorModel = lor.fit(training)

 // Print the weights and intercept for logistic regression.
-println(s"Weights: ${lirModel.weights} Intercept: ${lirModel.intercept}")
+println(s"Weights: ${lorModel.weights} Intercept: ${lorModel.intercept}")

 {% endhighlight %}

0 commit comments