Skip to content

Commit 9372779

Browse files
committed
code style: max line length <= 100
Signed-off-by: Manish Amde <[email protected]>
1 parent dd0c0d7 commit 9372779

File tree

7 files changed

+184
-100
lines changed

7 files changed

+184
-100
lines changed

mllib/src/main/scala/org/apache/spark/mllib/regression/RegressionTree.scala

Lines changed: 0 additions & 21 deletions
This file was deleted.

mllib/src/main/scala/org/apache/spark/mllib/tree/DecisionTree.scala

Lines changed: 154 additions & 62 deletions
Large diffs are not rendered by default.

mllib/src/main/scala/org/apache/spark/mllib/tree/DecisionTreeRunner.scala

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ import org.apache.spark.mllib.tree.configuration.Algo._
2929
object DecisionTreeRunner extends Logging {
3030

3131
val usage = """
32-
Usage: DecisionTreeRunner <master>[slices] --algo <Classification,Regression> --trainDataDir path --testDataDir path --maxDepth num [--impurity <Gini,Entropy,Variance>] [--maxBins num]
32+
Usage: DecisionTreeRunner <master>[slices] --algo <Classification,Regression> --trainDataDir path --testDataDir path --maxDepth num [--impurity <Gini,Entropy,Variance>] [--maxBins num]
3333
"""
3434

3535

@@ -132,7 +132,8 @@ object DecisionTreeRunner extends Logging {
132132

133133
//TODO: Make these generic MLTable metrics
134134
def meanSquaredError(tree : DecisionTreeModel, data : RDD[LabeledPoint]) : Double = {
135-
val meanSumOfSquares = data.map(y => (tree.predict(y.features) - y.label)*(tree.predict(y.features) - y.label)).mean()
135+
val meanSumOfSquares =
136+
data.map(y => (tree.predict(y.features) - y.label)*(tree.predict(y.features) - y.label)).mean()
136137
meanSumOfSquares
137138
}
138139

mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Gini.scala

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,5 +31,7 @@ object Gini extends Impurity {
3131
}
3232
}
3333

34-
def calculate(count: Double, sum: Double, sumSquares: Double): Double = throw new OperationNotSupportedException("Gini.calculate")
34+
def calculate(count: Double, sum: Double, sumSquares: Double): Double =
35+
throw new OperationNotSupportedException("Gini.calculate")
36+
3537
}

mllib/src/main/scala/org/apache/spark/mllib/tree/model/InformationGainStats.scala

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -16,13 +16,12 @@
1616
*/
1717
package org.apache.spark.mllib.tree.model
1818

19-
class InformationGainStats(val gain : Double,
20-
val impurity: Double,
21-
val leftImpurity : Double,
22-
//val leftSamples : Long,
23-
val rightImpurity : Double,
24-
//val rightSamples : Long
25-
val predict : Double) extends Serializable {
19+
class InformationGainStats(
20+
val gain : Double,
21+
val impurity: Double,
22+
val leftImpurity : Double,
23+
val rightImpurity : Double,
24+
val predict : Double) extends Serializable {
2625

2726
override def toString = {
2827
"gain = %f, impurity = %f, left impurity = %f, right impurity = %f, predict = %f"

mllib/src/main/scala/org/apache/spark/mllib/tree/model/Node.scala

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -29,11 +29,13 @@ class Node ( val id : Int,
2929
val stats : Option[InformationGainStats]
3030
) extends Serializable with Logging{
3131

32-
override def toString = "id = " + id + ", isLeaf = " + isLeaf + ", predict = " + predict + ", split = " + split + ", stats = " + stats
32+
override def toString = "id = " + id + ", isLeaf = " + isLeaf + ", predict = " + predict + ", " +
33+
"split = " + split + ", stats = " + stats
3334

3435
def build(nodes : Array[Node]) : Unit = {
3536

36-
logDebug("building node " + id + " at level " + (scala.math.log(id + 1)/scala.math.log(2)).toInt )
37+
logDebug("building node " + id + " at level " +
38+
(scala.math.log(id + 1)/scala.math.log(2)).toInt )
3739
logDebug("id = " + id + ", split = " + split)
3840
logDebug("stats = " + stats)
3941
logDebug("predict = " + predict)

mllib/src/main/scala/org/apache/spark/mllib/tree/model/Split.scala

Lines changed: 14 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -18,14 +18,23 @@ package org.apache.spark.mllib.tree.model
1818

1919
import org.apache.spark.mllib.tree.configuration.FeatureType.FeatureType
2020

21-
case class Split(feature: Int, threshold : Double, featureType : FeatureType, categories : List[Double]){
21+
case class Split(
22+
feature: Int,
23+
threshold : Double,
24+
featureType : FeatureType,
25+
categories : List[Double]){
26+
2227
override def toString =
23-
"Feature = " + feature + ", threshold = " + threshold + ", featureType = " + featureType + ", categories = " + categories
28+
"Feature = " + feature + ", threshold = " + threshold + ", featureType = " + featureType +
29+
", categories = " + categories
2430
}
2531

26-
class DummyLowSplit(feature: Int, kind : FeatureType) extends Split(feature, Double.MinValue, kind, List())
32+
class DummyLowSplit(feature: Int, kind : FeatureType)
33+
extends Split(feature, Double.MinValue, kind, List())
2734

28-
class DummyHighSplit(feature: Int, kind : FeatureType) extends Split(feature, Double.MaxValue, kind, List())
35+
class DummyHighSplit(feature: Int, kind : FeatureType)
36+
extends Split(feature, Double.MaxValue, kind, List())
2937

30-
class DummyCategoricalSplit(feature: Int, kind : FeatureType) extends Split(feature, Double.MaxValue, kind, List())
38+
class DummyCategoricalSplit(feature: Int, kind : FeatureType)
39+
extends Split(feature, Double.MaxValue, kind, List())
3140

0 commit comments

Comments
 (0)