
Commit 8a258ca

mengxr authored and jeanlyn committed

[SPARK-7333] [MLLIB] Add BinaryClassificationEvaluator to PySpark

This PR adds `BinaryClassificationEvaluator` to the Python ML Pipelines API, as a simple wrapper of the Scala implementation. oefirouz

Author: Xiangrui Meng <[email protected]>

Closes apache#5885 from mengxr/SPARK-7333 and squashes the following commits:

25d7451 [Xiangrui Meng] fix tests in python 3
babdde7 [Xiangrui Meng] fix doc
cb51e6a [Xiangrui Meng] add BinaryClassificationEvaluator in PySpark
1 parent bfd1bf8 commit 8a258ca

File tree

8 files changed: +193, -3 lines changed


python/docs/pyspark.ml.rst

Lines changed: 16 additions & 0 deletions

@@ -24,3 +24,19 @@ pyspark.ml.classification module
     :members:
     :undoc-members:
     :inherited-members:
+
+pyspark.ml.tuning module
+--------------------------------
+
+.. automodule:: pyspark.ml.tuning
+    :members:
+    :undoc-members:
+    :inherited-members:
+
+pyspark.ml.evaluation module
+--------------------------------
+
+.. automodule:: pyspark.ml.evaluation
+    :members:
+    :undoc-members:
+    :inherited-members:

python/pyspark/ml/evaluation.py

Lines changed: 107 additions & 0 deletions

@@ -0,0 +1,107 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from pyspark.ml.wrapper import JavaEvaluator
+from pyspark.ml.param import Param, Params
+from pyspark.ml.param.shared import HasLabelCol, HasRawPredictionCol
+from pyspark.ml.util import keyword_only
+from pyspark.mllib.common import inherit_doc
+
+__all__ = ['BinaryClassificationEvaluator']
+
+
+@inherit_doc
+class BinaryClassificationEvaluator(JavaEvaluator, HasLabelCol, HasRawPredictionCol):
+    """
+    Evaluator for binary classification, which expects two input
+    columns: rawPrediction and label.
+
+    >>> from pyspark.mllib.linalg import Vectors
+    >>> scoreAndLabels = map(lambda x: (Vectors.dense([1.0 - x[0], x[0]]), x[1]),
+    ...    [(0.1, 0.0), (0.1, 1.0), (0.4, 0.0), (0.6, 0.0), (0.6, 1.0), (0.6, 1.0), (0.8, 1.0)])
+    >>> dataset = sqlContext.createDataFrame(scoreAndLabels, ["raw", "label"])
+    ...
+    >>> evaluator = BinaryClassificationEvaluator(rawPredictionCol="raw")
+    >>> evaluator.evaluate(dataset)
+    0.70...
+    >>> evaluator.evaluate(dataset, {evaluator.metricName: "areaUnderPR"})
+    0.83...
+    """
+
+    _java_class = "org.apache.spark.ml.evaluation.BinaryClassificationEvaluator"
+
+    # a placeholder to make it appear in the generated doc
+    metricName = Param(Params._dummy(), "metricName",
+                       "metric name in evaluation (areaUnderROC|areaUnderPR)")
+
+    @keyword_only
+    def __init__(self, rawPredictionCol="rawPrediction", labelCol="label",
+                 metricName="areaUnderROC"):
+        """
+        __init__(self, rawPredictionCol="rawPrediction", labelCol="label", \
+                 metricName="areaUnderROC")
+        """
+        super(BinaryClassificationEvaluator, self).__init__()
+        #: param for metric name in evaluation (areaUnderROC|areaUnderPR)
+        self.metricName = Param(self, "metricName",
+                                "metric name in evaluation (areaUnderROC|areaUnderPR)")
+        self._setDefault(rawPredictionCol="rawPrediction", labelCol="label",
+                         metricName="areaUnderROC")
+        kwargs = self.__init__._input_kwargs
+        self._set(**kwargs)
+
+    def setMetricName(self, value):
+        """
+        Sets the value of :py:attr:`metricName`.
+        """
+        self.paramMap[self.metricName] = value
+        return self
+
+    def getMetricName(self):
+        """
+        Gets the value of metricName or its default value.
+        """
+        return self.getOrDefault(self.metricName)
+
+    @keyword_only
+    def setParams(self, rawPredictionCol="rawPrediction", labelCol="label",
+                  metricName="areaUnderROC"):
+        """
+        setParams(self, rawPredictionCol="rawPrediction", labelCol="label", \
+                  metricName="areaUnderROC")
+        Sets params for binary classification evaluator.
+        """
+        kwargs = self.setParams._input_kwargs
+        return self._set(**kwargs)
+
+
+if __name__ == "__main__":
+    import doctest
+    from pyspark.context import SparkContext
+    from pyspark.sql import SQLContext
+    globs = globals().copy()
+    # The small batch size here ensures that we see multiple batches,
+    # even in these small test examples:
+    sc = SparkContext("local[2]", "ml.evaluation tests")
+    sqlContext = SQLContext(sc)
+    globs['sc'] = sc
+    globs['sqlContext'] = sqlContext
+    (failure_count, test_count) = doctest.testmod(
+        globs=globs, optionflags=doctest.ELLIPSIS)
+    sc.stop()
+    if failure_count:
+        exit(-1)
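
Beyond the doctest above, the evaluator is meant to plug into the pyspark.ml.tuning module documented earlier in this commit. A sketch of that combination, assuming a `training` DataFrame with "features" and "label" columns already exists and that the CrossValidator of this era accepts these keyword arguments (the variable names are illustrative):

from pyspark.ml.classification import LogisticRegression
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder

lr = LogisticRegression(maxIter=10)
grid = ParamGridBuilder().addGrid(lr.regParam, [0.1, 0.01]).build()
# defaults: rawPredictionCol="rawPrediction", metricName="areaUnderROC"
evaluator = BinaryClassificationEvaluator()
cv = CrossValidator(estimator=lr, estimatorParamMaps=grid,
                    evaluator=evaluator, numFolds=3)
cvModel = cv.fit(training)  # selects the model with the best areaUnderROC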

python/pyspark/ml/param/_shared_params_code_gen.py

Lines changed: 1 addition & 0 deletions

@@ -93,6 +93,7 @@ def get$Name(self):
     ("featuresCol", "features column name", "'features'"),
     ("labelCol", "label column name", "'label'"),
     ("predictionCol", "prediction column name", "'prediction'"),
+    ("rawPredictionCol", "raw prediction column name", "'rawPrediction'"),
     ("inputCol", "input column name", None),
     ("outputCol", "output column name", None),
     ("numFeatures", "number of features", None)]

python/pyspark/ml/param/shared.py

Lines changed: 29 additions & 0 deletions

@@ -165,6 +165,35 @@ def getPredictionCol(self):
         return self.getOrDefault(self.predictionCol)


+class HasRawPredictionCol(Params):
+    """
+    Mixin for param rawPredictionCol: raw prediction column name.
+    """
+
+    # a placeholder to make it appear in the generated doc
+    rawPredictionCol = Param(Params._dummy(), "rawPredictionCol", "raw prediction column name")
+
+    def __init__(self):
+        super(HasRawPredictionCol, self).__init__()
+        #: param for raw prediction column name
+        self.rawPredictionCol = Param(self, "rawPredictionCol", "raw prediction column name")
+        if 'rawPrediction' is not None:
+            self._setDefault(rawPredictionCol='rawPrediction')
+
+    def setRawPredictionCol(self, value):
+        """
+        Sets the value of :py:attr:`rawPredictionCol`.
+        """
+        self.paramMap[self.rawPredictionCol] = value
+        return self
+
+    def getRawPredictionCol(self):
+        """
+        Gets the value of rawPredictionCol or its default value.
+        """
+        return self.getOrDefault(self.rawPredictionCol)
+
+
 class HasInputCol(Params):
     """
     Mixin for param inputCol: input column name.
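
The net effect for BinaryClassificationEvaluator, which mixes this class in, is a free getter/setter pair plus the generated default; roughly (assuming a live SparkContext, as in the doctest environment):

evaluator = BinaryClassificationEvaluator()
evaluator.getRawPredictionCol()       # 'rawPrediction' (the generated default)
evaluator.setRawPredictionCol("raw")  # setter returns self for chaining
evaluator.getRawPredictionCol()       # 'raw'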

python/pyspark/ml/pipeline.py

Lines changed: 22 additions & 1 deletion

@@ -22,7 +22,7 @@
 from pyspark.mllib.common import inherit_doc


-__all__ = ['Estimator', 'Transformer', 'Pipeline', 'PipelineModel']
+__all__ = ['Estimator', 'Transformer', 'Pipeline', 'PipelineModel', 'Evaluator']


 @inherit_doc
@@ -168,3 +168,24 @@ def transform(self, dataset, params={}):
         for t in self.transformers:
             dataset = t.transform(dataset, paramMap)
         return dataset
+
+
+class Evaluator(object):
+    """
+    Base class for evaluators that compute metrics from predictions.
+    """
+
+    __metaclass__ = ABCMeta
+
+    @abstractmethod
+    def evaluate(self, dataset, params={}):
+        """
+        Evaluates the output.
+
+        :param dataset: a dataset that contains labels/observations and
+               predictions
+        :param params: an optional param map that overrides embedded
+               params
+        :return: metric
+        """
+        raise NotImplementedError()
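
Since Evaluator is now exported in __all__, user code can subclass it directly in Python; a hypothetical toy subclass (AccuracyEvaluator is illustrative only and not part of this commit):

from pyspark.ml.pipeline import Evaluator

class AccuracyEvaluator(Evaluator):
    """Toy evaluator: fraction of rows where prediction equals label."""

    def evaluate(self, dataset, params={}):
        total = dataset.count()
        correct = dataset.filter(dataset.prediction == dataset.label).count()
        return correct / float(total)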

python/pyspark/ml/wrapper.py

Lines changed: 16 additions & 1 deletion

@@ -20,7 +20,7 @@
 from pyspark import SparkContext
 from pyspark.sql import DataFrame
 from pyspark.ml.param import Params
-from pyspark.ml.pipeline import Estimator, Transformer
+from pyspark.ml.pipeline import Estimator, Transformer, Evaluator
 from pyspark.mllib.common import inherit_doc


@@ -147,3 +147,18 @@ def __init__(self, java_model):

     def _java_obj(self):
         return self._java_model
+
+
+@inherit_doc
+class JavaEvaluator(Evaluator, JavaWrapper):
+    """
+    Base class for :py:class:`Evaluator`s that wrap Java/Scala
+    implementations.
+    """
+
+    __metaclass__ = ABCMeta
+
+    def evaluate(self, dataset, params={}):
+        java_obj = self._java_obj()
+        self._transfer_params_to_java(params, java_obj)
+        return java_obj.evaluate(dataset._jdf, self._empty_java_param_map())
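
With JavaEvaluator handling param transfer and the Py4J call, wrapping another Scala-side evaluator reduces to a declaration in the style of BinaryClassificationEvaluator above; a hypothetical sketch (RegressionEvaluator and its Scala class name are assumptions, not part of this commit):

from pyspark.ml.param.shared import HasLabelCol, HasPredictionCol
from pyspark.ml.wrapper import JavaEvaluator
from pyspark.mllib.common import inherit_doc

@inherit_doc
class RegressionEvaluator(JavaEvaluator, HasLabelCol, HasPredictionCol):
    """Hypothetical Python wrapper for a Scala-side evaluator."""

    # assumed Scala class; a real wrapper would also declare its params
    _java_class = "org.apache.spark.ml.evaluation.RegressionEvaluator"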

python/pyspark/sql/_types.py

Lines changed: 1 addition & 1 deletion

@@ -652,7 +652,7 @@ def _python_to_sql_converter(dataType):

     if isinstance(dataType, StructType):
         names, types = zip(*[(f.name, f.dataType) for f in dataType.fields])
-        converters = map(_python_to_sql_converter, types)
+        converters = [_python_to_sql_converter(t) for t in types]

         def converter(obj):
             if isinstance(obj, dict):
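
This one-line change is the "fix tests in python 3" commit in the squash list: in Python 3, map returns a one-shot iterator, so converters would be exhausted after the first row was converted, while a list comprehension yields a reusable list. A minimal demonstration:

converters = map(str.upper, ["a", "b"])  # lazy iterator on Python 3
list(converters)   # ['A', 'B']
list(converters)   # [] -- the iterator is already exhausted

converters = [s.upper() for s in ["a", "b"]]  # plain list
list(converters)   # ['A', 'B'], repeatably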

python/run-tests

Lines changed: 1 addition & 0 deletions

@@ -100,6 +100,7 @@ function run_ml_tests() {
     run_test "pyspark/ml/classification.py"
     run_test "pyspark/ml/tuning.py"
     run_test "pyspark/ml/tests.py"
+    run_test "pyspark/ml/evaluation.py"
 }

 function run_streaming_tests() {
