
Commit da6e12f

sdesrozis and Ishan-Kumar2 authored and committed
[skip ci] add doctest for regression metrics (pytorch#2324)
1 parent f5cb516 commit da6e12f

15 files changed, +258 −38 lines changed
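
Every doctest added in this commit calls ``metric.attach(default_evaluator, ...)``, so a ``default_evaluator`` engine has to exist in the documentation's doctest setup. The sketch below is an assumed minimal setup, shown only for context (it is not part of this diff): an ``Engine`` whose ``process_function`` simply forwards each ``(y_pred, y)`` batch to the attached metrics.

    # Assumed doctest setup; the name default_evaluator matches the doctests below.
    from ignite.engine import Engine

    def eval_step(engine, batch):
        # each batch in the doctests is already a (y_pred, y) pair
        return batch

    default_evaluator = Engine(eval_step)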

ignite/contrib/metrics/regression/canberra_metric.py

Lines changed: 15 additions & 9 deletions
@@ -37,18 +37,24 @@ class CanberraMetric(_BaseRegression):
     .. _`Botchkarev 2018`:
         https://arxiv.org/ftp/arxiv/papers/1809/1809.03006.pdf
 
-    .. testcode::
+    Examples:
+        To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+        The output of the engine's ``process_function`` needs to be in format of
+        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
 
-        metric = CanberraMetric()
-        metric.attach(default_evaluator, 'canberra')
-        y_pred = torch.Tensor([[3.8], [9.9], [-5.4], [2.1]])
-        y_true = y_pred * 1.5
-        state = default_evaluator.run([[y_pred, y_true]])
-        print(state.metrics['canberra'])
+        .. testcode::
 
-    .. testoutput::
+            metric = CanberraMetric()
+            metric.attach(default_evaluator, 'canberra')
+            y_pred = torch.Tensor([[3.8], [9.9], [-5.4], [2.1]])
+            y_true = y_pred * 1.5
+            state = default_evaluator.run([[y_pred, y_true]])
+            print(state.metrics['canberra'])
+
+        .. testoutput::
+
+            0.8000...
 
-        0.8000...
     .. versionchanged:: 0.4.3
 
         - Fixed implementation: ``abs`` in denominator.
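
The expected ``0.8000...`` can be re-derived by hand; the snippet below is an independent check that assumes the Canberra accumulation ``sum(|y_pred - y| / (|y_pred| + |y|))`` (a re-derivation, not a quote of the library's code).

    # Hand check of the doctest output (assumed formula).
    import torch

    y_pred = torch.Tensor([[3.8], [9.9], [-5.4], [2.1]])
    y = y_pred * 1.5
    print(torch.sum(torch.abs(y_pred - y) / (torch.abs(y_pred) + torch.abs(y))).item())
    # each term is 0.5 / 2.5 = 0.2, and 4 * 0.2 = 0.8 -> matches 0.8000...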

ignite/contrib/metrics/regression/fractional_absolute_error.py

Lines changed: 15 additions & 9 deletions
@@ -34,18 +34,24 @@ class FractionalAbsoluteError(_BaseRegression):
             metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
             non-blocking. By default, CPU.
 
-    .. testcode::
+    Examples:
+        To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+        The output of the engine's ``process_function`` needs to be in format of
+        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
 
-        metric = FractionalAbsoluteError()
-        metric.attach(default_evaluator, 'fractional_abs_error')
-        y_pred = torch.Tensor([[3.8], [9.9], [-5.4], [2.1]])
-        y_true = y_pred * 0.8
-        state = default_evaluator.run([[y_pred, y_true]])
-        print(state.metrics['fractional_abs_error'])
+        .. testcode::
 
-    .. testoutput::
+            metric = FractionalAbsoluteError()
+            metric.attach(default_evaluator, 'fractional_abs_error')
+            y_pred = torch.Tensor([[3.8], [9.9], [-5.4], [2.1]])
+            y_true = y_pred * 0.8
+            state = default_evaluator.run([[y_pred, y_true]])
+            print(state.metrics['fractional_abs_error'])
+
+        .. testoutput::
+
+            0.2222...
 
-        0.2222...
     .. versionchanged:: 0.4.5
         - Works with DDP.
     """

ignite/contrib/metrics/regression/fractional_bias.py

Lines changed: 14 additions & 9 deletions
@@ -34,18 +34,23 @@ class FractionalBias(_BaseRegression):
             metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
             non-blocking. By default, CPU.
 
-    .. testcode::
+    Examples:
+        To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+        The output of the engine's ``process_function`` needs to be in format of
+        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
 
-        metric = FractionalBias()
-        metric.attach(default_evaluator, 'fractional_bias')
-        y_pred = torch.Tensor([[3.8], [9.9], [5.4], [2.1]])
-        y_true = y_pred * 1.5
-        state = default_evaluator.run([[y_pred, y_true]])
-        print(state.metrics['fractional_bias'])
+        .. testcode::
 
-    .. testoutput::
+            metric = FractionalBias()
+            metric.attach(default_evaluator, 'fractional_bias')
+            y_pred = torch.Tensor([[3.8], [9.9], [5.4], [2.1]])
+            y_true = y_pred * 1.5
+            state = default_evaluator.run([[y_pred, y_true]])
+            print(state.metrics['fractional_bias'])
 
-        0.4000...
+        .. testoutput::
+
+            0.4000...
 
     .. versionchanged:: 0.4.5
         - Works with DDP.
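
A quick sanity check on the ``0.4000...`` value, assuming the fractional bias is averaged per element as ``2 * (y - y_pred) / (y + y_pred)`` (an assumption; the exact normalization used by the library may differ, but it gives the same number here since ``y = 1.5 * y_pred``):

    # Hand check of the doctest output (assumed formula).
    import torch

    y_pred = torch.Tensor([[3.8], [9.9], [5.4], [2.1]])
    y = y_pred * 1.5
    print(torch.mean(2 * (y - y_pred) / (y + y_pred)).item())
    # each term is (2 * 0.5) / 2.5 = 0.4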

ignite/contrib/metrics/regression/geometric_mean_absolute_error.py

Lines changed: 15 additions & 9 deletions
@@ -34,18 +34,24 @@ class GeometricMeanAbsoluteError(_BaseRegression):
             metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
             non-blocking. By default, CPU.
 
-    .. testcode::
+    Examples:
+        To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+        The output of the engine's ``process_function`` needs to be in format of
+        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
 
-        metric = GeometricMeanAbsoluteError()
-        metric.attach(default_evaluator, 'gmae')
-        y_pred = torch.Tensor([[3.8], [9.9], [-5.4], [2.1]])
-        y_true = y_pred * 1.5
-        state = default_evaluator.run([[y_pred, y_true]])
-        print(state.metrics['gmae'])
+        .. testcode::
 
-    .. testoutput::
+            metric = GeometricMeanAbsoluteError()
+            metric.attach(default_evaluator, 'gmae')
+            y_pred = torch.Tensor([[3.8], [9.9], [-5.4], [2.1]])
+            y_true = y_pred * 1.5
+            state = default_evaluator.run([[y_pred, y_true]])
+            print(state.metrics['gmae'])
+
+        .. testoutput::
+
+            2.2723...
 
-        2.2723...
     .. versionchanged:: 0.4.5
         - Works with DDP.
     """

ignite/contrib/metrics/regression/geometric_mean_relative_absolute_error.py

Lines changed: 18 additions & 0 deletions
@@ -47,6 +47,24 @@ class GeometricMeanRelativeAbsoluteError(_BaseRegression):
         device: specifies which device updates are accumulated on. Setting the
             metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
             non-blocking. By default, CPU.
+
+    Examples:
+        To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+        The output of the engine's ``process_function`` needs to be in format of
+        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+        .. testcode::
+
+            metric = GeometricMeanRelativeAbsoluteError()
+            metric.attach(default_evaluator, 'gmare')
+            y_true = torch.Tensor([0, 1, 2, 3, 4, 5])
+            y_pred = y_true * 0.75
+            state = default_evaluator.run([[y_pred, y_true]])
+            print(state.metrics['gmare'])
+
+        .. testoutput::
+
+            0.0...
     """
 
     @reinit__is_reduced
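
Why the expected output is ``0.0...``: at ``y = 0`` the prediction ``0.75 * 0`` is exact, so one relative error term is exactly zero and any geometric mean containing it collapses to zero. The check below assumes GMRAE is the geometric mean of ``|y - y_pred| / |y - mean(y)|`` (an assumption about the formula, not a quote of the implementation):

    # Hand check of the doctest output (assumed formula).
    import torch

    y = torch.Tensor([0, 1, 2, 3, 4, 5])
    y_pred = y * 0.75
    rel = torch.abs(y - y_pred) / torch.abs(y - y.mean())
    print((rel.prod() ** (1.0 / rel.numel())).item())
    # the zero error at y = 0 makes the product, and hence the geometric mean, 0.0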

ignite/contrib/metrics/regression/manhattan_distance.py

Lines changed: 18 additions & 0 deletions
@@ -33,6 +33,24 @@ class ManhattanDistance(_BaseRegression):
             metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
             non-blocking. By default, CPU.
 
+    Examples:
+        To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+        The output of the engine's ``process_function`` needs to be in format of
+        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+        .. testcode::
+
+            metric = ManhattanDistance()
+            metric.attach(default_evaluator, 'manhattan')
+            y_true = torch.Tensor([0, 1, 2, 3, 4, 5])
+            y_pred = y_true * 0.75
+            state = default_evaluator.run([[y_pred, y_true]])
+            print(state.metrics['manhattan'])
+
+        .. testoutput::
+
+            3.75...
+
     .. versionchanged:: 0.4.3
 
         - Fixed sklearn compatibility.
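
The ``3.75...`` follows directly from the L1 distance; the check below assumes the metric accumulates ``sum(|y - y_pred|)``:

    # Hand check of the doctest output (assumed formula).
    import torch

    y = torch.Tensor([0, 1, 2, 3, 4, 5])
    y_pred = y * 0.75
    print(torch.sum(torch.abs(y - y_pred)).item())
    # 0.25 * (0 + 1 + 2 + 3 + 4 + 5) = 3.75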

ignite/contrib/metrics/regression/maximum_absolute_error.py

Lines changed: 18 additions & 0 deletions
@@ -34,6 +34,24 @@ class MaximumAbsoluteError(_BaseRegression):
             metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
             non-blocking. By default, CPU.
 
+    Examples:
+        To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+        The output of the engine's ``process_function`` needs to be in format of
+        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+        .. testcode::
+
+            metric = MaximumAbsoluteError()
+            metric.attach(default_evaluator, 'mae')
+            y_true = torch.Tensor([0, 1, 2, 3, 4, 5])
+            y_pred = y_true * 0.75
+            state = default_evaluator.run([[y_pred, y_true]])
+            print(state.metrics['mae'])
+
+        .. testoutput::
+
+            1.25...
+
    .. versionchanged:: 0.4.5
        - Works with DDP.
    """

ignite/contrib/metrics/regression/mean_absolute_relative_error.py

Lines changed: 18 additions & 0 deletions
@@ -34,6 +34,24 @@ class MeanAbsoluteRelativeError(_BaseRegression):
             metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
             non-blocking. By default, CPU.
 
+    Examples:
+        To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+        The output of the engine's ``process_function`` needs to be in format of
+        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+        .. testcode::
+
+            metric = MeanAbsoluteRelativeError()
+            metric.attach(default_evaluator, 'mare')
+            y_true = torch.Tensor([1, 2, 3, 4, 5])
+            y_pred = y_true * 0.75
+            state = default_evaluator.run([[y_pred, y_true]])
+            print(state.metrics['mare'])
+
+        .. testoutput::
+
+            0.25...
+
    .. versionchanged:: 0.4.5
        - Works with DDP.
    """

ignite/contrib/metrics/regression/mean_error.py

Lines changed: 18 additions & 0 deletions
@@ -33,6 +33,24 @@ class MeanError(_BaseRegression):
         device: specifies which device updates are accumulated on. Setting the
             metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
             non-blocking. By default, CPU.
+
+    Examples:
+        To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+        The output of the engine's ``process_function`` needs to be in format of
+        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+        .. testcode::
+
+            metric = MeanError()
+            metric.attach(default_evaluator, 'me')
+            y_true = torch.Tensor([0, 1, 2, 3, 4, 5])
+            y_pred = y_true * 0.75
+            state = default_evaluator.run([[y_pred, y_true]])
+            print(state.metrics['me'])
+
+        .. testoutput::
+
+            0.625...
    """
 
    @reinit__is_reduced
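
The ``0.625...`` is a signed bias rather than an absolute error; the check below assumes the mean error is simply ``mean(y - y_pred)``:

    # Hand check of the doctest output (assumed formula).
    import torch

    y = torch.Tensor([0, 1, 2, 3, 4, 5])
    y_pred = y * 0.75
    print(torch.mean(y - y_pred).item())
    # 0.25 * mean([0, 1, 2, 3, 4, 5]) = 0.25 * 2.5 = 0.625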

ignite/contrib/metrics/regression/mean_normalized_bias.py

Lines changed: 18 additions & 0 deletions
@@ -34,6 +34,24 @@ class MeanNormalizedBias(_BaseRegression):
             metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
             non-blocking. By default, CPU.
 
+    Examples:
+        To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+        The output of the engine's ``process_function`` needs to be in format of
+        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+        .. testcode::
+
+            metric = MeanNormalizedBias()
+            metric.attach(default_evaluator, 'mnb')
+            y_true = torch.Tensor([1, 2, 3, 4, 5])
+            y_pred = y_true * 0.75
+            state = default_evaluator.run([[y_pred, y_true]])
+            print(state.metrics['mnb'])
+
+        .. testoutput::
+
+            0.25...
+
    .. versionchanged:: 0.4.5
        - Works with DDP.
    """
