1
1
import numpy as np
2
+ import pytensor
2
3
import pytensor .tensor as pt
3
4
import pytest
4
5
from pytensor .tensor .var import TensorVariable
5
6
6
7
from pymc_marketing .mmm .transformers import (
8
+ batched_convolution ,
7
9
delayed_adstock ,
8
- delayed_adstock_vectorized ,
9
10
geometric_adstock ,
10
- geometric_adstock_vectorized ,
11
11
logistic_saturation ,
12
12
tanh_saturation ,
13
13
)
@@ -27,6 +27,58 @@ def dummy_design_matrix():
27
27
)
28
28
29
29
30
@pytest.fixture(
    scope="module", params=["ndarray", "TensorConstant", "TensorVariable"], ids=str
)
def convolution_inputs(request):
    """Provide ``(x, w, x_val, w_val)`` in one of three input flavours.

    For concrete inputs (ndarray / TensorConstant) the trailing two entries
    are ``None``; for symbolic TensorVariable inputs they hold the numeric
    values to feed into a compiled function.
    """
    x_val = np.ones((3, 4, 5))
    w_val = np.ones(2)
    kind = request.param
    if kind == "TensorVariable":
        # Symbolic case: pin the static shape of w so downstream code can
        # infer the kernel length at graph-construction time.
        x = pt.dtensor3("x")
        w = pt.specify_shape(pt.dvector("w"), w_val.shape)
        return x, w, x_val, w_val
    if kind == "TensorConstant":
        return pt.as_tensor_variable(x_val), pt.as_tensor_variable(w_val), None, None
    if kind == "ndarray":
        return x_val, w_val, None, None
47
+
48
+
49
@pytest.fixture(scope="module", params=[0, 1, -1])
def convolution_axis(request):
    """Axis along which the batched convolution is applied (first, second, last)."""
    return request.param
52
+
53
+
54
def test_batched_convolution(convolution_inputs, convolution_axis):
    """Convolving with a ones(2) kernel adds each slice to its predecessor.

    Works for concrete (ndarray / TensorConstant) and symbolic
    (TensorVariable) inputs, along any of the parametrized axes.
    """
    x, w, x_val, w_val = convolution_inputs
    out = batched_convolution(x, w, convolution_axis)

    if x_val is not None:
        # Symbolic inputs: compile a function and feed the numeric values.
        out_val = pytensor.function([x, w], out)(x_val, w_val)
        reference = x_val
    else:
        out_val = out.eval()
        reference = getattr(x, "value", x)

    assert out_val.shape == reference.shape
    # Bring the convolved axis to the front so the expectation is axis-agnostic.
    out_val = np.moveaxis(out_val, convolution_axis, 0)
    reference = np.moveaxis(reference, convolution_axis, 0)
    assert np.allclose(out_val[0], reference[0])
    assert np.allclose(out_val[1:], reference[1:] + reference[:-1])
70
+
71
+
72
def test_batched_convolution_broadcasting():
    """x of shape (3, 1, 5) and w of shape (1, 1, 4, 2) broadcast to (1, 3, 4, 5)."""
    rng = np.random.default_rng(42)
    signal = rng.normal(size=(3, 1, 5))
    kernel = np.ones((1, 1, 4, 2))
    result = batched_convolution(
        pt.as_tensor_variable(signal), pt.as_tensor_variable(kernel), axis=-1
    ).eval()
    assert result.shape == (1, 3, 4, 5)
    # ones(2) kernel: the first step equals the signal itself, every later
    # step adds the lag-1 term.
    assert np.allclose(result[..., 0], signal[..., 0])
    assert np.allclose(result[..., 1:], signal[..., 1:] + signal[..., :-1])
80
+
81
+
30
82
class TestsAdstockTransformers :
31
83
def test_geometric_adstock_x_zero (self ):
32
84
x = np .zeros (shape = (100 ))
@@ -62,14 +114,12 @@ def test_delayed_adstock_x_zero(self):
62
114
y = delayed_adstock (x = x , alpha = 0.2 , theta = 2 , l_max = 4 )
63
115
np .testing .assert_array_equal (x = x , y = y .eval ())
64
116
65
- def test_geometric_adstock_vactorized (self , dummy_design_matrix ):
117
+ def test_geometric_adstock_vectorized (self , dummy_design_matrix ):
66
118
x = dummy_design_matrix .copy ()
67
119
x_tensor = pt .as_tensor_variable (x )
68
120
alpha = [0.9 , 0.33 , 0.5 , 0.1 , 0.0 ]
69
121
alpha_tensor = pt .as_tensor_variable (alpha )
70
- y_tensor = geometric_adstock_vectorized (
71
- x = x_tensor , alpha = alpha_tensor , l_max = 12
72
- )
122
+ y_tensor = geometric_adstock (x = x_tensor , alpha = alpha_tensor , l_max = 12 , axis = 0 )
73
123
y = y_tensor .eval ()
74
124
75
125
y_tensors = [
@@ -80,15 +130,15 @@ def test_geometric_adstock_vactorized(self, dummy_design_matrix):
80
130
assert y .shape == x .shape
81
131
np .testing .assert_almost_equal (actual = y , desired = ys , decimal = 12 )
82
132
83
- def test_delayed_adstock_vactorized (self , dummy_design_matrix ):
133
+ def test_delayed_adstock_vectorized (self , dummy_design_matrix ):
84
134
x = dummy_design_matrix
85
135
x_tensor = pt .as_tensor_variable (x )
86
136
alpha = [0.9 , 0.33 , 0.5 , 0.1 , 0.0 ]
87
137
alpha_tensor = pt .as_tensor_variable (alpha )
88
138
theta = [0 , 1 , 2 , 3 , 4 ]
89
139
theta_tensor = pt .as_tensor_variable (theta )
90
- y_tensor = delayed_adstock_vectorized (
91
- x = x_tensor , alpha = alpha_tensor , theta = theta_tensor , l_max = 12
140
+ y_tensor = delayed_adstock (
141
+ x = x_tensor , alpha = alpha_tensor , theta = theta_tensor , l_max = 12 , axis = 0
92
142
)
93
143
y = y_tensor .eval ()
94
144
@@ -220,7 +270,7 @@ def test_logistic_saturation_delayed_adstock_composition(
220
270
assert z2_eval .max () <= 1
221
271
assert z2_eval .min () >= 0
222
272
223
- def test_geometric_adstock_vactorized_logistic_saturation (
273
+ def test_geometric_adstock_vectorized_logistic_saturation (
224
274
self , dummy_design_matrix
225
275
):
226
276
x = dummy_design_matrix .copy ()
@@ -229,9 +279,7 @@ def test_geometric_adstock_vactorized_logistic_saturation(
229
279
alpha_tensor = pt .as_tensor_variable (alpha )
230
280
lam = [0.5 , 1.0 , 2.0 , 3.0 , 4.0 ]
231
281
lam_tensor = pt .as_tensor_variable (lam )
232
- y_tensor = geometric_adstock_vectorized (
233
- x = x_tensor , alpha = alpha_tensor , l_max = 12
234
- )
282
+ y_tensor = geometric_adstock (x = x_tensor , alpha = alpha_tensor , l_max = 12 , axis = 0 )
235
283
z_tensor = logistic_saturation (x = y_tensor , lam = lam_tensor )
236
284
z = z_tensor .eval ()
237
285
@@ -246,7 +294,7 @@ def test_geometric_adstock_vactorized_logistic_saturation(
246
294
assert zs .shape == x .shape
247
295
np .testing .assert_almost_equal (actual = z , desired = zs , decimal = 12 )
248
296
249
- def test_delayed_adstock_vactorized_logistic_saturation (self , dummy_design_matrix ):
297
+ def test_delayed_adstock_vectorized_logistic_saturation (self , dummy_design_matrix ):
250
298
x = dummy_design_matrix .copy ()
251
299
x_tensor = pt .as_tensor_variable (x )
252
300
alpha = [0.9 , 0.33 , 0.5 , 0.1 , 0.0 ]
@@ -255,8 +303,8 @@ def test_delayed_adstock_vactorized_logistic_saturation(self, dummy_design_matri
255
303
theta_tensor = pt .as_tensor_variable (theta )
256
304
lam = [0.5 , 1.0 , 2.0 , 3.0 , 4.0 ]
257
305
lam_tensor = pt .as_tensor_variable (lam )
258
- y_tensor = delayed_adstock_vectorized (
259
- x = x_tensor , alpha = alpha_tensor , theta = theta_tensor , l_max = 12
306
+ y_tensor = delayed_adstock (
307
+ x = x_tensor , alpha = alpha_tensor , theta = theta_tensor , l_max = 12 , axis = 0
260
308
)
261
309
z_tensor = logistic_saturation (x = y_tensor , lam = lam_tensor )
262
310
z = z_tensor .eval ()
0 commit comments