google-meridian 1.0.4__py3-none-any.whl → 1.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {google_meridian-1.0.4.dist-info → google_meridian-1.0.5.dist-info}/METADATA +3 -3
- {google_meridian-1.0.4.dist-info → google_meridian-1.0.5.dist-info}/RECORD +9 -9
- meridian/__init__.py +1 -1
- meridian/analysis/analyzer.py +10 -181
- meridian/analysis/optimizer.py +219 -119
- meridian/model/model.py +4 -3
- {google_meridian-1.0.4.dist-info → google_meridian-1.0.5.dist-info}/LICENSE +0 -0
- {google_meridian-1.0.4.dist-info → google_meridian-1.0.5.dist-info}/WHEEL +0 -0
- {google_meridian-1.0.4.dist-info → google_meridian-1.0.5.dist-info}/top_level.txt +0 -0
{google_meridian-1.0.4.dist-info → google_meridian-1.0.5.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: google-meridian
-Version: 1.0.4
+Version: 1.0.5
 Summary: Google's open source mixed marketing model library, helps you understand your return on investment and direct your ad spend with confidence.
 Author-email: The Meridian Authors <no-reply@google.com>
 License:
@@ -215,7 +215,7 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3 :: Only
 Classifier: Topic :: Other/Nonlisted Topic
 Classifier: Topic :: Scientific/Engineering :: Information Analysis
-Requires-Python: >=3.
+Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: arviz
@@ -392,7 +392,7 @@ To cite this repository:
   author = {Google Meridian Marketing Mix Modeling Team},
   title = {Meridian: Marketing Mix Modeling},
   url = {https://github.com/google/meridian},
-  version = {1.0.4},
+  version = {1.0.5},
   year = {2025},
 }
 ```
{google_meridian-1.0.4.dist-info → google_meridian-1.0.5.dist-info}/RECORD
CHANGED
@@ -1,9 +1,9 @@
-meridian/__init__.py,sha256=
+meridian/__init__.py,sha256=tjar6BXbxRQ0ihMDHu2kVYddlW0WGWsef0_5pw1VwOM,714
 meridian/constants.py,sha256=PhQX3S0b-Odv_bjl1YaO6vnLT4rBomiAcec0789VZNE,14599
 meridian/analysis/__init__.py,sha256=-FooDZ5OzePpyTVkvRoWQx_xBaRR_hjVLny9H8-kkyQ,836
-meridian/analysis/analyzer.py,sha256=
+meridian/analysis/analyzer.py,sha256=N-nWKmyJnXNv2RRt8bj-sO8hoBHUqMABU4Sj8eeBSNc,197121
 meridian/analysis/formatter.py,sha256=F8OYxD2bH13zV10JY63j2ugCOj-DpTXhyJr43n5ukr8,7270
-meridian/analysis/optimizer.py,sha256=
+meridian/analysis/optimizer.py,sha256=Kbir76hEYAPvT5qstZ9edHwVkXs95KFJ6p66cCJqj-Y,77242
 meridian/analysis/summarizer.py,sha256=oJVqTCGLDf0cxh-1nFlAp4Hlo3NKXRYAoA67K9Hraw0,17498
 meridian/analysis/summary_text.py,sha256=wHdETMdnYiZn2wf5LfHzzcGbJMjTAI5GmYnketeJnU0,11469
 meridian/analysis/test_utils.py,sha256=WImpYOt5wLjhi8NczcxCvqSHRikTLvm1yli4o5DYzKE,77258
@@ -27,15 +27,15 @@ meridian/model/__init__.py,sha256=bvx8vvXolktsCTDKViU9U1v85pgNWF3haDowTKy11d4,98
 meridian/model/adstock_hill.py,sha256=b_YYhqci6ndgi602FFXmx2f12ceC4N0tp338nMMtm54,9283
 meridian/model/knots.py,sha256=r7PPaJM96d5pkoOeV9crIOgkM0-rh24mWMvypMiV4aQ,8054
 meridian/model/media.py,sha256=Gjr4jm0y_6pFy7aa_oKIuuZ8P7F56e3ZB-3o6msApeA,11876
-meridian/model/model.py,sha256=
+meridian/model/model.py,sha256=WS1b_Gb-RlA3KPEaI6d1vsd78y09SYLEJhGpo4t0Mic,43178
 meridian/model/model_test_data.py,sha256=dqS_vDQUg811UGmyr8ZgWp8VTIra-krA7A2erQlfPlU,12488
 meridian/model/posterior_sampler.py,sha256=ffAzlkrCEqJjFmu6pDbUfam4BLTskxrSr0-U9-_aFiA,22637
 meridian/model/prior_distribution.py,sha256=6fqx_XIM0DSQICd65XaSRhelsjvZ4ariBfeyOeoKld8,39075
 meridian/model/prior_sampler.py,sha256=zGSAQviFO3s2GcVbfG9EfXxo_SNFBFbTQC3e-QBFzio,23079
 meridian/model/spec.py,sha256=xaHxfCLWLnWMAkMy2ouDoqGBHI_4tzzX8AaJOsKdu7Q,8878
 meridian/model/transformers.py,sha256=te3OJixprWLtv7O00a9GZWE4waTS94NNLVo3tWIl1-k,7420
-google_meridian-1.0.4.dist-info/LICENSE,sha256=
-google_meridian-1.0.4.dist-info/METADATA,sha256=
-google_meridian-1.0.4.dist-info/WHEEL,sha256=
-google_meridian-1.0.4.dist-info/top_level.txt,sha256=
-google_meridian-1.0.4.dist-info/RECORD,,
+google_meridian-1.0.5.dist-info/LICENSE,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
+google_meridian-1.0.5.dist-info/METADATA,sha256=gPEiheNFDpJRtx5a1tmjcgaXBYPh1ESH_gkTP5LTgLQ,22040
+google_meridian-1.0.5.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
+google_meridian-1.0.5.dist-info/top_level.txt,sha256=nwaCebZvvU34EopTKZsjK0OMTFjVnkf4FfnBN_TAc0g,9
+google_meridian-1.0.5.dist-info/RECORD,,
meridian/__init__.py
CHANGED
meridian/analysis/analyzer.py
CHANGED
@@ -3480,175 +3480,6 @@ class Analyzer:
         baseline_pct_of_contribution,
     ])

-  # TODO: This method can be replaced once generalized
-  # `media_summary_metric` is done.
-  def _counterfactual_metric_dataset(
-      self,
-      use_posterior: bool = True,
-      new_data: DataTensors | None = None,
-      marginal_roi_by_reach: bool = True,
-      selected_geos: Sequence[str] | None = None,
-      selected_times: Sequence[str] | None = None,
-      use_kpi: bool = False,
-      attrs: Mapping[str, Any] | None = None,
-      confidence_level: float = constants.DEFAULT_CONFIDENCE_LEVEL,
-      batch_size: int = constants.DEFAULT_BATCH_SIZE,
-  ) -> xr.Dataset:
-    """Calculates the counterfactual metric dataset.
-
-    Args:
-      use_posterior: Boolean. If `True`, posterior counterfactual metrics are
-        generated. If `False`, prior counterfactual metrics are generated.
-      new_data: Optional DataTensors. When specified, it contains the
-        counterfactual `media`, `reach`, `frequency`, `media_spend`, `rf_spend`
-        and `revenue_per_kpi` values. The new tensors' dimensions must match the
-        dimensions of the corresponding original tensors from
-        `meridian.input_data`. If `None`, the existing tensors from the Meridian
-        object are used.
-      marginal_roi_by_reach: Boolean. Marginal ROI (mROI) is defined as the
-        return on the next dollar spent. If this argument is `True`, the
-        assumption is that the next dollar spent only impacts reach, holding
-        frequency constant. If this argument is `False`, the assumption is that
-        the next dollar spent only impacts frequency, holding reach constant.
-      selected_geos: Optional list contains a subset of geos to include. By
-        default, all geos are included.
-      selected_times: Optional list contains a subset of times to include. By
-        default, all time periods are included.
-      use_kpi: Boolean. If `True`, the counterfactual metrics are calculated
-        using KPI. If `False`, the counterfactual metrics are calculated using
-        revenue.
-      attrs: Optional dictionary of attributes to add to the dataset.
-      confidence_level: Confidence level for prior and posterior credible
-        intervals, represented as a value between zero and one.
-      batch_size: Maximum draws per chain in each batch. The calculation is run
-        in batches to avoid memory exhaustion. If a memory error occurs, try
-        reducing `batch_size`. The calculation will generally be faster with
-        larger `batch_size` values.
-
-    Returns:
-      An xarray Dataset which contains:
-      * Coordinates: `channel`, `metric` (`mean`, `median`, `ci_lo`, `ci_hi`).
-      * Data variables:
-        * `spend`: The spend for each channel.
-        * `pct_of_spend`: The percentage of spend for each channel.
-        * `incremental_outcome`: The incremental outcome for each channel.
-        * `pct_of_contribution`: The contribution percentage for each channel.
-        * `roi`: The ROI for each channel.
-        * `effectiveness`: The effectiveness for each channel.
-        * `mroi`: The marginal ROI for each channel.
-        * `cpik`: The CPIK for each channel.
-    """
-    dim_kwargs = {
-        "selected_geos": selected_geos,
-        "selected_times": selected_times,
-    }
-    metric_tensor_kwargs = {
-        "use_posterior": use_posterior,
-        "use_kpi": use_kpi,
-        "batch_size": batch_size,
-    }
-    filled_data = self._validate_and_fill_roi_analysis_arguments(
-        new_data=new_data or DataTensors()
-    )
-    spend = filled_data.total_spend()
-    if spend is not None and spend.ndim == 3:
-      spend = self.filter_and_aggregate_geos_and_times(spend, **dim_kwargs)
-
-    # _counterfactual_metric_dataset() is called only from `optimal_freq()`
-    # and uses only paid channels.
-    incremental_outcome_tensor = self.incremental_outcome(
-        new_data=filled_data,
-        include_non_paid_channels=False,
-        **dim_kwargs,
-        **metric_tensor_kwargs,
-    )
-    # expected_outcome returns a tensor of shape (n_chains, n_draws).
-    mean_expected_outcome = tf.reduce_mean(
-        self.expected_outcome(
-            new_data=filled_data,
-            **dim_kwargs,
-            **metric_tensor_kwargs,
-        ),
-        (0, 1),
-    )
-
-    # Calculate the mean, median, and confidence intervals for each metric.
-    incremental_outcome = get_central_tendency_and_ci(
-        data=incremental_outcome_tensor,
-        confidence_level=confidence_level,
-        include_median=True,
-    )
-    pct_of_contribution = get_central_tendency_and_ci(
-        data=incremental_outcome_tensor
-        / mean_expected_outcome[..., None]
-        * 100,
-        confidence_level=confidence_level,
-        include_median=True,
-    )
-    roi = get_central_tendency_and_ci(
-        data=tf.math.divide_no_nan(incremental_outcome_tensor, spend),
-        confidence_level=confidence_level,
-        include_median=True,
-    )
-    mroi = get_central_tendency_and_ci(
-        data=self.marginal_roi(
-            by_reach=marginal_roi_by_reach,
-            new_data=filled_data,
-            **dim_kwargs,
-            **metric_tensor_kwargs,
-        ),
-        confidence_level=confidence_level,
-        include_median=True,
-    )
-    effectiveness = get_central_tendency_and_ci(
-        data=incremental_outcome_tensor
-        / self.get_aggregated_impressions(
-            **dim_kwargs,
-            optimal_frequency=filled_data.frequency,
-            include_non_paid_channels=False,
-        ),
-        confidence_level=confidence_level,
-        include_median=True,
-    )
-    cpik = get_central_tendency_and_ci(
-        data=tf.math.divide_no_nan(spend, incremental_outcome_tensor),
-        confidence_level=confidence_level,
-        include_median=True,
-    )
-
-    budget = np.sum(spend) if np.sum(spend) > 0 else 1
-    dims = [constants.CHANNEL, constants.METRIC]
-    data_vars = {
-        constants.SPEND: ([constants.CHANNEL], spend),
-        constants.PCT_OF_SPEND: ([constants.CHANNEL], spend / budget),
-        constants.INCREMENTAL_OUTCOME: (dims, incremental_outcome),
-        constants.PCT_OF_CONTRIBUTION: (dims, pct_of_contribution),
-        constants.ROI: (dims, roi),
-        constants.MROI: (dims, mroi),
-        constants.EFFECTIVENESS: (dims, effectiveness),
-        constants.CPIK: (dims, cpik),
-    }
-
-    return xr.Dataset(
-        data_vars=data_vars,
-        coords={
-            constants.CHANNEL: (
-                [constants.CHANNEL],
-                self._meridian.input_data.get_all_paid_channels(),
-            ),
-            constants.METRIC: (
-                [constants.METRIC],
-                [
-                    constants.MEAN,
-                    constants.MEDIAN,
-                    constants.CI_LO,
-                    constants.CI_HI,
-                ],
-            ),
-        },
-        attrs=attrs,
-    )
-
   def optimal_freq(
       self,
       freq_grid: Sequence[float] | None = None,
@@ -3696,8 +3527,6 @@ class Analyzer:
       * `roi`: The ROI for each frequency value in `freq_grid`.
      * `optimized_incremental_outcome`: The incremental outcome based on the
        optimal frequency.
-      * `optimized_pct_of_contribution`: The contribution percentage based on
-        the optimal frequency.
      * `optimized_effectiveness`: The effectiveness based on the optimal
        frequency.
      * `optimized_roi`: The ROI based on the optimal frequency.
@@ -3770,8 +3599,7 @@ class Analyzer:
    )

    # Compute the optimized metrics based on the optimal frequency.
-    optimized_metrics_by_reach = self._counterfactual_metric_dataset(
-        use_posterior=use_posterior,
+    optimized_metrics_by_reach = self.summary_metrics(
        new_data=DataTensors(
            reach=optimal_reach, frequency=optimal_frequency_tensor
        ),
@@ -3779,9 +3607,11 @@ class Analyzer:
        selected_geos=selected_geos,
        selected_times=selected_times,
        use_kpi=use_kpi,
-    ).sel({
-
-
+    ).sel({
+        constants.CHANNEL: rf_channel_values,
+        constants.DISTRIBUTION: dist_type,
+    })
+    optimized_metrics_by_frequency = self.summary_metrics(
        new_data=DataTensors(
            reach=optimal_reach, frequency=optimal_frequency_tensor
        ),
@@ -3789,7 +3619,10 @@ class Analyzer:
        selected_geos=selected_geos,
        selected_times=selected_times,
        use_kpi=use_kpi,
-    ).sel({
+    ).sel({
+        constants.CHANNEL: rf_channel_values,
+        constants.DISTRIBUTION: dist_type,
+    })

    data_vars = {
        constants.ROI: (
@@ -3804,10 +3637,6 @@ class Analyzer:
            [constants.RF_CHANNEL, constants.METRIC],
            optimized_metrics_by_reach.incremental_outcome.data,
        ),
-        constants.OPTIMIZED_PCT_OF_CONTRIBUTION: (
-            [constants.RF_CHANNEL, constants.METRIC],
-            optimized_metrics_by_reach.pct_of_contribution.data,
-        ),
        constants.OPTIMIZED_ROI: (
            (constants.RF_CHANNEL, constants.METRIC),
            optimized_metrics_by_reach.roi.data,
meridian/analysis/optimizer.py
CHANGED
@@ -45,6 +45,64 @@ alt.data_transformers.disable_max_rows()
 _SpendConstraint: TypeAlias = float | Sequence[float]


+@dataclasses.dataclass(frozen=True)
+class OptimizationGrid:
+  """Optimization grid information.
+
+  Attributes:
+    spend: ndarray of shape `(n_paid_channels,)` containing the spend allocation
+      for spend for all media and RF channels. The order matches
+      `InputData.get_all_paid_channels`.
+    use_kpi: Whether using generic KPI or revenue.
+    use_posterior: Whether posterior distributions were used, or prior.
+    use_optimal_frequency: Whether optimal frequency was used.
+    round_factor: The round factor used for the optimization grid.
+    optimal_frequency: Optional ndarray of shape `(n_paid_channels,)`,
+      containing the optimal frequency per channel. Value is `None` if the model
+      does not contain reach and frequency data, or if the model does contain
+      reach and frequency data, but historical frequency is used for the
+      optimization scenario.
+    selected_times: The time coordinates from the model used in this grid.
+  """
+
+  _grid_dataset: xr.Dataset
+
+  spend: np.ndarray
+  use_kpi: bool
+  use_posterior: bool
+  use_optimal_frequency: bool
+  round_factor: int
+  optimal_frequency: np.ndarray | None
+  selected_times: list[str] | None
+
+  @property
+  def grid_dataset(self) -> xr.Dataset:
+    """Dataset holding the grid information used for optimization.
+
+    The dataset contains the following:
+
+    - Coordinates: `grid_spend_index`, `channel`
+    - Data variables: `spend_grid`, `incremental_outcome_grid`
+    - Attributes: `spend_step_size`
+    """
+    return self._grid_dataset
+
+  @property
+  def spend_grid(self) -> np.ndarray:
+    """The spend grid."""
+    return self.grid_dataset.spend_grid
+
+  @property
+  def incremental_outcome_grid(self) -> np.ndarray:
+    """The incremental outcome grid."""
+    return self.grid_dataset.incremental_outcome_grid
+
+  @property
+  def spend_step_size(self) -> float:
+    """The spend step size."""
+    return self.grid_dataset.attrs[c.SPEND_STEP_SIZE]
+
+
 @dataclasses.dataclass(frozen=True)
 class OptimizationResults:
   """The optimized budget allocation.
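For orientation, here is a rough, read-only sketch of the new `OptimizationGrid` surface; `results` is assumed to be an `OptimizationResults` object returned by `BudgetOptimizer.optimize()`, and the shape comment is inferred from the grid-building code later in this diff:

```python
grid = results.optimization_grid            # OptimizationGrid instance (see the dataclass above)

print(grid.use_posterior)                   # flags that used to live on OptimizationResults
print(grid.use_optimal_frequency)
print(grid.round_factor)
print(grid.spend_step_size)                 # read from grid_dataset.attrs
print(grid.spend_grid.shape)                # one row per spend step, one column per paid channel
print(grid.incremental_outcome_grid.shape)
```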
@@ -69,10 +127,6 @@ class OptimizationResults:
    meridian: The fitted Meridian model that was used to create this budget
      allocation.
    analyzer: The analyzer bound to the model above.
-    use_posterior: Whether the posterior distribution was used to optimize the
-      budget. If `False`, the prior distribution was used.
-    use_optimal_frequency: Whether optimal frequency was used to optimize the
-      budget.
    spend_ratio: The spend ratio used to scale the non-optimized budget metrics
      to the optimized budget metrics.
    spend_bounds: The spend bounds used to scale the non-optimized budget
@@ -88,10 +142,6 @@ class OptimizationResults:
  meridian: model.Meridian
  # The analyzer bound to the model above.
  analyzer: analyzer.Analyzer
-
-  # The intermediate values used to derive the optimized budget allocation.
-  use_posterior: bool
-  use_optimal_frequency: bool
  spend_ratio: np.ndarray  # spend / historical spend
  spend_bounds: tuple[np.ndarray, np.ndarray]

@@ -99,7 +149,7 @@ class OptimizationResults:
  _nonoptimized_data: xr.Dataset
  _nonoptimized_data_with_optimal_freq: xr.Dataset
  _optimized_data: xr.Dataset
-  _optimization_grid: xr.Dataset
+  _optimization_grid: OptimizationGrid

  # TODO: Move this, and the plotting methods, to a summarizer.
  @functools.cached_property
@@ -174,15 +224,8 @@ class OptimizationResults:
    return self._optimized_data

  @property
-  def optimization_grid(self) -> xr.Dataset:
-    """
-
-    The dataset contains the following:
-
-    - Coordinates: `grid_spend_index`, `channel`
-    - Data variables: `spend_grid`, `incremental_outcome_grid`
-    - Attributes: `spend_step_size`
-    """
+  def optimization_grid(self) -> OptimizationGrid:
+    """The grid information used for optimization."""
    return self._optimization_grid

  def output_optimization_summary(self, filename: str, filepath: str):
@@ -539,10 +582,10 @@ class OptimizationResults:
    # response curve computation might take a significant amount of time.
    return self.analyzer.response_curves(
        spend_multipliers=spend_multiplier,
-        use_posterior=self.use_posterior,
+        use_posterior=self.optimization_grid.use_posterior,
        selected_times=selected_times,
        by_reach=True,
-        use_optimal_frequency=self.use_optimal_frequency,
+        use_optimal_frequency=self.optimization_grid.use_optimal_frequency,
    )

  def _get_plottable_response_curves_df(
@@ -674,7 +717,6 @@ class OptimizationResults:
        id=summary_text.SCENARIO_PLAN_CARD_ID,
        title=summary_text.SCENARIO_PLAN_CARD_TITLE,
    )
-
    scenario_type = (
        summary_text.FIXED_BUDGET_LABEL.lower()
        if self.optimized_data.fixed_budget
@@ -891,6 +933,14 @@ class BudgetOptimizer:
    self._meridian = meridian
    self._analyzer = analyzer.Analyzer(self._meridian)

+  def _validate_model_fit(self, use_posterior: bool):
+    """Validates that the model is fit."""
+    dist_type = c.POSTERIOR if use_posterior else c.PRIOR
+    if dist_type not in self._meridian.inference_data.groups():
+      raise model.NotFittedModelError(
+          'Running budget optimization scenarios requires fitting the model.'
+      )
+
  def optimize(
      self,
      use_posterior: bool = True,
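`_validate_model_fit` runs before any grid is built, so optimizing an unfitted model fails fast with `NotFittedModelError`. A hedged sketch of that failure path (the unfitted model `mm` and the surrounding setup are illustrative):

```python
from meridian.analysis import optimizer
from meridian.model import model

budget_optimizer = optimizer.BudgetOptimizer(mm)  # `mm`: a Meridian model with no posterior samples

try:
    budget_optimizer.optimize(use_posterior=True)
except model.NotFittedModelError as err:
    # Raised because 'posterior' is not in mm.inference_data.groups().
    print(err)
```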
@@ -980,12 +1030,13 @@ class BudgetOptimizer:
      An `OptimizationResults` object containing optimized budget allocation
      datasets, along with some of the intermediate values used to derive them.
    """
-    self._validate_budget(
-        fixed_budget=fixed_budget,
-        budget=budget,
-        target_roi=target_roi,
-        target_mroi=target_mroi,
-    )
+    _validate_budget(
+        fixed_budget=fixed_budget,
+        budget=budget,
+        target_roi=target_roi,
+        target_mroi=target_mroi,
+    )
+
    if selected_times is not None:
      start_date, end_date = selected_times
      selected_time_dims = self._meridian.expand_selected_time_dims(
@@ -994,28 +1045,17 @@ class BudgetOptimizer:
      )
    else:
      selected_time_dims = None
-
    hist_spend = self._analyzer.get_historical_spend(
        selected_time_dims,
        include_media=self._meridian.n_media_channels > 0,
        include_rf=self._meridian.n_rf_channels > 0,
    ).data

-    use_historical_budget = budget is None or round(budget) == round(
-        np.sum(hist_spend)
-    )
    budget = budget or np.sum(hist_spend)
    pct_of_spend = self._validate_pct_of_spend(hist_spend, pct_of_spend)
    spend = budget * pct_of_spend
    round_factor = _get_round_factor(budget, gtol)
-    step_size = 10 ** (-round_factor)
    rounded_spend = np.round(spend, round_factor).astype(int)
-    spend_ratio = np.divide(
-        spend,
-        hist_spend,
-        out=np.zeros_like(hist_spend, dtype=float),
-        where=hist_spend != 0,
-    )
    if self._meridian.n_rf_channels > 0 and use_optimal_frequency:
      optimal_frequency = tf.convert_to_tensor(
          self._analyzer.optimal_freq(
@@ -1037,34 +1077,30 @@ class BudgetOptimizer:
              fixed_budget=fixed_budget,
          )
      )
-    (spend_grid, incremental_outcome_grid) = self._create_grids(
+    optimization_grid = self.create_optimization_grid(
        spend=hist_spend,
        spend_bound_lower=optimization_lower_bound,
        spend_bound_upper=optimization_upper_bound,
-        step_size=step_size,
        selected_times=selected_time_dims,
+        round_factor=round_factor,
        use_posterior=use_posterior,
        use_kpi=use_kpi,
+        use_optimal_frequency=use_optimal_frequency,
        optimal_frequency=optimal_frequency,
        batch_size=batch_size,
    )
+    # TODO: b/375644691) - Move grid search to a OptimizationGrid class.
    optimal_spend = self._grid_search(
-        spend_grid=spend_grid,
-        incremental_outcome_grid=incremental_outcome_grid,
+        spend_grid=optimization_grid.spend_grid,
+        incremental_outcome_grid=optimization_grid.incremental_outcome_grid,
        budget=np.sum(rounded_spend),
        fixed_budget=fixed_budget,
        target_mroi=target_mroi,
        target_roi=target_roi,
    )
-
-    constraints = {
-        c.FIXED_BUDGET: fixed_budget,
-    }
-    if target_roi:
-      constraints[c.TARGET_ROI] = target_roi
-    elif target_mroi:
-      constraints[c.TARGET_MROI] = target_mroi
-
+    use_historical_budget = budget is None or round(budget) == round(
+        np.sum(hist_spend)
+    )
    nonoptimized_data = self._create_budget_dataset(
        use_posterior=use_posterior,
        use_kpi=use_kpi,
@@ -1086,6 +1122,13 @@ class BudgetOptimizer:
        batch_size=batch_size,
        use_historical_budget=use_historical_budget,
    )
+    constraints = {
+        c.FIXED_BUDGET: fixed_budget,
+    }
+    if target_roi:
+      constraints[c.TARGET_ROI] = target_roi
+    elif target_mroi:
+      constraints[c.TARGET_MROI] = target_mroi
    optimized_data = self._create_budget_dataset(
        use_posterior=use_posterior,
        use_kpi=use_kpi,
@@ -1098,18 +1141,16 @@ class BudgetOptimizer:
        batch_size=batch_size,
        use_historical_budget=use_historical_budget,
    )
-
-
-
-
-
+    spend_ratio = np.divide(
+        spend,
+        hist_spend,
+        out=np.zeros_like(hist_spend, dtype=float),
+        where=hist_spend != 0,
    )

    return OptimizationResults(
        meridian=self._meridian,
        analyzer=self._analyzer,
-        use_posterior=use_posterior,
-        use_optimal_frequency=use_optimal_frequency,
        spend_ratio=spend_ratio,
        spend_bounds=spend_bounds,
        _nonoptimized_data=nonoptimized_data,
@@ -1118,7 +1159,83 @@ class BudgetOptimizer:
        _optimization_grid=optimization_grid,
    )

-  def
+  def create_optimization_grid(
+      self,
+      spend: np.ndarray,
+      spend_bound_lower: np.ndarray,
+      spend_bound_upper: np.ndarray,
+      selected_times: Sequence[str] | None,
+      round_factor: int,
+      use_posterior: bool = True,
+      use_kpi: bool = False,
+      use_optimal_frequency: bool = True,
+      optimal_frequency: xr.DataArray | None = None,
+      batch_size: int = c.DEFAULT_BATCH_SIZE,
+  ) -> OptimizationGrid:
+    """Creates a OptimizationGrid for optimization.
+
+    Args:
+      spend: ndarray of shape `(n_paid_channels,)` with spend per paid channel.
+      spend_bound_lower: ndarray of dimension `(n_total_channels,)` containing
+        the lower constraint spend for each channel.
+      spend_bound_upper: ndarray of dimension `(n_total_channels,)` containing
+        the upper constraint spend for each channel.
+      selected_times: Sequence of strings representing the time dimensions in
+        `meridian.input_data.time` to use for optimization.
+      round_factor: The round factor used for the optimization grid.
+      use_posterior: Boolean. If `True`, then the incremental outcome is derived
+        from the posterior distribution of the model. Otherwise, the prior
+        distribution is used.
+      use_kpi: Boolean. If `True`, then the incremental outcome is derived from
+        the KPI impact. Otherwise, the incremental outcome is derived from the
+        revenue impact.
+      use_optimal_frequency: Boolean. Whether optimal frequency was used.
+      optimal_frequency: `xr.DataArray` with dimension `n_rf_channels`,
+        containing the optimal frequency per channel, that maximizes mean ROI
+        over the corresponding prior/posterior distribution. Value is `None` if
+        the model does not contain reach and frequency data, or if the model
+        does contain reach and frequency data, but historical frequency is used
+        for the optimization scenario.
+      batch_size: Max draws per chain in each batch. The calculation is run in
+        batches to avoid memory exhaustion. If a memory error occurs, try
+        reducing `batch_size`. The calculation will generally be faster with
+        larger `batch_size` values.
+
+    Returns:
+      An OptimizationGrid object containing the grid data for optimization.
+    """
+    self._validate_model_fit(use_posterior)
+
+    step_size = 10 ** (-round_factor)
+    (spend_grid, incremental_outcome_grid) = self._create_grids(
+        spend=spend,
+        spend_bound_lower=spend_bound_lower,
+        spend_bound_upper=spend_bound_upper,
+        step_size=step_size,
+        selected_times=selected_times,
+        use_posterior=use_posterior,
+        use_kpi=use_kpi,
+        optimal_frequency=optimal_frequency,
+        batch_size=batch_size,
+    )
+    grid_dataset = self._create_grid_dataset(
+        spend_grid=spend_grid,
+        spend_step_size=step_size,
+        incremental_outcome_grid=incremental_outcome_grid,
+    )
+
+    return OptimizationGrid(
+        _grid_dataset=grid_dataset,
+        spend=spend,
+        use_kpi=use_kpi,
+        use_posterior=use_posterior,
+        use_optimal_frequency=use_optimal_frequency,
+        round_factor=round_factor,
+        optimal_frequency=optimal_frequency,
+        selected_times=selected_times,
+    )
+
+  def _create_grid_dataset(
      self,
      spend_grid: np.ndarray,
      spend_step_size: float,
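Since `create_optimization_grid` is now a public method, a grid can be built and inspected outside a full `optimize()` run. A rough usage sketch with made-up spend figures; the `round_factor` value and the ±30% bounds are illustrative only:

```python
import numpy as np
from meridian.analysis import optimizer

budget_optimizer = optimizer.BudgetOptimizer(mm)      # `mm` is assumed to be fitted

hist_spend = np.array([1_000.0, 2_000.0, 500.0])      # illustrative spend per paid channel
grid = budget_optimizer.create_optimization_grid(
    spend=hist_spend,
    spend_bound_lower=hist_spend * 0.7,               # assumed -30% constraint
    spend_bound_upper=hist_spend * 1.3,               # assumed +30% constraint
    selected_times=None,                              # all time periods
    round_factor=-2,                                  # grid step of 10 ** 2 == 100 spend units
    use_posterior=True,
    use_kpi=False,
    use_optimal_frequency=False,                      # sketch assumes no RF channels
    optimal_frequency=None,
)
print(grid.spend_step_size)                           # 100
print(grid.spend_grid.shape)
```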
@@ -1164,39 +1281,6 @@ class BudgetOptimizer:
        attrs={c.SPEND_STEP_SIZE: spend_step_size},
    )

-  def _validate_budget(
-      self,
-      fixed_budget: bool,
-      budget: float | None,
-      target_roi: float | None,
-      target_mroi: float | None,
-  ):
-    """Validates the budget optimization arguments."""
-    if fixed_budget:
-      if target_roi is not None:
-        raise ValueError(
-            '`target_roi` is only used for flexible budget scenarios.'
-        )
-      if target_mroi is not None:
-        raise ValueError(
-            '`target_mroi` is only used for flexible budget scenarios.'
-        )
-      if budget is not None and budget <= 0:
-        raise ValueError('`budget` must be greater than zero.')
-    else:
-      if budget is not None:
-        raise ValueError('`budget` is only used for fixed budget scenarios.')
-      if target_roi is None and target_mroi is None:
-        raise ValueError(
-            'Must specify either `target_roi` or `target_mroi` for flexible'
-            ' budget optimization.'
-        )
-      if target_roi is not None and target_mroi is not None:
-        raise ValueError(
-            'Must specify only one of `target_roi` or `target_mroi` for'
-            'flexible budget optimization.'
-        )
-
  def _validate_pct_of_spend(
      self, hist_spend: np.ndarray, pct_of_spend: Sequence[float] | None
  ) -> np.ndarray:
@@ -1390,27 +1474,6 @@ class BudgetOptimizer:
        incremental_outcome_with_mean_median_and_ci[:, 0]
    )

-    # expected_outcome here is a tensor with the shape (n_chains, n_draws)
-    expected_outcome = self._analyzer.expected_outcome(
-        use_posterior=use_posterior,
-        new_data=analyzer.DataTensors(
-            media=new_media,
-            reach=new_reach,
-            frequency=new_frequency,
-        ),
-        selected_times=selected_times,
-        use_kpi=use_kpi,
-        batch_size=batch_size,
-    )
-    mean_expected_outcome = tf.reduce_mean(expected_outcome, (0, 1))  # a scalar
-
-    pct_contrib = incremental_outcome / mean_expected_outcome[..., None] * 100
-    pct_contrib_with_mean_median_and_ci = analyzer.get_central_tendency_and_ci(
-        data=pct_contrib,
-        confidence_level=confidence_level,
-        include_median=True,
-    )
-
    aggregated_impressions = self._analyzer.get_aggregated_impressions(
        selected_times=selected_times,
        selected_geos=None,
@@ -1471,10 +1534,6 @@ class BudgetOptimizer:
            [c.CHANNEL, c.METRIC],
            incremental_outcome_with_mean_median_and_ci,
        ),
-        c.PCT_OF_CONTRIBUTION: (
-            [c.CHANNEL, c.METRIC],
-            pct_contrib_with_mean_median_and_ci,
-        ),
        c.EFFECTIVENESS: (
            [c.CHANNEL, c.METRIC],
            effectiveness_with_mean_median_and_ci,
@@ -1714,9 +1773,12 @@ class BudgetOptimizer:
      )
      spend_grid[: len(spend_grid_m), i] = spend_grid_m
    incremental_outcome_grid = np.full([n_grid_rows, n_grid_columns], np.nan)
-    multipliers_grid = tf.cast(
+    multipliers_grid_base = tf.cast(
        tf.math.divide_no_nan(spend_grid, spend), dtype=tf.float32
    )
+    multipliers_grid = np.where(
+        np.isnan(spend_grid), np.nan, multipliers_grid_base
+    )
    for i in range(n_grid_rows):
      self._update_incremental_outcome_grid(
          i=i,
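The added `np.where` step preserves the `NaN` padding of `spend_grid` in the multipliers grid. `tf.math.divide_no_nan` returns `0` whenever the denominator is zero, so for a channel whose spend is `0` even the `NaN`-padded rows would otherwise become `0` multipliers. A small self-contained illustration with made-up numbers:

```python
import numpy as np
import tensorflow as tf

# Two channels; channel 1 has zero spend and NaN padding in its last grid row.
spend_grid = np.array([[100.0, 50.0],
                       [200.0, np.nan]])
spend = np.array([100.0, 0.0])

base = tf.cast(tf.math.divide_no_nan(spend_grid, spend), dtype=tf.float32)
print(base.numpy())        # [[1. 0.] [2. 0.]] -- the NaN cell collapsed to 0

multipliers = np.where(np.isnan(spend_grid), np.nan, base)
print(multipliers)         # [[1. 0.] [2. nan]] -- NaN padding preserved, as in the hunk above
```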
@@ -1838,6 +1900,39 @@
  return spend_optimal


+def _validate_budget(
+    fixed_budget: bool,
+    budget: float | None,
+    target_roi: float | None,
+    target_mroi: float | None,
+):
+  """Validates the budget optimization arguments."""
+  if fixed_budget:
+    if target_roi is not None:
+      raise ValueError(
+          '`target_roi` is only used for flexible budget scenarios.'
+      )
+    if target_mroi is not None:
+      raise ValueError(
+          '`target_mroi` is only used for flexible budget scenarios.'
+      )
+    if budget is not None and budget <= 0:
+      raise ValueError('`budget` must be greater than zero.')
+  else:
+    if budget is not None:
+      raise ValueError('`budget` is only used for fixed budget scenarios.')
+    if target_roi is None and target_mroi is None:
+      raise ValueError(
+          'Must specify either `target_roi` or `target_mroi` for flexible'
+          ' budget optimization.'
+      )
+    if target_roi is not None and target_mroi is not None:
+      raise ValueError(
+          'Must specify only one of `target_roi` or `target_mroi` for'
+          'flexible budget optimization.'
+      )
+
+
 def _get_round_factor(budget: float, gtol: float) -> int:
  """Function for obtaining number of integer digits to round off of budget.

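`_validate_budget` is now a module-level helper, which makes the legal argument combinations easy to see in isolation. The calls below exercise it directly purely for illustration; in normal use these arguments arrive via `BudgetOptimizer.optimize()`:

```python
from meridian.analysis.optimizer import _validate_budget  # private helper, imported only to illustrate

# Fixed budget: an explicit budget is fine, ROI targets are not.
_validate_budget(fixed_budget=True, budget=10_000.0, target_roi=None, target_mroi=None)

# Flexible budget: exactly one of target_roi / target_mroi must be given, and no budget.
_validate_budget(fixed_budget=False, budget=None, target_roi=2.0, target_mroi=None)

# Mixing a fixed budget with a target ROI is rejected.
try:
    _validate_budget(fixed_budget=True, budget=None, target_roi=2.0, target_mroi=None)
except ValueError as err:
    print(err)  # `target_roi` is only used for flexible budget scenarios.
```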
@@ -1902,6 +1997,11 @@ def _exceeds_optimization_constraints(
  if fixed_budget:
    return np.sum(spend) > budget
  elif target_roi is not None:
-
+    cur_total_roi = np.sum(incremental_outcome) / np.sum(spend)
+    # In addition to the total roi being less than the target roi, the roi of
+    # the current optimization step should also be less than the total roi.
+    # Without the second condition, the optimization algorithm may not have
+    # found the roi point close to the target roi yet.
+    return cur_total_roi < target_roi and roi_grid_point < cur_total_roi
  else:
    return roi_grid_point < target_mroi
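For the flexible-budget `target_roi` case, the search now stops only when the running total ROI has fallen below the target and the current grid point's ROI is below that running total. A worked example with made-up numbers, mirroring the variable names in the function above:

```python
import numpy as np

spend = np.array([4_000.0, 6_000.0])                 # illustrative spend so far
incremental_outcome = np.array([9_000.0, 16_000.0])  # illustrative outcome so far
roi_grid_point = 1.8                                 # ROI of the grid point under consideration
target_roi = 3.0

cur_total_roi = np.sum(incremental_outcome) / np.sum(spend)   # 25_000 / 10_000 = 2.5
exceeds = cur_total_roi < target_roi and roi_grid_point < cur_total_roi
print(cur_total_roi, exceeds)  # 2.5 True -> constraints exceeded, stop allocating more spend
```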
meridian/model/model.py
CHANGED
@@ -537,9 +537,10 @@ class Meridian:
    self._validate_injected_inference_data_group_coord(
        inference_data, group, constants.TIME, self.n_times
    )
-    self._validate_injected_inference_data_group_coord(
-
-
+    if not self.model_spec.unique_sigma_for_each_geo:
+      self._validate_injected_inference_data_group_coord(
+          inference_data, group, constants.SIGMA_DIM, self._sigma_shape
+      )
    self._validate_injected_inference_data_group_coord(
        inference_data,
        group,
{google_meridian-1.0.4.dist-info → google_meridian-1.0.5.dist-info}/LICENSE
File without changes
{google_meridian-1.0.4.dist-info → google_meridian-1.0.5.dist-info}/WHEEL
File without changes
{google_meridian-1.0.4.dist-info → google_meridian-1.0.5.dist-info}/top_level.txt
File without changes