diff-diff 2.0.4__cp312-cp312-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1291 @@
1
+ """
2
+ Triple Difference (DDD) estimators.
3
+
4
+ Implements the methodology from Ortiz-Villavicencio & Sant'Anna (2025)
5
+ "Better Understanding Triple Differences Estimators" for causal inference
6
+ when treatment requires satisfying two criteria:
7
+ 1. Belonging to a treated group (e.g., a state with a policy)
8
+ 2. Being in an eligible partition (e.g., women or low-income individuals)
9
+
10
+ This module provides regression adjustment, inverse probability weighting,
11
+ and doubly robust estimators that correctly handle covariate adjustment,
12
+ unlike naive implementations.
13
+
14
+ Current Implementation (v1.3):
15
+ - 2-period DDD (pre/post binary time indicator)
16
+ - Regression adjustment, IPW, and doubly robust estimation
17
+ - Analytical standard errors with robust/cluster options
18
+ - Proper covariate handling
19
+
20
+ Planned for v1.4 (see ROADMAP.md):
21
+ - Staggered adoption support (multiple treatment timing)
22
+ - Event study aggregation for dynamic treatment effects
23
+ - Multiplier bootstrap inference
24
+ - Integration with plot_event_study() visualization
25
+
26
+ Reference:
27
+ Ortiz-Villavicencio, M., & Sant'Anna, P. H. C. (2025).
28
+ Better Understanding Triple Differences Estimators.
29
+ arXiv:2505.09942.
30
+ """
31
+
32
+ import warnings
33
+ from dataclasses import dataclass, field
34
+ from typing import Any, Dict, List, Optional, Tuple
35
+
36
+ import numpy as np
37
+ import pandas as pd
38
+ from scipy import optimize
39
+
40
+ from diff_diff.linalg import LinearRegression, compute_robust_vcov, solve_ols
41
+ from diff_diff.results import _get_significance_stars
42
+ from diff_diff.utils import (
43
+ compute_confidence_interval,
44
+ compute_p_value,
45
+ )
46
+
47
+ # =============================================================================
48
+ # Results Classes
49
+ # =============================================================================
50
+
51
+
52
+ @dataclass
53
+ class TripleDifferenceResults:
54
+ """
55
+ Results from Triple Difference (DDD) estimation.
56
+
57
+ Provides access to the estimated average treatment effect on the treated
58
+ (ATT), standard errors, confidence intervals, and diagnostic information.
59
+
60
+ Attributes
61
+ ----------
62
+ att : float
63
+ Average treatment effect on the treated (ATT).
64
+ This is the effect on units in the treated group (G=1) and eligible
65
+ partition (P=1) after treatment (T=1).
66
+ se : float
67
+ Standard error of the ATT estimate.
68
+ t_stat : float
69
+ T-statistic for the ATT estimate.
70
+ p_value : float
71
+ P-value for the null hypothesis that ATT = 0.
72
+ conf_int : tuple[float, float]
73
+ Confidence interval for the ATT.
74
+ n_obs : int
75
+ Total number of observations used in estimation.
76
+ n_treated_eligible : int
77
+ Number of observations in treated group and eligible partition.
78
+ n_treated_ineligible : int
79
+ Number of observations in treated group and ineligible partition.
80
+ n_control_eligible : int
81
+ Number of observations in control group and eligible partition.
82
+ n_control_ineligible : int
83
+ Number of observations in control group and ineligible partition.
84
+ estimation_method : str
85
+ Estimation method used: "dr" (doubly robust), "reg" (regression
86
+ adjustment), or "ipw" (inverse probability weighting).
87
+ alpha : float
88
+ Significance level used for confidence intervals.
89
+ """
90
+
91
+ att: float
92
+ se: float
93
+ t_stat: float
94
+ p_value: float
95
+ conf_int: Tuple[float, float]
96
+ n_obs: int
97
+ n_treated_eligible: int
98
+ n_treated_ineligible: int
99
+ n_control_eligible: int
100
+ n_control_ineligible: int
101
+ estimation_method: str
102
+ alpha: float = 0.05
103
+ # Group means for diagnostics
104
+ group_means: Optional[Dict[str, float]] = field(default=None)
105
+ # Propensity score diagnostics (for IPW/DR)
106
+ pscore_stats: Optional[Dict[str, float]] = field(default=None)
107
+ # Regression diagnostics
108
+ r_squared: Optional[float] = field(default=None)
109
+ # Covariate balance statistics
110
+ covariate_balance: Optional[pd.DataFrame] = field(default=None, repr=False)
111
+ # Inference details
112
+ inference_method: str = field(default="analytical")
113
+ n_bootstrap: Optional[int] = field(default=None)
114
+ n_clusters: Optional[int] = field(default=None)
115
+
116
+ def __repr__(self) -> str:
117
+ """Concise string representation."""
118
+ return (
119
+ f"TripleDifferenceResults(ATT={self.att:.4f}{self.significance_stars}, "
120
+ f"SE={self.se:.4f}, p={self.p_value:.4f}, method={self.estimation_method})"
121
+ )
122
+
123
+ def summary(self, alpha: Optional[float] = None) -> str:
124
+ """
125
+ Generate a formatted summary of the estimation results.
126
+
127
+ Parameters
128
+ ----------
129
+ alpha : float, optional
130
+ Significance level for confidence intervals. Defaults to the
131
+ alpha used during estimation.
132
+
133
+ Returns
134
+ -------
135
+ str
136
+ Formatted summary table.
137
+ """
138
+ alpha = alpha or self.alpha
139
+ conf_level = int((1 - alpha) * 100)
140
+
141
+ lines = [
142
+ "=" * 75,
143
+ "Triple Difference (DDD) Estimation Results".center(75),
144
+ "=" * 75,
145
+ "",
146
+ f"{'Estimation method:':<30} {self.estimation_method:>15}",
147
+ f"{'Total observations:':<30} {self.n_obs:>15}",
148
+ "",
149
+ "Sample Composition by Cell:",
150
+ f" {'Treated group, Eligible:':<28} {self.n_treated_eligible:>15}",
151
+ f" {'Treated group, Ineligible:':<28} {self.n_treated_ineligible:>15}",
152
+ f" {'Control group, Eligible:':<28} {self.n_control_eligible:>15}",
153
+ f" {'Control group, Ineligible:':<28} {self.n_control_ineligible:>15}",
154
+ ]
155
+
156
+ if self.r_squared is not None:
157
+ lines.append(f"{'R-squared:':<30} {self.r_squared:>15.4f}")
158
+
159
+ if self.inference_method != "analytical":
160
+ lines.append(f"{'Inference method:':<30} {self.inference_method:>15}")
161
+ if self.n_bootstrap is not None:
162
+ lines.append(f"{'Bootstrap replications:':<30} {self.n_bootstrap:>15}")
163
+ if self.n_clusters is not None:
164
+ lines.append(f"{'Number of clusters:':<30} {self.n_clusters:>15}")
165
+
166
+ lines.extend([
167
+ "",
168
+ "-" * 75,
169
+ f"{'Parameter':<15} {'Estimate':>12} {'Std. Err.':>12} {'t-stat':>10} {'P>|t|':>10} {'':>5}",
170
+ "-" * 75,
171
+ f"{'ATT':<15} {self.att:>12.4f} {self.se:>12.4f} {self.t_stat:>10.3f} {self.p_value:>10.4f} {self.significance_stars:>5}",
172
+ "-" * 75,
173
+ "",
174
+ f"{conf_level}% Confidence Interval: [{self.conf_int[0]:.4f}, {self.conf_int[1]:.4f}]",
175
+ ])
176
+
177
+ # Show group means if available
178
+ if self.group_means:
179
+ lines.extend([
180
+ "",
181
+ "-" * 75,
182
+ "Cell Means (Y):",
183
+ "-" * 75,
184
+ ])
185
+ for cell, mean in self.group_means.items():
186
+ lines.append(f" {cell:<35} {mean:>12.4f}")
187
+
188
+ # Show propensity score diagnostics if available
189
+ if self.pscore_stats:
190
+ lines.extend([
191
+ "",
192
+ "-" * 75,
193
+ "Propensity Score Diagnostics:",
194
+ "-" * 75,
195
+ ])
196
+ for stat, value in self.pscore_stats.items():
197
+ lines.append(f" {stat:<35} {value:>12.4f}")
198
+
199
+ lines.extend([
200
+ "",
201
+ "Signif. codes: '***' 0.001, '**' 0.01, '*' 0.05, '.' 0.1",
202
+ "=" * 75,
203
+ ])
204
+
205
+ return "\n".join(lines)
206
+
207
+ def print_summary(self, alpha: Optional[float] = None) -> None:
208
+ """Print the summary to stdout."""
209
+ print(self.summary(alpha))
210
+
211
+ def to_dict(self) -> Dict[str, Any]:
212
+ """
213
+ Convert results to a dictionary.
214
+
215
+ Returns
216
+ -------
217
+ Dict[str, Any]
218
+ Dictionary containing all estimation results.
219
+ """
220
+ result = {
221
+ "att": self.att,
222
+ "se": self.se,
223
+ "t_stat": self.t_stat,
224
+ "p_value": self.p_value,
225
+ "conf_int_lower": self.conf_int[0],
226
+ "conf_int_upper": self.conf_int[1],
227
+ "n_obs": self.n_obs,
228
+ "n_treated_eligible": self.n_treated_eligible,
229
+ "n_treated_ineligible": self.n_treated_ineligible,
230
+ "n_control_eligible": self.n_control_eligible,
231
+ "n_control_ineligible": self.n_control_ineligible,
232
+ "estimation_method": self.estimation_method,
233
+ "inference_method": self.inference_method,
234
+ }
235
+ if self.r_squared is not None:
236
+ result["r_squared"] = self.r_squared
237
+ if self.n_bootstrap is not None:
238
+ result["n_bootstrap"] = self.n_bootstrap
239
+ if self.n_clusters is not None:
240
+ result["n_clusters"] = self.n_clusters
241
+ return result
242
+
243
+ def to_dataframe(self) -> pd.DataFrame:
244
+ """
245
+ Convert results to a pandas DataFrame.
246
+
247
+ Returns
248
+ -------
249
+ pd.DataFrame
250
+ DataFrame with estimation results.
251
+ """
252
+ return pd.DataFrame([self.to_dict()])
253
+
254
+ @property
255
+ def is_significant(self) -> bool:
256
+ """Check if the ATT is statistically significant at the alpha level."""
257
+ return bool(self.p_value < self.alpha)
258
+
259
+ @property
260
+ def significance_stars(self) -> str:
261
+ """Return significance stars based on p-value."""
262
+ return _get_significance_stars(self.p_value)
263
+
264
+
265
+ # =============================================================================
266
+ # Helper Functions
267
+ # =============================================================================
268
+
269
+
270
+ def _logistic_regression(
271
+ X: np.ndarray,
272
+ y: np.ndarray,
273
+ max_iter: int = 100,
274
+ tol: float = 1e-6,
275
+ ) -> Tuple[np.ndarray, np.ndarray]:
276
+ """
277
+ Fit logistic regression using scipy.optimize.
278
+
279
+ Parameters
280
+ ----------
281
+ X : np.ndarray
282
+ Feature matrix (n_samples, n_features). Intercept added automatically.
283
+ y : np.ndarray
284
+ Binary outcome (0/1).
285
+ max_iter : int
286
+ Maximum iterations.
287
+ tol : float
288
+ Convergence tolerance.
289
+
290
+ Returns
291
+ -------
292
+ beta : np.ndarray
293
+ Fitted coefficients (including intercept).
294
+ probs : np.ndarray
295
+ Predicted probabilities.
296
+ """
297
+ n, p = X.shape
298
+ X_with_intercept = np.column_stack([np.ones(n), X])
299
+
300
+ def neg_log_likelihood(beta: np.ndarray) -> float:
301
+ z = X_with_intercept @ beta
302
+ z = np.clip(z, -500, 500)
303
+ log_lik = np.sum(y * z - np.log(1 + np.exp(z)))
304
+ return -log_lik
305
+
306
+ def gradient(beta: np.ndarray) -> np.ndarray:
307
+ z = X_with_intercept @ beta
308
+ z = np.clip(z, -500, 500)
309
+ probs = 1 / (1 + np.exp(-z))
310
+ return -X_with_intercept.T @ (y - probs)
311
+
312
+ beta_init = np.zeros(p + 1)
313
+
314
+ result = optimize.minimize(
315
+ neg_log_likelihood,
316
+ beta_init,
317
+ method='BFGS',
318
+ jac=gradient,
319
+ options={'maxiter': max_iter, 'gtol': tol}
320
+ )
321
+
322
+ beta = result.x
323
+ z = X_with_intercept @ beta
324
+ z = np.clip(z, -500, 500)
325
+ probs = 1 / (1 + np.exp(-z))
326
+
327
+ return beta, probs
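A quick sanity check of this helper on synthetic data; illustrative only, and it assumes _logistic_regression is in scope (e.g., run inside this module):

    import numpy as np

    rng = np.random.default_rng(1)
    X_sim = rng.normal(size=(5000, 2))
    logits = 0.5 + 1.0 * X_sim[:, 0] - 2.0 * X_sim[:, 1]
    y_sim = rng.binomial(1, 1.0 / (1.0 + np.exp(-logits)))

    beta_hat, probs_hat = _logistic_regression(X_sim, y_sim)
    print(beta_hat)                         # roughly [0.5, 1.0, -2.0]; intercept comes first
    print(probs_hat.mean(), y_sim.mean())   # fitted mean probability tracks the observed rate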
328
+
329
+
330
+ def _linear_regression(
331
+ X: np.ndarray,
332
+ y: np.ndarray,
333
+ ) -> Tuple[np.ndarray, np.ndarray, float]:
334
+ """
335
+ Fit OLS regression.
336
+
337
+ Parameters
338
+ ----------
339
+ X : np.ndarray
340
+ Feature matrix (n_samples, n_features). Intercept added automatically.
341
+ y : np.ndarray
342
+ Outcome variable.
343
+
344
+ Returns
345
+ -------
346
+ beta : np.ndarray
347
+ Fitted coefficients (including intercept).
348
+ fitted : np.ndarray
349
+ Fitted values.
350
+ r_squared : float
351
+ R-squared of the regression.
352
+ """
353
+ n = X.shape[0]
354
+ X_with_intercept = np.column_stack([np.ones(n), X])
355
+
356
+ # Use unified OLS backend
357
+ beta, residuals, fitted, _ = solve_ols(
358
+ X_with_intercept, y, return_fitted=True, return_vcov=False
359
+ )
360
+
361
+ # Compute R-squared
362
+ ss_res = np.sum(residuals**2)
363
+ ss_tot = np.sum((y - np.mean(y)) ** 2)
364
+ r_squared = 1 - (ss_res / ss_tot) if ss_tot > 0 else 0.0
365
+
366
+ return beta, fitted, r_squared
367
+
368
+
369
+ # =============================================================================
370
+ # Main Estimator Class
371
+ # =============================================================================
372
+
373
+
374
+ class TripleDifference:
375
+ """
376
+ Triple Difference (DDD) estimator.
377
+
378
+ Estimates the average treatment effect on the treated (ATT) when treatment
379
+ requires satisfying two criteria: belonging to a treated group AND being
380
+ in an eligible partition of the population.
381
+
382
+ This implementation follows Ortiz-Villavicencio & Sant'Anna (2025), which
383
+ shows that naive DDD implementations (difference of two DiDs, three-way
384
+ fixed effects) are invalid when covariates are needed for identification.
385
+
386
+ Parameters
387
+ ----------
388
+ estimation_method : str, default="dr"
389
+ Estimation method to use:
390
+ - "dr": Doubly robust (recommended). Consistent if either the outcome
391
+ model or propensity score model is correctly specified.
392
+ - "reg": Regression adjustment (outcome regression).
393
+ - "ipw": Inverse probability weighting.
394
+ robust : bool, default=True
395
+ Whether to use heteroskedasticity-robust standard errors (HC1).
396
+ cluster : str, optional
397
+ Column name for cluster-robust standard errors.
398
+ alpha : float, default=0.05
399
+ Significance level for confidence intervals.
400
+ pscore_trim : float, default=0.01
401
+ Trimming threshold for propensity scores. Scores below this value
402
+ or above (1 - pscore_trim) are clipped to avoid extreme weights.
403
+
404
+ Attributes
405
+ ----------
406
+ results_ : TripleDifferenceResults
407
+ Estimation results after calling fit().
408
+ is_fitted_ : bool
409
+ Whether the model has been fitted.
410
+
411
+ Examples
412
+ --------
413
+ Basic usage with a DataFrame:
414
+
415
+ >>> import pandas as pd
416
+ >>> from diff_diff import TripleDifference
417
+ >>>
418
+ >>> # Data where treatment affects women (partition=1) in states
419
+ >>> # that enacted a policy (group=1)
420
+ >>> data = pd.DataFrame({
421
+ ... 'outcome': [...],
422
+ ... 'group': [1, 1, 0, 0, ...], # 1=policy state, 0=control state
423
+ ... 'partition': [1, 0, 1, 0, ...], # 1=women, 0=men
424
+ ... 'post': [0, 0, 1, 1, ...], # 1=post-treatment period
425
+ ... })
426
+ >>>
427
+ >>> # Fit using doubly robust estimation
428
+ >>> ddd = TripleDifference(estimation_method="dr")
429
+ >>> results = ddd.fit(
430
+ ... data,
431
+ ... outcome='outcome',
432
+ ... group='group',
433
+ ... partition='partition',
434
+ ... time='post'
435
+ ... )
436
+ >>> print(results.att) # ATT estimate
437
+
438
+ With covariates (properly handled unlike naive DDD):
439
+
440
+ >>> results = ddd.fit(
441
+ ... data,
442
+ ... outcome='outcome',
443
+ ... group='group',
444
+ ... partition='partition',
445
+ ... time='post',
446
+ ... covariates=['age', 'income']
447
+ ... )
448
+
449
+ Notes
450
+ -----
451
+ The DDD estimator is appropriate when:
452
+
453
+ 1. Treatment affects only units satisfying BOTH criteria:
454
+ - Belonging to a treated group (G=1), e.g., states with a policy
455
+ - Being in an eligible partition (P=1), e.g., women, low-income
456
+
457
+ 2. The DDD parallel trends assumption holds: the differential trend
458
+ between eligible and ineligible partitions would have been the same
459
+ across treated and control groups, absent treatment.
460
+
461
+ This is weaker than requiring separate parallel trends for two DiDs,
462
+ as biases can cancel out in the differencing.
463
+
464
+ References
465
+ ----------
466
+ .. [1] Ortiz-Villavicencio, M., & Sant'Anna, P. H. C. (2025).
467
+ Better Understanding Triple Differences Estimators.
468
+ arXiv:2505.09942.
469
+
470
+ .. [2] Gruber, J. (1994). The incidence of mandated maternity benefits.
471
+ American Economic Review, 84(3), 622-641.
472
+ """
473
+
474
+ def __init__(
475
+ self,
476
+ estimation_method: str = "dr",
477
+ robust: bool = True,
478
+ cluster: Optional[str] = None,
479
+ alpha: float = 0.05,
480
+ pscore_trim: float = 0.01,
481
+ ):
482
+ if estimation_method not in ("dr", "reg", "ipw"):
483
+ raise ValueError(
484
+ f"estimation_method must be 'dr', 'reg', or 'ipw', "
485
+ f"got '{estimation_method}'"
486
+ )
487
+ self.estimation_method = estimation_method
488
+ self.robust = robust
489
+ self.cluster = cluster
490
+ self.alpha = alpha
491
+ self.pscore_trim = pscore_trim
492
+
493
+ self.is_fitted_ = False
494
+ self.results_: Optional[TripleDifferenceResults] = None
495
+
496
+ def fit(
497
+ self,
498
+ data: pd.DataFrame,
499
+ outcome: str,
500
+ group: str,
501
+ partition: str,
502
+ time: str,
503
+ covariates: Optional[List[str]] = None,
504
+ ) -> TripleDifferenceResults:
505
+ """
506
+ Fit the Triple Difference model.
507
+
508
+ Parameters
509
+ ----------
510
+ data : pd.DataFrame
511
+ DataFrame containing all variables.
512
+ outcome : str
513
+ Name of the outcome variable column.
514
+ group : str
515
+ Name of the group indicator column (0/1).
516
+ 1 = treated group (e.g., states that enacted policy).
517
+ 0 = control group.
518
+ partition : str
519
+ Name of the partition/eligibility indicator column (0/1).
520
+ 1 = eligible partition (e.g., women, targeted demographic).
521
+ 0 = ineligible partition.
522
+ time : str
523
+ Name of the time period indicator column (0/1).
524
+ 1 = post-treatment period.
525
+ 0 = pre-treatment period.
526
+ covariates : list of str, optional
527
+ List of covariate column names to adjust for.
528
+ These are properly incorporated using the selected estimation
529
+ method (unlike naive DDD implementations).
530
+
531
+ Returns
532
+ -------
533
+ TripleDifferenceResults
534
+ Object containing estimation results.
535
+
536
+ Raises
537
+ ------
538
+ ValueError
539
+ If required columns are missing or data validation fails.
540
+ """
541
+ # Validate inputs
542
+ self._validate_data(data, outcome, group, partition, time, covariates)
543
+
544
+ # Extract data
545
+ y = data[outcome].values.astype(float)
546
+ G = data[group].values.astype(float)
547
+ P = data[partition].values.astype(float)
548
+ T = data[time].values.astype(float)
549
+
550
+ # Get covariates if specified
551
+ X = None
552
+ if covariates:
553
+ X = data[covariates].values.astype(float)
554
+ if np.any(np.isnan(X)):
555
+ raise ValueError("Covariates contain missing values")
556
+
557
+ # Count observations in each cell
558
+ n_obs = len(y)
559
+ n_treated_eligible = int(np.sum((G == 1) & (P == 1)))
560
+ n_treated_ineligible = int(np.sum((G == 1) & (P == 0)))
561
+ n_control_eligible = int(np.sum((G == 0) & (P == 1)))
562
+ n_control_ineligible = int(np.sum((G == 0) & (P == 0)))
563
+
564
+ # Compute cell means for diagnostics
565
+ group_means = self._compute_cell_means(y, G, P, T)
566
+
567
+ # Estimate ATT based on method
568
+ if self.estimation_method == "reg":
569
+ att, se, r_squared, pscore_stats = self._regression_adjustment(
570
+ y, G, P, T, X
571
+ )
572
+ elif self.estimation_method == "ipw":
573
+ att, se, r_squared, pscore_stats = self._ipw_estimation(
574
+ y, G, P, T, X
575
+ )
576
+ else: # doubly robust
577
+ att, se, r_squared, pscore_stats = self._doubly_robust(
578
+ y, G, P, T, X
579
+ )
580
+
581
+ # Compute inference
582
+ t_stat = att / se if se > 0 else 0.0
583
+ df = n_obs - 8 # Approximate df (8 cell means)
584
+ if covariates:
585
+ df -= len(covariates)
586
+ df = max(df, 1)
587
+
588
+ p_value = compute_p_value(t_stat, df=df)
589
+ conf_int = compute_confidence_interval(att, se, self.alpha, df=df)
590
+
591
+ # Get number of clusters if clustering
592
+ n_clusters = None
593
+ if self.cluster is not None:
594
+ n_clusters = data[self.cluster].nunique()
595
+
596
+ # Create results object
597
+ self.results_ = TripleDifferenceResults(
598
+ att=att,
599
+ se=se,
600
+ t_stat=t_stat,
601
+ p_value=p_value,
602
+ conf_int=conf_int,
603
+ n_obs=n_obs,
604
+ n_treated_eligible=n_treated_eligible,
605
+ n_treated_ineligible=n_treated_ineligible,
606
+ n_control_eligible=n_control_eligible,
607
+ n_control_ineligible=n_control_ineligible,
608
+ estimation_method=self.estimation_method,
609
+ alpha=self.alpha,
610
+ group_means=group_means,
611
+ pscore_stats=pscore_stats,
612
+ r_squared=r_squared,
613
+ inference_method="analytical",
614
+ n_clusters=n_clusters,
615
+ )
616
+
617
+ self.is_fitted_ = True
618
+ return self.results_
619
+
620
+ def _validate_data(
621
+ self,
622
+ data: pd.DataFrame,
623
+ outcome: str,
624
+ group: str,
625
+ partition: str,
626
+ time: str,
627
+ covariates: Optional[List[str]] = None,
628
+ ) -> None:
629
+ """Validate input data."""
630
+ if not isinstance(data, pd.DataFrame):
631
+ raise TypeError("data must be a pandas DataFrame")
632
+
633
+ # Check required columns exist
634
+ required_cols = [outcome, group, partition, time]
635
+ if covariates:
636
+ required_cols.extend(covariates)
637
+
638
+ missing_cols = [col for col in required_cols if col not in data.columns]
639
+ if missing_cols:
640
+ raise ValueError(f"Missing columns in data: {missing_cols}")
641
+
642
+ # Check for missing values in required columns
643
+ for col in [outcome, group, partition, time]:
644
+ if data[col].isna().any():
645
+ raise ValueError(f"Column '{col}' contains missing values")
646
+
647
+ # Validate binary variables
648
+ for col, name in [(group, "group"), (partition, "partition"), (time, "time")]:
649
+ unique_vals = set(data[col].unique())
650
+ if not unique_vals.issubset({0, 1, 0.0, 1.0}):
651
+ raise ValueError(
652
+ f"'{name}' column must be binary (0/1), "
653
+ f"got values: {sorted(unique_vals)}"
654
+ )
655
+ if len(unique_vals) < 2:
656
+ raise ValueError(
657
+ f"'{name}' column must have both 0 and 1 values"
658
+ )
659
+
660
+ # Check we have observations in all cells
661
+ G = data[group].values
662
+ P = data[partition].values
663
+ T = data[time].values
664
+
665
+ cells = [
666
+ ((G == 1) & (P == 1) & (T == 0), "treated, eligible, pre"),
667
+ ((G == 1) & (P == 1) & (T == 1), "treated, eligible, post"),
668
+ ((G == 1) & (P == 0) & (T == 0), "treated, ineligible, pre"),
669
+ ((G == 1) & (P == 0) & (T == 1), "treated, ineligible, post"),
670
+ ((G == 0) & (P == 1) & (T == 0), "control, eligible, pre"),
671
+ ((G == 0) & (P == 1) & (T == 1), "control, eligible, post"),
672
+ ((G == 0) & (P == 0) & (T == 0), "control, ineligible, pre"),
673
+ ((G == 0) & (P == 0) & (T == 1), "control, ineligible, post"),
674
+ ]
675
+
676
+ for mask, cell_name in cells:
677
+ if np.sum(mask) == 0:
678
+ raise ValueError(
679
+ f"No observations in cell: {cell_name}. "
680
+ "DDD requires observations in all 8 cells."
681
+ )
682
+
683
+ def _compute_cell_means(
684
+ self,
685
+ y: np.ndarray,
686
+ G: np.ndarray,
687
+ P: np.ndarray,
688
+ T: np.ndarray,
689
+ ) -> Dict[str, float]:
690
+ """Compute mean outcomes for each of the 8 DDD cells."""
691
+ means = {}
692
+ for g_val, g_name in [(1, "Treated"), (0, "Control")]:
693
+ for p_val, p_name in [(1, "Eligible"), (0, "Ineligible")]:
694
+ for t_val, t_name in [(0, "Pre"), (1, "Post")]:
695
+ mask = (G == g_val) & (P == p_val) & (T == t_val)
696
+ cell_name = f"{g_name}, {p_name}, {t_name}"
697
+ means[cell_name] = float(np.mean(y[mask]))
698
+ return means
699
+
700
+ def _regression_adjustment(
701
+ self,
702
+ y: np.ndarray,
703
+ G: np.ndarray,
704
+ P: np.ndarray,
705
+ T: np.ndarray,
706
+ X: Optional[np.ndarray],
707
+ ) -> Tuple[float, float, Optional[float], Optional[Dict[str, float]]]:
708
+ """
709
+ Estimate ATT using regression adjustment.
710
+
711
+ Fits an outcome regression with full interactions and covariates,
712
+ then computes the DDD estimand.
713
+
714
+ With covariates, this properly conditions on X rather than naively
715
+ differencing two DiD estimates.
716
+ """
717
+ n = len(y)
718
+
719
+ # Build design matrix for DDD regression
720
+ # Full specification: Y = α + β_G*G + β_P*P + β_T*T
721
+ # + β_GP*G*P + β_GT*G*T + β_PT*P*T
722
+ # + β_GPT*G*P*T + γ'X + ε
723
+ # The DDD estimate is β_GPT
724
+
725
+ # Create interactions
726
+ GP = G * P
727
+ GT = G * T
728
+ PT = P * T
729
+ GPT = G * P * T
730
+
731
+ # Build design matrix
732
+ design_cols = [np.ones(n), G, P, T, GP, GT, PT, GPT]
733
+ col_names = ["const", "G", "P", "T", "G*P", "G*T", "P*T", "G*P*T"]
734
+
735
+ if X is not None:
736
+ for i in range(X.shape[1]):
737
+ design_cols.append(X[:, i])
738
+ col_names.append(f"X{i}")
739
+
740
+ design_matrix = np.column_stack(design_cols)
741
+
742
+ # Fit OLS using LinearRegression helper
743
+ reg = LinearRegression(
744
+ include_intercept=False, # Intercept already in design_matrix
745
+ robust=self.robust,
746
+ alpha=self.alpha,
747
+ ).fit(design_matrix, y)
748
+
749
+ # ATT is the coefficient on G*P*T (index 7)
750
+ inference = reg.get_inference(7)
751
+ att = inference.coefficient
752
+ se = inference.se
753
+ r_squared = reg.r_squared()
754
+
755
+ return att, se, r_squared, None
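Without covariates the specification above is saturated (eight parameters for eight cells), so the coefficient on G*P*T equals the raw triple difference of cell means. A small illustrative check, not part of the package, using plain numpy:

    import numpy as np

    rng = np.random.default_rng(0)
    n_sim = 800
    G_s = rng.integers(0, 2, n_sim).astype(float)
    P_s = rng.integers(0, 2, n_sim).astype(float)
    T_s = rng.integers(0, 2, n_sim).astype(float)
    y_s = 1.0 + 0.5 * G_s + 0.3 * P_s + 0.2 * T_s + 2.0 * G_s * P_s * T_s + rng.normal(0, 1, n_sim)

    design = np.column_stack([
        np.ones(n_sim), G_s, P_s, T_s,
        G_s * P_s, G_s * T_s, P_s * T_s, G_s * P_s * T_s,
    ])
    beta_hat, *_ = np.linalg.lstsq(design, y_s, rcond=None)
    print(beta_hat[-1])   # close to 2.0, the coefficient on G*P*T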
756
+
757
+ def _ipw_estimation(
758
+ self,
759
+ y: np.ndarray,
760
+ G: np.ndarray,
761
+ P: np.ndarray,
762
+ T: np.ndarray,
763
+ X: Optional[np.ndarray],
764
+ ) -> Tuple[float, float, Optional[float], Optional[Dict[str, float]]]:
765
+ """
766
+ Estimate ATT using inverse probability weighting.
767
+
768
+ Estimates propensity scores for cell membership and uses IPW
769
+ to reweight observations for the DDD estimand.
770
+ """
771
+ n = len(y)
772
+
773
+ # For DDD-IPW, we need to estimate probabilities for each cell
774
+ # and use them to construct weighted estimators
775
+
776
+ # Create cell indicators
777
+ # Cell 1: G=1, P=1 (treated, eligible) - "effectively treated"
778
+ # Cell 2: G=1, P=0 (treated, ineligible)
779
+ # Cell 3: G=0, P=1 (control, eligible)
780
+ # Cell 4: G=0, P=0 (control, ineligible)
781
+
782
+ cell_1 = (G == 1) & (P == 1)
783
+ cell_2 = (G == 1) & (P == 0)
784
+ cell_3 = (G == 0) & (P == 1)
785
+ cell_4 = (G == 0) & (P == 0)
786
+
787
+ if X is not None and X.shape[1] > 0:
788
+ # Estimate multinomial propensity scores
789
+ # For simplicity, we estimate two binary propensity score models:
790
+ # P(G=1|X) and P(P=1|X), combined below assuming conditional independence
791
+
792
+ # Propensity for being in treated group
793
+ try:
794
+ _, p_G = _logistic_regression(X, G)
795
+ except Exception:
796
+ warnings.warn(
797
+ "Propensity score estimation for G failed. "
798
+ "Using unconditional probabilities.",
799
+ UserWarning,
800
+ stacklevel=3,
801
+ )
802
+ p_G = np.full(n, np.mean(G))
803
+
804
+ # Propensity for being in eligible partition (conditional on X)
805
+ try:
806
+ _, p_P = _logistic_regression(X, P)
807
+ except Exception:
808
+ warnings.warn(
809
+ "Propensity score estimation for P failed. "
810
+ "Using unconditional probabilities.",
811
+ UserWarning,
812
+ stacklevel=3,
813
+ )
814
+ p_P = np.full(n, np.mean(P))
815
+
816
+ # Clip propensity scores
817
+ p_G = np.clip(p_G, self.pscore_trim, 1 - self.pscore_trim)
818
+ p_P = np.clip(p_P, self.pscore_trim, 1 - self.pscore_trim)
819
+
820
+ # Cell probabilities (assuming independence conditional on X)
821
+ p_cell_1 = p_G * p_P # P(G=1, P=1|X)
822
+ p_cell_2 = p_G * (1 - p_P) # P(G=1, P=0|X)
823
+ p_cell_3 = (1 - p_G) * p_P # P(G=0, P=1|X)
824
+ p_cell_4 = (1 - p_G) * (1 - p_P) # P(G=0, P=0|X)
825
+
826
+ pscore_stats = {
827
+ "P(G=1) mean": float(np.mean(p_G)),
828
+ "P(G=1) std": float(np.std(p_G)),
829
+ "P(P=1) mean": float(np.mean(p_P)),
830
+ "P(P=1) std": float(np.std(p_P)),
831
+ }
832
+ else:
833
+ # Unconditional probabilities
834
+ p_cell_1 = np.full(n, np.mean(cell_1))
835
+ p_cell_2 = np.full(n, np.mean(cell_2))
836
+ p_cell_3 = np.full(n, np.mean(cell_3))
837
+ p_cell_4 = np.full(n, np.mean(cell_4))
838
+ pscore_stats = None
839
+
840
+ # Clip cell probabilities
841
+ p_cell_1 = np.clip(p_cell_1, self.pscore_trim, 1 - self.pscore_trim)
842
+ p_cell_2 = np.clip(p_cell_2, self.pscore_trim, 1 - self.pscore_trim)
843
+ p_cell_3 = np.clip(p_cell_3, self.pscore_trim, 1 - self.pscore_trim)
844
+ p_cell_4 = np.clip(p_cell_4, self.pscore_trim, 1 - self.pscore_trim)
845
+
846
+ # IPW estimator for DDD
847
+ # The DDD-IPW estimator reweights each cell to have the same
848
+ # covariate distribution as the effectively treated (G=1, P=1)
849
+
850
+ # Pre-period means
851
+ pre_mask = T == 0
852
+ post_mask = T == 1
853
+
854
+ def weighted_mean(y_vals, weights):
855
+ """Compute weighted mean, handling edge cases."""
856
+ w_sum = np.sum(weights)
857
+ if w_sum <= 0:
858
+ return 0.0
859
+ return np.sum(y_vals * weights) / w_sum
860
+
861
+ # Cell 1 (G=1, P=1): weight = 1 (reference)
862
+ w1_pre = cell_1 & pre_mask
863
+ w1_post = cell_1 & post_mask
864
+ y_11_pre = np.mean(y[w1_pre]) if np.sum(w1_pre) > 0 else 0
865
+ y_11_post = np.mean(y[w1_post]) if np.sum(w1_post) > 0 else 0
866
+
867
+ # Cell 2 (G=1, P=0): reweight to match X-distribution of cell 1
868
+ w2_pre = (cell_2 & pre_mask).astype(float) * (p_cell_1 / p_cell_2)
869
+ w2_post = (cell_2 & post_mask).astype(float) * (p_cell_1 / p_cell_2)
870
+ y_10_pre = weighted_mean(y, w2_pre)
871
+ y_10_post = weighted_mean(y, w2_post)
872
+
873
+ # Cell 3 (G=0, P=1): reweight to match X-distribution of cell 1
874
+ w3_pre = (cell_3 & pre_mask).astype(float) * (p_cell_1 / p_cell_3)
875
+ w3_post = (cell_3 & post_mask).astype(float) * (p_cell_1 / p_cell_3)
876
+ y_01_pre = weighted_mean(y, w3_pre)
877
+ y_01_post = weighted_mean(y, w3_post)
878
+
879
+ # Cell 4 (G=0, P=0): reweight to match X-distribution of cell 1
880
+ w4_pre = (cell_4 & pre_mask).astype(float) * (p_cell_1 / p_cell_4)
881
+ w4_post = (cell_4 & post_mask).astype(float) * (p_cell_1 / p_cell_4)
882
+ y_00_pre = weighted_mean(y, w4_pre)
883
+ y_00_post = weighted_mean(y, w4_post)
884
+
885
+ # DDD estimate
886
+ att = (
887
+ (y_11_post - y_11_pre)
888
+ - (y_10_post - y_10_pre)
889
+ - (y_01_post - y_01_pre)
890
+ + (y_00_post - y_00_pre)
891
+ )
892
+
893
+ # Standard error (approximate)
894
+ # For simplicity, use influence function approach
895
+ se = self._compute_ipw_se(
896
+ y, G, P, T, cell_1, cell_2, cell_3, cell_4,
897
+ p_cell_1, p_cell_2, p_cell_3, p_cell_4, att
898
+ )
899
+
900
+ return att, se, None, pscore_stats
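The reweighting above is easiest to see in isolation: weighting a comparison cell by p_cell_1 / p_cell_k shifts its covariate distribution toward that of the treated-eligible cell. A toy sketch, illustrative only and assuming G and P are independent given x:

    import numpy as np

    rng = np.random.default_rng(2)
    x = rng.normal(size=20000)
    p_g = 1.0 / (1.0 + np.exp(-x))           # P(G=1 | x)
    p_p = 1.0 / (1.0 + np.exp(-0.5 * x))     # P(P=1 | x)
    G_s = rng.binomial(1, p_g)
    P_s = rng.binomial(1, p_p)

    in_cell_1 = (G_s == 1) & (P_s == 1)      # treated, eligible (reference cell)
    in_cell_4 = (G_s == 0) & (P_s == 0)      # control, ineligible
    w = (p_g * p_p) / ((1.0 - p_g) * (1.0 - p_p))   # p_cell_1 / p_cell_4

    print(x[in_cell_1].mean())                               # covariate mean in the reference cell
    print(np.average(x[in_cell_4], weights=w[in_cell_4]))    # reweighted comparison cell is similar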
901
+
902
+ def _doubly_robust(
903
+ self,
904
+ y: np.ndarray,
905
+ G: np.ndarray,
906
+ P: np.ndarray,
907
+ T: np.ndarray,
908
+ X: Optional[np.ndarray],
909
+ ) -> Tuple[float, float, Optional[float], Optional[Dict[str, float]]]:
910
+ """
911
+ Estimate ATT using doubly robust estimation.
912
+
913
+ Combines outcome regression and IPW for robustness:
914
+ consistent if either the outcome model or propensity score
915
+ model is correctly specified.
916
+ """
917
+ n = len(y)
918
+
919
+ # Cell indicators
920
+ cell_1 = (G == 1) & (P == 1)
921
+ cell_2 = (G == 1) & (P == 0)
922
+ cell_3 = (G == 0) & (P == 1)
923
+ cell_4 = (G == 0) & (P == 0)
924
+
925
+ # Step 1: Outcome regression for each cell-time combination
926
+ # Predict E[Y|X,T] for each cell
927
+ if X is not None and X.shape[1] > 0:
928
+ # Fit outcome models for each cell
929
+ mu_fitted = np.zeros(n)
930
+
931
+ for cell_mask, cell_name in [
932
+ (cell_1, "cell_1"), (cell_2, "cell_2"),
933
+ (cell_3, "cell_3"), (cell_4, "cell_4")
934
+ ]:
935
+ for t_val in [0, 1]:
936
+ mask = cell_mask & (T == t_val)
937
+ if np.sum(mask) > 1:
938
+ X_cell = np.column_stack([X[mask], T[mask]])
939
+ try:
940
+ _, fitted, _ = _linear_regression(X_cell, y[mask])
941
+ mu_fitted[mask] = fitted
942
+ except Exception:
943
+ mu_fitted[mask] = np.mean(y[mask])
944
+ elif np.sum(mask) == 1:
945
+ mu_fitted[mask] = y[mask]
946
+
947
+ # Propensity scores
948
+ try:
949
+ _, p_G = _logistic_regression(X, G)
950
+ except Exception:
951
+ p_G = np.full(n, np.mean(G))
952
+
953
+ try:
954
+ _, p_P = _logistic_regression(X, P)
955
+ except Exception:
956
+ p_P = np.full(n, np.mean(P))
957
+
958
+ p_G = np.clip(p_G, self.pscore_trim, 1 - self.pscore_trim)
959
+ p_P = np.clip(p_P, self.pscore_trim, 1 - self.pscore_trim)
960
+
961
+ p_cell_1 = p_G * p_P
962
+ p_cell_2 = p_G * (1 - p_P)
963
+ p_cell_3 = (1 - p_G) * p_P
964
+ p_cell_4 = (1 - p_G) * (1 - p_P)
965
+
966
+ pscore_stats = {
967
+ "P(G=1) mean": float(np.mean(p_G)),
968
+ "P(G=1) std": float(np.std(p_G)),
969
+ "P(P=1) mean": float(np.mean(p_P)),
970
+ "P(P=1) std": float(np.std(p_P)),
971
+ }
972
+ else:
973
+ # No covariates: use cell means as predictions
974
+ mu_fitted = np.zeros(n)
975
+ for cell_mask in [cell_1, cell_2, cell_3, cell_4]:
976
+ for t_val in [0, 1]:
977
+ mask = cell_mask & (T == t_val)
978
+ if np.sum(mask) > 0:
979
+ mu_fitted[mask] = np.mean(y[mask])
980
+
981
+ # Unconditional probabilities
982
+ p_cell_1 = np.full(n, np.mean(cell_1))
983
+ p_cell_2 = np.full(n, np.mean(cell_2))
984
+ p_cell_3 = np.full(n, np.mean(cell_3))
985
+ p_cell_4 = np.full(n, np.mean(cell_4))
986
+ pscore_stats = None
987
+
988
+ # Clip cell probabilities
989
+ p_cell_1 = np.clip(p_cell_1, self.pscore_trim, 1 - self.pscore_trim)
990
+ p_cell_2 = np.clip(p_cell_2, self.pscore_trim, 1 - self.pscore_trim)
991
+ p_cell_3 = np.clip(p_cell_3, self.pscore_trim, 1 - self.pscore_trim)
992
+ p_cell_4 = np.clip(p_cell_4, self.pscore_trim, 1 - self.pscore_trim)
993
+
994
+ # Step 2: Doubly robust estimator
995
+ # For each cell, compute the augmented IPW term:
996
+ # (Y - mu(X)) * weight + mu(X)
997
+
998
+ pre_mask = T == 0
999
+ post_mask = T == 1
1000
+
1001
+ # Influence function components for each observation
1002
+ n_1 = np.sum(cell_1)
1003
+ p_ref = n_1 / n
1004
+
1005
+ # Cell 1 (G=1, P=1) - effectively treated
1006
+ inf_11 = np.zeros(n)
1007
+ inf_11[cell_1] = (y[cell_1] - mu_fitted[cell_1]) / p_ref
1008
+ # Add outcome model contribution
1009
+ inf_11 += mu_fitted * cell_1.astype(float) / p_ref
1010
+
1011
+ # Cell 2 (G=1, P=0)
1012
+ w_10 = cell_2.astype(float) * (p_cell_1 / p_cell_2)
1013
+ inf_10 = w_10 * (y - mu_fitted) / p_ref
1014
+ # Add outcome model contribution for cell 2 (vectorized)
1015
+ inf_10[cell_2] += mu_fitted[cell_2] * (p_cell_1[cell_2] / p_cell_2[cell_2]) / p_ref
1016
+
1017
+ # Cell 3 (G=0, P=1)
1018
+ w_01 = cell_3.astype(float) * (p_cell_1 / p_cell_3)
1019
+ inf_01 = w_01 * (y - mu_fitted) / p_ref
1020
+ # Add outcome model contribution for cell 3 (vectorized)
1021
+ inf_01[cell_3] += mu_fitted[cell_3] * (p_cell_1[cell_3] / p_cell_3[cell_3]) / p_ref
1022
+
1023
+ # Cell 4 (G=0, P=0)
1024
+ w_00 = cell_4.astype(float) * (p_cell_1 / p_cell_4)
1025
+ inf_00 = w_00 * (y - mu_fitted) / p_ref
1026
+ # Add outcome model contribution for cell 4 (vectorized)
1027
+ inf_00[cell_4] += mu_fitted[cell_4] * (p_cell_1[cell_4] / p_cell_4[cell_4]) / p_ref
1028
+
1029
+ # Compute cell-time means using DR formula
1030
+ def dr_mean(inf_vals, t_mask):
1031
+ return np.mean(inf_vals[t_mask])
1032
+
1033
+ y_11_pre = dr_mean(inf_11, pre_mask)
1034
+ y_11_post = dr_mean(inf_11, post_mask)
1035
+ y_10_pre = dr_mean(inf_10, pre_mask)
1036
+ y_10_post = dr_mean(inf_10, post_mask)
1037
+ y_01_pre = dr_mean(inf_01, pre_mask)
1038
+ y_01_post = dr_mean(inf_01, post_mask)
1039
+ y_00_pre = dr_mean(inf_00, pre_mask)
1040
+ y_00_post = dr_mean(inf_00, post_mask)
1041
+
1042
+ # DDD estimate
1043
+ att = (
1044
+ (y_11_post - y_11_pre)
1045
+ - (y_10_post - y_10_pre)
1046
+ - (y_01_post - y_01_pre)
1047
+ + (y_00_post - y_00_pre)
1048
+ )
1049
+
1050
+ # Standard error computation
1051
+ # Use the simpler variance formula for the DDD estimator
1052
+ # Var(DDD) ≈ sum over the 8 cell-time cells of (within-cell variance / cell size)
1053
+
1054
+ # Compute variances within each cell-time combination
1055
+ def cell_var(cell_mask, t_mask, y_vals):
1056
+ mask = cell_mask & t_mask
1057
+ if np.sum(mask) > 1:
1058
+ return np.var(y_vals[mask], ddof=1), np.sum(mask)
1059
+ return 0.0, max(1, np.sum(mask))
1060
+
1061
+ # Variance components for each of the 8 cells
1062
+ var_components = []
1063
+ for cell_mask in [cell_1, cell_2, cell_3, cell_4]:
1064
+ for t_mask in [pre_mask, post_mask]:
1065
+ v, n_cell = cell_var(cell_mask, t_mask, y)
1066
+ if n_cell > 0:
1067
+ var_components.append(v / n_cell)
1068
+
1069
+ # Total variance is sum of components (assuming independence)
1070
+ total_var = sum(var_components)
1071
+ se = np.sqrt(total_var)
1072
+
1073
+ # R-squared from outcome regression
1074
+ if X is not None:
1075
+ ss_res = np.sum((y - mu_fitted) ** 2)
1076
+ ss_tot = np.sum((y - np.mean(y)) ** 2)
1077
+ r_squared = 1 - (ss_res / ss_tot) if ss_tot > 0 else 0.0
1078
+ else:
1079
+ r_squared = None
1080
+
1081
+ return att, se, r_squared, pscore_stats
1082
+
1083
+ def _compute_se(
1084
+ self,
1085
+ X: np.ndarray,
1086
+ residuals: np.ndarray,
1087
+ coef_idx: int,
1088
+ ) -> float:
1089
+ """Compute standard error for a coefficient using robust or clustered SE."""
1090
+ n, k = X.shape
1091
+
1092
+ if self.robust:
1093
+ # HC1 robust standard errors
1094
+ vcov = compute_robust_vcov(X, residuals, cluster_ids=None)
1095
+ else:
1096
+ # Classical OLS standard errors
1097
+ mse = np.sum(residuals**2) / (n - k)
1098
+ try:
1099
+ vcov = np.linalg.solve(X.T @ X, mse * np.eye(k))
1100
+ except np.linalg.LinAlgError:
1101
+ vcov = np.linalg.pinv(X.T @ X) * mse
1102
+
1103
+ return float(np.sqrt(vcov[coef_idx, coef_idx]))
1104
+
1105
+ def _compute_ipw_se(
1106
+ self,
1107
+ y: np.ndarray,
1108
+ G: np.ndarray,
1109
+ P: np.ndarray,
1110
+ T: np.ndarray,
1111
+ cell_1: np.ndarray,
1112
+ cell_2: np.ndarray,
1113
+ cell_3: np.ndarray,
1114
+ cell_4: np.ndarray,
1115
+ p_cell_1: np.ndarray,
1116
+ p_cell_2: np.ndarray,
1117
+ p_cell_3: np.ndarray,
1118
+ p_cell_4: np.ndarray,
1119
+ att: float,
1120
+ ) -> float:
1121
+ """Compute standard error for IPW estimator using influence function."""
1122
+ n = len(y)
1123
+ post_mask = T == 1
1124
+
1125
+ # Influence function for IPW estimator (vectorized)
1126
+ inf_func = np.zeros(n)
1127
+
1128
+ n_ref = np.sum(cell_1)
1129
+ p_ref = n_ref / n
1130
+
1131
+ # Sign: +1 for post, -1 for pre
1132
+ sign = np.where(post_mask, 1.0, -1.0)
1133
+
1134
+ # Cell 1 (G=1, P=1): sign * (y - att) / p_ref
1135
+ inf_func[cell_1] = sign[cell_1] * (y[cell_1] - att) / p_ref
1136
+
1137
+ # Cell 2 (G=1, P=0): -sign * y * (p_cell_1 / p_cell_2) / p_ref
1138
+ w_2 = p_cell_1[cell_2] / p_cell_2[cell_2]
1139
+ inf_func[cell_2] = -sign[cell_2] * y[cell_2] * w_2 / p_ref
1140
+
1141
+ # Cell 3 (G=0, P=1): -sign * y * (p_cell_1 / p_cell_3) / p_ref
1142
+ w_3 = p_cell_1[cell_3] / p_cell_3[cell_3]
1143
+ inf_func[cell_3] = -sign[cell_3] * y[cell_3] * w_3 / p_ref
1144
+
1145
+ # Cell 4 (G=0, P=0): sign * y * (p_cell_1 / p_cell_4) / p_ref
1146
+ w_4 = p_cell_1[cell_4] / p_cell_4[cell_4]
1147
+ inf_func[cell_4] = sign[cell_4] * y[cell_4] * w_4 / p_ref
1148
+
1149
+ var_inf = np.var(inf_func, ddof=1)
1150
+ se = np.sqrt(var_inf / n)
1151
+
1152
+ return se
1153
+
1154
+ def get_params(self) -> Dict[str, Any]:
1155
+ """
1156
+ Get estimator parameters (sklearn-compatible).
1157
+
1158
+ Returns
1159
+ -------
1160
+ Dict[str, Any]
1161
+ Estimator parameters.
1162
+ """
1163
+ return {
1164
+ "estimation_method": self.estimation_method,
1165
+ "robust": self.robust,
1166
+ "cluster": self.cluster,
1167
+ "alpha": self.alpha,
1168
+ "pscore_trim": self.pscore_trim,
1169
+ }
1170
+
1171
+ def set_params(self, **params) -> "TripleDifference":
1172
+ """
1173
+ Set estimator parameters (sklearn-compatible).
1174
+
1175
+ Parameters
1176
+ ----------
1177
+ **params
1178
+ Estimator parameters.
1179
+
1180
+ Returns
1181
+ -------
1182
+ self
1183
+ """
1184
+ for key, value in params.items():
1185
+ if hasattr(self, key):
1186
+ setattr(self, key, value)
1187
+ else:
1188
+ raise ValueError(f"Unknown parameter: {key}")
1189
+ return self
1190
+
1191
+ def summary(self) -> str:
1192
+ """
1193
+ Get summary of estimation results.
1194
+
1195
+ Returns
1196
+ -------
1197
+ str
1198
+ Formatted summary.
1199
+ """
1200
+ if not self.is_fitted_:
1201
+ raise RuntimeError("Model must be fitted before calling summary()")
1202
+ assert self.results_ is not None
1203
+ return self.results_.summary()
1204
+
1205
+ def print_summary(self) -> None:
1206
+ """Print summary to stdout."""
1207
+ print(self.summary())
1208
+
1209
+
1210
+ # =============================================================================
1211
+ # Convenience function
1212
+ # =============================================================================
1213
+
1214
+
1215
+ def triple_difference(
1216
+ data: pd.DataFrame,
1217
+ outcome: str,
1218
+ group: str,
1219
+ partition: str,
1220
+ time: str,
1221
+ covariates: Optional[List[str]] = None,
1222
+ estimation_method: str = "dr",
1223
+ robust: bool = True,
1224
+ cluster: Optional[str] = None,
1225
+ alpha: float = 0.05,
1226
+ ) -> TripleDifferenceResults:
1227
+ """
1228
+ Estimate Triple Difference (DDD) treatment effect.
1229
+
1230
+ Convenience function that creates a TripleDifference estimator and
1231
+ fits it to the data in one step.
1232
+
1233
+ Parameters
1234
+ ----------
1235
+ data : pd.DataFrame
1236
+ DataFrame containing all variables.
1237
+ outcome : str
1238
+ Name of the outcome variable column.
1239
+ group : str
1240
+ Name of the group indicator column (0/1).
1241
+ 1 = treated group (e.g., states that enacted policy).
1242
+ partition : str
1243
+ Name of the partition/eligibility indicator column (0/1).
1244
+ 1 = eligible partition (e.g., women, targeted demographic).
1245
+ time : str
1246
+ Name of the time period indicator column (0/1).
1247
+ 1 = post-treatment period.
1248
+ covariates : list of str, optional
1249
+ List of covariate column names to adjust for.
1250
+ estimation_method : str, default="dr"
1251
+ Estimation method: "dr" (doubly robust), "reg" (regression),
1252
+ or "ipw" (inverse probability weighting).
1253
+ robust : bool, default=True
1254
+ Whether to use robust standard errors.
1255
+ cluster : str, optional
1256
+ Column name for cluster-robust standard errors.
1257
+ alpha : float, default=0.05
1258
+ Significance level for confidence intervals.
1259
+
1260
+ Returns
1261
+ -------
1262
+ TripleDifferenceResults
1263
+ Object containing estimation results.
1264
+
1265
+ Examples
1266
+ --------
1267
+ >>> from diff_diff import triple_difference
1268
+ >>> results = triple_difference(
1269
+ ... data,
1270
+ ... outcome='earnings',
1271
+ ... group='policy_state',
1272
+ ... partition='female',
1273
+ ... time='post_policy',
1274
+ ... covariates=['age', 'education']
1275
+ ... )
1276
+ >>> print(f"ATT: {results.att:.3f} (SE: {results.se:.3f})")
1277
+ """
1278
+ estimator = TripleDifference(
1279
+ estimation_method=estimation_method,
1280
+ robust=robust,
1281
+ cluster=cluster,
1282
+ alpha=alpha,
1283
+ )
1284
+ return estimator.fit(
1285
+ data=data,
1286
+ outcome=outcome,
1287
+ group=group,
1288
+ partition=partition,
1289
+ time=time,
1290
+ covariates=covariates,
1291
+ )
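An end-to-end sketch on simulated data; illustrative only, assuming diff_diff and its dependencies are installed, with the true effect built in as 1.5:

    import numpy as np
    import pandas as pd
    from diff_diff import triple_difference

    rng = np.random.default_rng(42)
    n_sim = 4000
    g = rng.integers(0, 2, n_sim)        # treated group indicator
    p = rng.integers(0, 2, n_sim)        # eligible partition indicator
    t = rng.integers(0, 2, n_sim)        # post-period indicator
    age = rng.normal(40, 10, n_sim)
    y = (2.0 + 0.5 * g + 0.3 * p + 0.4 * t + 0.02 * age
         + 1.5 * g * p * t + rng.normal(0, 1, n_sim))

    df = pd.DataFrame({"outcome": y, "group": g, "partition": p, "post": t, "age": age})
    results = triple_difference(
        df,
        outcome="outcome",
        group="group",
        partition="partition",
        time="post",
        covariates=["age"],
        estimation_method="dr",
    )
    results.print_summary()   # ATT estimate should land near 1.5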