diff-diff 2.3.2__cp313-cp313-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
diff_diff/twfe.py ADDED
@@ -0,0 +1,428 @@
1
+ """
2
+ Two-Way Fixed Effects estimator for panel Difference-in-Differences.
3
+ """
4
+
5
+ import warnings
6
+ from typing import TYPE_CHECKING, List, Optional
7
+
8
+ import numpy as np
9
+ import pandas as pd
10
+
11
+ if TYPE_CHECKING:
12
+ from diff_diff.bacon import BaconDecompositionResults
13
+
14
+ from diff_diff.estimators import DifferenceInDifferences
15
+ from diff_diff.linalg import LinearRegression
16
+ from diff_diff.results import DiDResults
17
+ from diff_diff.utils import (
18
+ compute_confidence_interval,
19
+ compute_p_value,
20
+ within_transform as _within_transform_util,
21
+ )
22
+
23
+
24
class TwoWayFixedEffects(DifferenceInDifferences):
    """
    Two-Way Fixed Effects (TWFE) estimator for panel DiD.

    Extends DifferenceInDifferences to handle panel data with unit
    and time fixed effects.

    Parameters
    ----------
    robust : bool, default=True
        Whether to use heteroskedasticity-robust standard errors.
    cluster : str, optional
        Column name for cluster-robust standard errors.
        If None, automatically clusters at the unit level (the `unit`
        parameter passed to `fit()`). This differs from
        DifferenceInDifferences where cluster=None means no clustering.
    alpha : float, default=0.05
        Significance level for confidence intervals.

    Notes
    -----
    This estimator uses the regression:

        Y_it = α_i + γ_t + β*(D_i × Post_t) + X_it'δ + ε_it

    where α_i are unit fixed effects and γ_t are time fixed effects.

    Warning: TWFE can be biased with staggered treatment timing
    and heterogeneous treatment effects. Consider using
    more robust estimators (e.g., Callaway-Sant'Anna) for
    staggered designs.
    """

    def fit(  # type: ignore[override]
        self,
        data: pd.DataFrame,
        outcome: str,
        treatment: str,
        time: str,
        unit: str,
        covariates: Optional[List[str]] = None
    ) -> DiDResults:
        """
        Fit Two-Way Fixed Effects model.

        Parameters
        ----------
        data : pd.DataFrame
            Panel data.
        outcome : str
            Name of outcome variable column.
        treatment : str
            Name of treatment indicator column.
        time : str
            Name of time period column (expected to be a binary 0/1
            post-treatment indicator; see the warnings emitted otherwise).
        unit : str
            Name of unit identifier column.
        covariates : list, optional
            List of covariate column names.

        Returns
        -------
        DiDResults
            Estimation results.

        Raises
        ------
        ValueError
            If the unit or cluster column is missing from `data`, or if
            the treatment effect cannot be identified due to collinearity.
        """
        # Validate unit column exists
        if unit not in data.columns:
            raise ValueError(f"Unit column '{unit}' not found in data")

        # Check for staggered treatment timing and warn if detected
        self._check_staggered_treatment(data, treatment, time, unit)

        # Warn if time has more than 2 unique values (not a binary post indicator)
        n_unique_time = data[time].nunique()
        if n_unique_time > 2:
            warnings.warn(
                f"The '{time}' column has {n_unique_time} unique values. "
                f"TwoWayFixedEffects expects a binary (0/1) post indicator. "
                f"Multi-period time values produce 'treated * period_number' instead of "
                f"'treated * post_indicator', which may not estimate the standard DiD ATT. "
                f"Consider creating a binary post column: "
                f"df['post'] = (df['{time}'] >= cutoff).astype(int)",
                UserWarning,
                stacklevel=2,
            )
        elif n_unique_time == 2:
            unique_vals = set(data[time].unique())
            # NOTE: {0, 1} == {False, True} in Python (bool subclasses int),
            # so a single comparison covers both encodings.
            if unique_vals != {0, 1}:
                warnings.warn(
                    f"The '{time}' column has values {sorted(unique_vals)} instead of {{0, 1}}. "
                    f"The ATT estimate is mathematically correct (within-transformation "
                    f"absorbs the scaling), but 0/1 encoding is recommended for clarity. "
                    f"Consider: df['{time}'] = (df['{time}'] == {max(unique_vals)}).astype(int)",
                    UserWarning,
                    stacklevel=2,
                )

        # Use unit-level clustering if not specified (use local variable to avoid mutation)
        cluster_var = self.cluster if self.cluster is not None else unit
        # Fail fast with a clear message rather than a raw KeyError later
        # when extracting cluster ids.
        if cluster_var not in data.columns:
            raise ValueError(f"Cluster column '{cluster_var}' not found in data")

        # Create treatment × post interaction from raw data before demeaning.
        # This must be within-transformed alongside the outcome and covariates
        # so that the regression uses demeaned regressors (FWL theorem).
        data = data.copy()
        data["_treatment_post"] = data[treatment] * data[time]

        # Demean outcome, covariates, AND interaction in a single pass
        all_vars = [outcome] + (covariates or []) + ["_treatment_post"]
        data_demeaned = _within_transform_util(
            data, all_vars, unit, time, suffix="_demeaned"
        )

        # Extract variables for regression
        y = data_demeaned[f"{outcome}_demeaned"].values
        X_list = [data_demeaned["_treatment_post_demeaned"].values]

        if covariates:
            for cov in covariates:
                X_list.append(data_demeaned[f"{cov}_demeaned"].values)

        X = np.column_stack([np.ones(len(y))] + X_list)

        # ATT is the coefficient on treatment_post (index 1)
        att_idx = 1

        # Degrees of freedom adjustment for fixed effects
        n_units = data[unit].nunique()
        n_times = n_unique_time  # already computed above; avoid a second pass
        df_adjustment = n_units + n_times - 2

        # Always use LinearRegression for initial fit (unified code path)
        # For wild bootstrap, we don't need cluster SEs from the initial fit
        cluster_ids = data[cluster_var].values

        # Pass rank_deficient_action to LinearRegression:
        # - "error": let LinearRegression raise immediately.
        # - "warn"/"silent": suppress the generic warning and use TWFE's
        #   context-specific error/warning messages below (more informative
        #   for panel data). A bare catch_warnings() with no filter is a
        #   no-op, so the "error" path behaves exactly as before.
        with warnings.catch_warnings():
            if self.rank_deficient_action != "error":
                warnings.filterwarnings("ignore", message="Rank-deficient design matrix")
            reg = LinearRegression(
                include_intercept=False,
                robust=True,
                cluster_ids=cluster_ids if self.inference != "wild_bootstrap" else None,
                alpha=self.alpha,
                rank_deficient_action=(
                    "error" if self.rank_deficient_action == "error" else "silent"
                ),
            ).fit(X, y, df_adjustment=df_adjustment)

        coefficients = reg.coefficients_
        residuals = reg.residuals_
        fitted = reg.fitted_values_
        r_squared = reg.r_squared()
        att = coefficients[att_idx]

        # Check for unidentified coefficients (collinearity)
        # Build column names for informative error messages
        column_names = ["intercept", "treatment×post"]
        if covariates:
            column_names.extend(covariates)

        nan_mask = np.isnan(coefficients)
        if np.any(nan_mask):
            dropped_indices = np.where(nan_mask)[0]
            dropped_names = [column_names[i] if i < len(column_names)
                             else f"column {i}" for i in dropped_indices]

            # Determine the source of collinearity for better error message
            if att_idx in dropped_indices:
                # Treatment coefficient is unidentified
                raise ValueError(
                    f"Treatment effect cannot be identified due to collinearity. "
                    f"Dropped columns: {', '.join(dropped_names)}. "
                    "This can happen when: (1) treatment is perfectly collinear with "
                    "unit/time fixed effects, (2) all treated units are treated in all "
                    "periods, or (3) a covariate is collinear with the treatment indicator. "
                    "Check your data structure and model specification."
                )
            else:
                # Only covariates are dropped - this is a warning, not an error
                # The ATT can still be estimated
                # Respect rank_deficient_action setting for warning
                if self.rank_deficient_action == "warn":
                    warnings.warn(
                        f"Some covariates are collinear and were dropped: "
                        f"{', '.join(dropped_names)}. The treatment effect is still identified.",
                        UserWarning,
                        stacklevel=2,
                    )

        # Get inference - either from bootstrap or analytical
        if self.inference == "wild_bootstrap":
            # Override with wild cluster bootstrap inference
            se, p_value, conf_int, t_stat, vcov, _ = self._run_wild_bootstrap_inference(
                X, y, residuals, cluster_ids, att_idx
            )
        else:
            # Use analytical inference from LinearRegression
            vcov = reg.vcov_
            inference = reg.get_inference(att_idx)
            se = inference.se
            t_stat = inference.t_stat
            p_value = inference.p_value
            conf_int = inference.conf_int

        # Count observations
        treated_units = data[data[treatment] == 1][unit].unique()
        n_treated = len(treated_units)
        n_control = n_units - n_treated

        # Determine inference method and bootstrap info
        inference_method = "analytical"
        n_bootstrap_used = None
        n_clusters_used = None
        if self._bootstrap_results is not None:
            inference_method = "wild_bootstrap"
            n_bootstrap_used = self._bootstrap_results.n_bootstrap
            n_clusters_used = self._bootstrap_results.n_clusters

        self.results_ = DiDResults(
            att=att,
            se=se,
            t_stat=t_stat,
            p_value=p_value,
            conf_int=conf_int,
            n_obs=len(y),
            n_treated=n_treated,
            n_control=n_control,
            alpha=self.alpha,
            coefficients={"ATT": float(att)},
            vcov=vcov,
            residuals=residuals,
            fitted_values=fitted,
            r_squared=r_squared,
            inference_method=inference_method,
            n_bootstrap=n_bootstrap_used,
            n_clusters=n_clusters_used,
        )

        self.is_fitted_ = True
        return self.results_

    def _within_transform(
        self,
        data: pd.DataFrame,
        outcome: str,
        unit: str,
        time: str,
        covariates: Optional[List[str]] = None
    ) -> pd.DataFrame:
        """
        Apply within transformation to remove unit and time fixed effects.

        This implements the standard two-way within transformation:
        y_it - y_i. - y_.t + y_..

        Parameters
        ----------
        data : pd.DataFrame
            Panel data.
        outcome : str
            Outcome variable name.
        unit : str
            Unit identifier column.
        time : str
            Time period column.
        covariates : list, optional
            Covariate column names.

        Returns
        -------
        pd.DataFrame
            Data with demeaned variables (suffixed with "_demeaned").
        """
        variables = [outcome] + (covariates or [])
        return _within_transform_util(data, variables, unit, time, suffix="_demeaned")

    def _check_staggered_treatment(
        self,
        data: pd.DataFrame,
        treatment: str,
        time: str,
        unit: str,
    ) -> None:
        """
        Check for staggered treatment timing and warn if detected.

        Identifies if different units start treatment at different times,
        which can bias TWFE estimates when treatment effects are heterogeneous.

        Note: This check requires ``time`` to have actual period values (not
        binary 0/1). With binary time, all treated units appear to start at
        time=1, so staggering is undetectable.
        """
        # Find first treatment time for each unit
        treated_obs = data[data[treatment] == 1]
        if len(treated_obs) == 0:
            return  # No treated observations

        # Get first treatment time per unit
        first_treat_times = treated_obs.groupby(unit)[time].min()
        unique_treat_times = first_treat_times.unique()

        if len(unique_treat_times) > 1:
            n_groups = len(unique_treat_times)
            warnings.warn(
                f"Staggered treatment timing detected: {n_groups} treatment cohorts "
                f"start treatment at different times. TWFE can be biased when treatment "
                f"effects are heterogeneous across time. Consider using:\n"
                f" - CallawaySantAnna estimator for robust estimates\n"
                f" - TwoWayFixedEffects.decompose() to diagnose the decomposition\n"
                f" - bacon_decompose() to see weight on 'forbidden' comparisons",
                UserWarning,
                stacklevel=3,
            )

    def decompose(
        self,
        data: pd.DataFrame,
        outcome: str,
        unit: str,
        time: str,
        first_treat: str,
        weights: str = "approximate",
    ) -> "BaconDecompositionResults":
        """
        Perform Goodman-Bacon decomposition of TWFE estimate.

        Decomposes the TWFE estimate into a weighted average of all possible
        2x2 DiD comparisons, revealing which comparisons drive the estimate
        and whether problematic "forbidden comparisons" are involved.

        Parameters
        ----------
        data : pd.DataFrame
            Panel data with unit and time identifiers.
        outcome : str
            Name of outcome variable column.
        unit : str
            Name of unit identifier column.
        time : str
            Name of time period column.
        first_treat : str
            Name of column indicating when each unit was first treated.
            Use 0 (or np.inf) for never-treated units.
        weights : str, default="approximate"
            Weight calculation method:
            - "approximate": Fast simplified formula (default). Good for
              diagnostic purposes where relative weights are sufficient.
            - "exact": Variance-based weights from Goodman-Bacon (2021)
              Theorem 1. Use for publication-quality decompositions.

        Returns
        -------
        BaconDecompositionResults
            Decomposition results showing:
            - TWFE estimate and its weighted-average breakdown
            - List of all 2x2 comparisons with estimates and weights
            - Total weight by comparison type (clean vs forbidden)

        Examples
        --------
        >>> twfe = TwoWayFixedEffects()
        >>> decomp = twfe.decompose(
        ...     data, outcome='y', unit='id', time='t', first_treat='treat_year'
        ... )
        >>> decomp.print_summary()
        >>> # Check weight on forbidden comparisons
        >>> if decomp.total_weight_later_vs_earlier > 0.2:
        ...     print("Warning: significant forbidden comparison weight")

        Notes
        -----
        This decomposition is essential for understanding potential TWFE bias
        in staggered adoption designs. The three comparison types are:

        1. **Treated vs Never-treated**: Clean comparisons using never-treated
           units as controls. These are always valid.

        2. **Earlier vs Later treated**: Uses later-treated units as controls
           before they receive treatment. These are valid.

        3. **Later vs Earlier treated**: Uses already-treated units as controls.
           These "forbidden comparisons" can introduce bias when treatment
           effects are dynamic (changing over time since treatment).

        See Also
        --------
        bacon_decompose : Standalone decomposition function
        BaconDecomposition : Class-based decomposition interface
        CallawaySantAnna : Robust estimator that avoids forbidden comparisons
        """
        # Local import to avoid a circular dependency with diff_diff.bacon.
        from diff_diff.bacon import BaconDecomposition

        decomp = BaconDecomposition(weights=weights)
        return decomp.fit(data, outcome, unit, time, first_treat)