taxcalc 4.4.0__py3-none-any.whl → 4.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. taxcalc/__init__.py +1 -1
  2. taxcalc/calcfunctions.py +326 -171
  3. taxcalc/calculator.py +35 -34
  4. taxcalc/cli/tc.py +6 -7
  5. taxcalc/consumption.py +9 -4
  6. taxcalc/data.py +8 -8
  7. taxcalc/decorators.py +3 -3
  8. taxcalc/growdiff.py +5 -0
  9. taxcalc/growfactors.py +1 -1
  10. taxcalc/parameters.py +85 -42
  11. taxcalc/policy.py +1 -1
  12. taxcalc/records.py +1 -0
  13. taxcalc/records_variables.json +6 -0
  14. taxcalc/taxcalcio.py +49 -44
  15. taxcalc/tests/cmpi_cps_expect.txt +6 -6
  16. taxcalc/tests/cmpi_puf_expect.txt +6 -6
  17. taxcalc/tests/conftest.py +42 -41
  18. taxcalc/tests/test_4package.py +9 -7
  19. taxcalc/tests/test_benefits.py +9 -8
  20. taxcalc/tests/test_calcfunctions.py +55 -38
  21. taxcalc/tests/test_calculator.py +11 -6
  22. taxcalc/tests/test_compare.py +44 -50
  23. taxcalc/tests/test_compatible_data.py +9 -7
  24. taxcalc/tests/test_consumption.py +38 -18
  25. taxcalc/tests/test_cpscsv.py +33 -31
  26. taxcalc/tests/test_data.py +31 -24
  27. taxcalc/tests/test_decorators.py +84 -32
  28. taxcalc/tests/test_growdiff.py +16 -13
  29. taxcalc/tests/test_growfactors.py +8 -8
  30. taxcalc/tests/test_parameters.py +54 -58
  31. taxcalc/tests/test_policy.py +14 -12
  32. taxcalc/tests/test_puf_var_stats.py +14 -14
  33. taxcalc/tests/test_pufcsv.py +40 -40
  34. taxcalc/tests/test_records.py +73 -60
  35. taxcalc/tests/test_reforms.py +34 -31
  36. taxcalc/tests/test_responses.py +4 -4
  37. taxcalc/tests/test_taxcalcio.py +76 -62
  38. taxcalc/tests/test_utils.py +78 -46
  39. taxcalc/utils.py +49 -42
  40. taxcalc/validation/taxsim35/taxsim_emulation.json +1 -5
  41. {taxcalc-4.4.0.dist-info → taxcalc-4.4.1.dist-info}/METADATA +19 -5
  42. {taxcalc-4.4.0.dist-info → taxcalc-4.4.1.dist-info}/RECORD +46 -46
  43. {taxcalc-4.4.0.dist-info → taxcalc-4.4.1.dist-info}/WHEEL +1 -1
  44. {taxcalc-4.4.0.dist-info → taxcalc-4.4.1.dist-info}/LICENSE +0 -0
  45. {taxcalc-4.4.0.dist-info → taxcalc-4.4.1.dist-info}/entry_points.txt +0 -0
  46. {taxcalc-4.4.0.dist-info → taxcalc-4.4.1.dist-info}/top_level.txt +0 -0
@@ -4,8 +4,6 @@ Tests of Tax-Calculator utility functions.
4
4
  # CODING-STYLE CHECKS:
5
5
  # pycodestyle test_utils.py
6
6
  # pylint --disable=locally-disabled test_utils.py
7
- #
8
- # pylint: disable=missing-docstring
9
7
 
10
8
  import os
11
9
  import math
@@ -13,25 +11,29 @@ import random
13
11
  import numpy as np
14
12
  import pandas as pd
15
13
  import pytest
16
- # pylint: disable=import-error
17
- from taxcalc import Policy, Records, Calculator
18
- from taxcalc.utils import (DIST_VARIABLES,
19
- DIST_TABLE_COLUMNS, DIST_TABLE_LABELS,
20
- DIFF_VARIABLES,
21
- DIFF_TABLE_COLUMNS, DIFF_TABLE_LABELS,
22
- SOI_AGI_BINS,
23
- create_difference_table,
24
- weighted_sum, weighted_mean,
25
- wage_weighted, agi_weighted,
26
- expanded_income_weighted,
27
- add_income_table_row_variable,
28
- add_quantile_table_row_variable,
29
- mtr_graph_data, atr_graph_data,
30
- xtr_graph_plot, write_graph_file,
31
- read_egg_csv, read_egg_json, delete_file,
32
- bootstrap_se_ci,
33
- certainty_equivalent,
34
- ce_aftertax_expanded_income)
14
+ from taxcalc.policy import Policy
15
+ from taxcalc.records import Records
16
+ from taxcalc.calculator import Calculator
17
+ from taxcalc.utils import (
18
+ DIST_VARIABLES,
19
+ DIST_TABLE_COLUMNS, DIST_TABLE_LABELS,
20
+ DIFF_VARIABLES,
21
+ DIFF_TABLE_COLUMNS, DIFF_TABLE_LABELS,
22
+ SOI_AGI_BINS,
23
+ create_difference_table,
24
+ weighted_sum, weighted_mean,
25
+ wage_weighted, agi_weighted,
26
+ expanded_income_weighted,
27
+ add_income_table_row_variable,
28
+ add_quantile_table_row_variable,
29
+ mtr_graph_data, atr_graph_data,
30
+ xtr_graph_plot, write_graph_file,
31
+ read_egg_csv, read_egg_json, delete_file,
32
+ bootstrap_se_ci,
33
+ certainty_equivalent,
34
+ ce_aftertax_expanded_income,
35
+ json_to_dict
36
+ )
35
37
 
36
38
 
37
39
  DATA = [[1.0, 2, 'a'],
@@ -56,6 +58,7 @@ DATA_FLOAT = [[1.0, 2, 'a'],
56
58
 
57
59
 
58
60
  def test_validity_of_name_lists():
61
+ """Test docstring"""
59
62
  assert len(DIST_TABLE_COLUMNS) == len(DIST_TABLE_LABELS)
60
63
  records_varinfo = Records(data=None)
61
64
  assert set(DIST_VARIABLES).issubset(records_varinfo.CALCULATED_VARS |
@@ -68,7 +71,9 @@ def test_validity_of_name_lists():
68
71
 
69
72
 
70
73
  def test_create_tables(cps_subsample):
74
+ """Test docstring"""
71
75
  # pylint: disable=too-many-statements,too-many-branches
76
+
72
77
  # create a current-law Policy object and Calculator object calc1
73
78
  rec = Records.cps_constructor(data=cps_subsample)
74
79
  pol = Policy()
@@ -108,7 +113,7 @@ def test_create_tables(cps_subsample):
108
113
  test_failure = True
109
114
  print('diff xbin', tabcol)
110
115
  for val in diff[tabcol].values:
111
- print('{:.1f},'.format(val))
116
+ print(f'{val:.1f},')
112
117
 
113
118
  diff = create_difference_table(calc1.dataframe(DIFF_VARIABLES),
114
119
  calc2.dataframe(DIFF_VARIABLES),
@@ -136,7 +141,7 @@ def test_create_tables(cps_subsample):
136
141
  test_failure = True
137
142
  print('diff xdec', tabcol)
138
143
  for val in diff[tabcol].values:
139
- print('{:.1f},'.format(val))
144
+ print(f'{val:.1f},')
140
145
 
141
146
  tabcol = 'share_of_change'
142
147
  expected = [0.0,
@@ -154,13 +159,13 @@ def test_create_tables(cps_subsample):
154
159
  100.0,
155
160
  13.2,
156
161
  8.3,
157
- 1.4,]
162
+ 1.4]
158
163
  if not np.allclose(diff[tabcol].values.astype('float'), expected,
159
164
  atol=0.1, rtol=0.0):
160
165
  test_failure = True
161
166
  print('diff xdec', tabcol)
162
167
  for val in diff[tabcol].values:
163
- print('{:.1f},'.format(val))
168
+ print(f'{val:.1f},')
164
169
 
165
170
  tabcol = 'pc_aftertaxinc'
166
171
  expected = [np.nan,
@@ -184,7 +189,7 @@ def test_create_tables(cps_subsample):
184
189
  test_failure = True
185
190
  print('diff xdec', tabcol)
186
191
  for val in diff[tabcol].values:
187
- print('{:.1f},'.format(val))
192
+ print(f'{val:.1f},')
188
193
 
189
194
  # test creating various distribution tables
190
195
 
@@ -212,7 +217,7 @@ def test_create_tables(cps_subsample):
212
217
  test_failure = True
213
218
  print('dist xdec', tabcol)
214
219
  for val in dist[tabcol].values:
215
- print('{:.1f},'.format(val))
220
+ print(f'{val:.1f},')
216
221
 
217
222
  tabcol = 'count_ItemDed'
218
223
  expected = [0.0,
@@ -236,7 +241,7 @@ def test_create_tables(cps_subsample):
236
241
  test_failure = True
237
242
  print('dist xdec', tabcol)
238
243
  for val in dist[tabcol].values:
239
- print('{:.1f},'.format(val))
244
+ print(f'{val:.1f},')
240
245
 
241
246
  tabcol = 'expanded_income'
242
247
  expected = [0.0,
@@ -260,7 +265,7 @@ def test_create_tables(cps_subsample):
260
265
  test_failure = True
261
266
  print('dist xdec', tabcol)
262
267
  for val in dist[tabcol].values:
263
- print('{:.1f},'.format(val))
268
+ print(f'{val:.1f},')
264
269
 
265
270
  tabcol = 'aftertax_income'
266
271
  expected = [0.0,
@@ -284,7 +289,7 @@ def test_create_tables(cps_subsample):
284
289
  test_failure = True
285
290
  print('dist xdec', tabcol)
286
291
  for val in dist[tabcol].values:
287
- print('{:.1f},'.format(val))
292
+ print(f'{val:.1f},')
288
293
 
289
294
  dist, _ = calc2.distribution_tables(None, 'standard_income_bins')
290
295
  assert isinstance(dist, pd.DataFrame)
@@ -308,7 +313,7 @@ def test_create_tables(cps_subsample):
308
313
  test_failure = True
309
314
  print('dist xbin', tabcol)
310
315
  for val in dist[tabcol].values:
311
- print('{:.1f},'.format(val))
316
+ print(f'{val:.1f},')
312
317
 
313
318
  tabcol = 'count_ItemDed'
314
319
  expected = [0.0,
@@ -330,10 +335,10 @@ def test_create_tables(cps_subsample):
330
335
  test_failure = True
331
336
  print('dist xbin', tabcol)
332
337
  for val in dist[tabcol].values:
333
- print('{:.1f},'.format(val))
338
+ print(f'{val:.1f},')
334
339
 
335
340
  if test_failure:
336
- assert 1 == 2
341
+ assert False, 'ERROR: test failure'
337
342
 
338
343
 
339
344
  def test_diff_count_precision():
@@ -409,11 +414,11 @@ def test_diff_count_precision():
409
414
  cilo = bsd['cilo'] * 1e-3
410
415
  cihi = bsd['cihi'] * 1e-3
411
416
  if dump:
412
- res = '{}EST={:.1f} B={} alpha={:.3f} se={:.2f} ci=[ {:.2f} , {:.2f} ]'
413
- print(
414
- res.format('STANDARD-BIN10: ',
415
- data_estimate, bs_samples, alpha, stderr, cilo, cihi)
417
+ res = (
418
+ f'EST={data_estimate:.1f} B={bs_samples} alpha={alpha:.3f} '
419
+ f'se={stderr:.2f} ci=[ {cilo:.2f} , {cihi:.2f} ]'
416
420
  )
421
+ print(f'STANDARD-BIN10: {res}')
417
422
  assert abs((stderr / 1.90) - 1) < 0.0008
418
423
  # NOTE: a se of 1.90 thousand implies that when comparing the difference
419
424
  # in the weighted number of filing units in STANDARD bin 10 with a
@@ -442,11 +447,11 @@ def test_diff_count_precision():
442
447
  cilo = bsd['cilo'] * 1e-3
443
448
  cihi = bsd['cihi'] * 1e-3
444
449
  if dump:
445
- res = '{}EST={:.1f} B={} alpha={:.3f} se={:.2f} ci=[ {:.2f} , {:.2f} ]'
446
- print(
447
- res.format('STANDARD-BIN11: ',
448
- data_estimate, bs_samples, alpha, stderr, cilo, cihi)
450
+ res = (
451
+ f'EST={data_estimate:.1f} B={bs_samples} alpha={alpha:.3f} '
452
+ f'se={stderr:.2f} ci=[ {cilo:.2f} , {cihi:.2f} ]'
449
453
  )
454
+ print(f'STANDARD-BIN11: {res}')
450
455
  assert abs((stderr / 0.85) - 1) < 0.0040
451
456
  # NOTE: a se of 0.85 thousand implies that when comparing the difference
452
457
  # in the weighted number of filing units in STANDARD bin 11 with a
@@ -468,6 +473,7 @@ def test_diff_count_precision():
468
473
 
469
474
 
470
475
  def test_weighted_mean():
476
+ """Test docstring"""
471
477
  dfx = pd.DataFrame(data=DATA, columns=['tax_diff', 's006', 'label'])
472
478
  grouped = dfx.groupby('label')
473
479
  diffs = grouped.apply(weighted_mean, 'tax_diff', include_groups=False)
@@ -477,18 +483,21 @@ def test_weighted_mean():
477
483
 
478
484
 
479
485
  def test_wage_weighted():
486
+ """Test docstring"""
480
487
  dfx = pd.DataFrame(data=WEIGHT_DATA, columns=['var', 's006', 'e00200'])
481
488
  wvar = wage_weighted(dfx, 'var')
482
489
  assert round(wvar, 4) == 2.5714
483
490
 
484
491
 
485
492
  def test_agi_weighted():
493
+ """Test docstring"""
486
494
  dfx = pd.DataFrame(data=WEIGHT_DATA, columns=['var', 's006', 'c00100'])
487
495
  wvar = agi_weighted(dfx, 'var')
488
496
  assert round(wvar, 4) == 2.5714
489
497
 
490
498
 
491
499
  def test_expanded_income_weighted():
500
+ """Test docstring"""
492
501
  dfx = pd.DataFrame(data=WEIGHT_DATA,
493
502
  columns=['var', 's006', 'expanded_income'])
494
503
  wvar = expanded_income_weighted(dfx, 'var')
@@ -496,6 +505,7 @@ def test_expanded_income_weighted():
496
505
 
497
506
 
498
507
  def test_weighted_sum():
508
+ """Test docstring"""
499
509
  dfx = pd.DataFrame(data=DATA, columns=['tax_diff', 's006', 'label'])
500
510
  grouped = dfx.groupby('label')
501
511
  diffs = grouped.apply(weighted_sum, 'tax_diff', include_groups=False)
@@ -508,6 +518,7 @@ EPSILON = 1e-5
508
518
 
509
519
 
510
520
  def test_add_income_trow_var():
521
+ """Test docstring"""
511
522
  dta = np.arange(1, 1e6, 5000)
512
523
  vdf = pd.DataFrame(data=dta, columns=['expanded_income'])
513
524
  vdf = add_income_table_row_variable(vdf, 'expanded_income', SOI_AGI_BINS)
@@ -520,6 +531,7 @@ def test_add_income_trow_var():
520
531
 
521
532
 
522
533
  def test_add_quantile_trow_var():
534
+ """Test docstring"""
523
535
  dfx = pd.DataFrame(data=DATA, columns=['expanded_income', 's006', 'label'])
524
536
  dfb = add_quantile_table_row_variable(dfx, 'expanded_income',
525
537
  100, decile_details=False,
@@ -537,6 +549,7 @@ def test_add_quantile_trow_var():
537
549
 
538
550
 
539
551
  def test_dist_table_sum_row(cps_subsample):
552
+ """Test docstring"""
540
553
  rec = Records.cps_constructor(data=cps_subsample)
541
554
  calc = Calculator(policy=Policy(), records=rec)
542
555
  calc.calc_all()
@@ -566,6 +579,7 @@ def test_dist_table_sum_row(cps_subsample):
566
579
 
567
580
 
568
581
  def test_diff_table_sum_row(cps_subsample):
582
+ """Test docstring"""
569
583
  rec = Records.cps_constructor(data=cps_subsample)
570
584
  # create a current-law Policy object and Calculator calc1
571
585
  pol = Policy()
@@ -595,6 +609,7 @@ def test_diff_table_sum_row(cps_subsample):
595
609
 
596
610
 
597
611
  def test_mtr_graph_data(cps_subsample):
612
+ """Test docstring"""
598
613
  recs = Records.cps_constructor(data=cps_subsample)
599
614
  calc = Calculator(policy=Policy(), records=recs)
600
615
  year = calc.current_year
@@ -607,7 +622,7 @@ def test_mtr_graph_data(cps_subsample):
607
622
  income_measure='expanded_income',
608
623
  dollar_weighting=True)
609
624
  with pytest.raises(ValueError):
610
- mtr_graph_data(None, year, mars=list())
625
+ mtr_graph_data(None, year, mars=[])
611
626
  with pytest.raises(ValueError):
612
627
  mtr_graph_data(None, year, mars='ALL', mtr_variable='e00200s')
613
628
  with pytest.raises(ValueError):
@@ -627,6 +642,7 @@ def test_mtr_graph_data(cps_subsample):
627
642
 
628
643
 
629
644
  def test_atr_graph_data(cps_subsample):
645
+ """Test docstring"""
630
646
  pol = Policy()
631
647
  rec = Records.cps_constructor(data=cps_subsample)
632
648
  calc = Calculator(policy=pol, records=rec)
@@ -636,7 +652,7 @@ def test_atr_graph_data(cps_subsample):
636
652
  with pytest.raises(ValueError):
637
653
  atr_graph_data(None, year, mars=0)
638
654
  with pytest.raises(ValueError):
639
- atr_graph_data(None, year, mars=list())
655
+ atr_graph_data(None, year, mars=[])
640
656
  with pytest.raises(ValueError):
641
657
  atr_graph_data(None, year, atr_measure='badtax')
642
658
  calc.calc_all()
@@ -651,6 +667,7 @@ def test_atr_graph_data(cps_subsample):
651
667
 
652
668
 
653
669
  def test_xtr_graph_plot(cps_subsample):
670
+ """Test docstring"""
654
671
  recs = Records.cps_constructor(data=cps_subsample)
655
672
  calc = Calculator(policy=Policy(), records=recs)
656
673
  mtr = 0.20 * np.ones_like(cps_subsample['e00200'])
@@ -673,11 +690,14 @@ def test_xtr_graph_plot(cps_subsample):
673
690
 
674
691
 
675
692
  def temporary_filename(suffix=''):
693
+ """Function docstring"""
676
694
  # Return string containing the temporary filename.
677
- return 'tmp{}{}'.format(random.randint(10000000, 99999999), suffix)
695
+ rint = random.randint(10000000, 99999999)
696
+ return f'tmp{rint}{suffix}'
678
697
 
679
698
 
680
699
  def test_write_graph_file(cps_subsample):
700
+ """Test docstring"""
681
701
  recs = Records.cps_constructor(data=cps_subsample)
682
702
  calc = Calculator(policy=Policy(), records=recs)
683
703
  mtr = 0.20 * np.ones_like(cps_subsample['e00200'])
@@ -699,7 +719,7 @@ def test_write_graph_file(cps_subsample):
699
719
  os.remove(htmlfname)
700
720
  except OSError:
701
721
  pass # sometimes we can't remove a generated temporary file
702
- assert 'write_graph_file()_ok' == 'no'
722
+ assert False, 'ERROR: write_graph_file() failed'
703
723
  # if try was successful, try to remove the file
704
724
  if os.path.isfile(htmlfname):
705
725
  try:
@@ -709,6 +729,7 @@ def test_write_graph_file(cps_subsample):
709
729
 
710
730
 
711
731
  def test_ce_aftertax_income(cps_subsample):
732
+ """Test docstring"""
712
733
  # test certainty_equivalent() function with con>cmin
713
734
  con = 5000
714
735
  cmin = 1000
@@ -752,19 +773,22 @@ def test_ce_aftertax_income(cps_subsample):
752
773
 
753
774
 
754
775
  def test_read_egg_csv():
776
+ """Test docstring"""
755
777
  with pytest.raises(ValueError):
756
778
  read_egg_csv('bad_filename')
757
779
 
758
780
 
759
781
  def test_read_egg_json():
782
+ """Test docstring"""
760
783
  with pytest.raises(ValueError):
761
784
  read_egg_json('bad_filename')
762
785
 
763
786
 
764
787
  def test_create_delete_temp_file():
788
+ """Test docstring"""
765
789
  # test temporary_filename() and delete_file() functions
766
790
  fname = temporary_filename()
767
- with open(fname, 'w') as tmpfile:
791
+ with open(fname, 'w', encoding='utf-8') as tmpfile:
768
792
  tmpfile.write('any content will do')
769
793
  assert os.path.isfile(fname) is True
770
794
  delete_file(fname)
@@ -772,6 +796,7 @@ def test_create_delete_temp_file():
772
796
 
773
797
 
774
798
  def test_bootstrap_se_ci():
799
+ """Test docstring"""
775
800
  # Use treated mouse data from Table 2.1 and
776
801
  # results from Table 2.2 and Table 13.1 in
777
802
  # Bradley Efron and Robert Tibshirani,
@@ -787,6 +812,13 @@ def test_bootstrap_se_ci():
787
812
 
788
813
 
789
814
  def test_table_columns_labels():
815
+ """Test docstring"""
790
816
  # check that length of two lists are the same
791
817
  assert len(DIST_TABLE_COLUMNS) == len(DIST_TABLE_LABELS)
792
818
  assert len(DIFF_TABLE_COLUMNS) == len(DIFF_TABLE_LABELS)
819
+
820
+
821
+ def test_invalid_json_to_dict():
822
+ """Test docstring"""
823
+ with pytest.raises(ValueError):
824
+ json_to_dict('invalid_json_text')
taxcalc/utils.py CHANGED
@@ -172,7 +172,8 @@ def add_quantile_table_row_variable(dframe, income_measure, num_quantiles,
172
172
  and the top decile is broken into three subgroups
173
173
  (90-95, 95-99, and top 1%).
174
174
  """
175
- # pylint: disable=too-many-arguments,too-many-locals
175
+ # pylint: disable=too-many-arguments,too-many-positional-arguments
176
+ # pylint: disable=too-many-locals
176
177
  assert isinstance(dframe, pd.DataFrame)
177
178
  assert income_measure in dframe
178
179
  assert 's006' in dframe
@@ -272,7 +273,7 @@ def get_sums(dframe):
272
273
  -------
273
274
  Pandas Series object containing column sums indexed by dframe column names.
274
275
  """
275
- sums = dict()
276
+ sums = {}
276
277
  for col in dframe.columns.values.tolist():
277
278
  if col != 'table_row':
278
279
  sums[col] = dframe[col].sum()
@@ -353,6 +354,7 @@ def create_distribution_table(vdf, groupby, income_measure,
353
354
  if pop_quantiles:
354
355
  assert groupby == 'weighted_deciles'
355
356
  # sort the data given specified groupby and income_measure
357
+ dframe = None
356
358
  if groupby == 'weighted_deciles':
357
359
  dframe = add_quantile_table_row_variable(vdf, income_measure, 10,
358
360
  pop_quantiles=pop_quantiles,
@@ -544,6 +546,7 @@ def create_difference_table(vdf1, vdf2, groupby, tax_to_diff,
544
546
  else:
545
547
  df2['count'] = df2['s006']
546
548
  # add table_row column to df2 given specified groupby and income_measure
549
+ dframe = None
547
550
  if groupby == 'weighted_deciles':
548
551
  dframe = add_quantile_table_row_variable(
549
552
  df2, baseline_expanded_income, 10,
@@ -680,7 +683,7 @@ def create_diagnostic_table(dframe_list, year_list):
680
683
  agi = vdf['c00100']
681
684
  odict['AGI ($b)'] = round((agi * wghts).sum() * in_billions, 3)
682
685
  # number of itemizers
683
- val = (wghts[vdf['c04470'] > 0.].sum())
686
+ val = wghts[vdf['c04470'] > 0.].sum()
684
687
  odict['Itemizers (#m)'] = round(val * in_millions, 2)
685
688
  # itemized deduction
686
689
  ided1 = vdf['c04470'] * wghts
@@ -761,7 +764,7 @@ def create_diagnostic_table(dframe_list, year_list):
761
764
  assert isinstance(year_list[0], int)
762
765
  assert isinstance(dframe_list[0], pd.DataFrame)
763
766
  # construct diagnostic table
764
- tlist = list()
767
+ tlist = []
765
768
  for year, vardf in zip(year_list, dframe_list):
766
769
  odict = diagnostic_table_odict(vardf)
767
770
  ddf = pd.DataFrame(data=odict, index=[year], columns=odict.keys())
@@ -854,8 +857,8 @@ def mtr_graph_data(vdf, year,
854
857
  -------
855
858
  dictionary object suitable for passing to xtr_graph_plot utility function
856
859
  """
857
- # pylint: disable=too-many-arguments,too-many-statements
858
- # pylint: disable=too-many-locals,too-many-branches
860
+ # pylint: disable=too-many-arguments,too-many-positional-arguments
861
+ # pylint: disable=too-many-locals,too-many-branches,too-many-statements
859
862
  # check validity of function arguments
860
863
  # . . check income_measure value
861
864
  weighting_function = weighted_mean
@@ -930,26 +933,25 @@ def mtr_graph_data(vdf, year,
930
933
  lines['base'] = mtr1_series
931
934
  lines['reform'] = mtr2_series
932
935
  # construct dictionary containing merged data and auto-generated labels
933
- data = dict()
936
+ data = {}
934
937
  data['lines'] = lines
935
938
  if dollar_weighting:
936
- income_str = 'Dollar-weighted {}'.format(income_str)
937
- mtr_str = 'Dollar-weighted {}'.format(mtr_str)
938
- data['ylabel'] = '{} MTR'.format(mtr_str)
939
- xlabel_str = 'Baseline {} Percentile'.format(income_str)
939
+ income_str = f'Dollar-weighted {income_str}'
940
+ mtr_str = f'Dollar-weighted {mtr_str}'
941
+ data['ylabel'] = f'{mtr_str} MTR'
942
+ xlabel_str = f'Baseline {income_str} Percentile'
940
943
  if mars != 'ALL':
941
- xlabel_str = '{} for MARS={}'.format(xlabel_str, mars)
944
+ xlabel_str = f'{xlabel_str} for MARS={mars}'
942
945
  data['xlabel'] = xlabel_str
943
- var_str = '{}'.format(mtr_variable)
946
+ var_str = f'{mtr_variable}'
944
947
  if mtr_variable == 'e00200p' and alt_e00200p_text != '':
945
- var_str = '{}'.format(alt_e00200p_text)
948
+ var_str = f'{alt_e00200p_text}'
946
949
  if mtr_variable == 'e00200p' and mtr_wrt_full_compen:
947
- var_str = '{} wrt full compensation'.format(var_str)
948
- title_str = 'Mean Marginal Tax Rate for {} by Income Percentile'
949
- title_str = title_str.format(var_str)
950
+ var_str = f'{var_str} wrt full compensation'
951
+ title_str = f'Mean Marginal Tax Rate for {var_str} by Income Percentile'
950
952
  if mars != 'ALL':
951
- title_str = '{} for MARS={}'.format(title_str, mars)
952
- title_str = '{} for {}'.format(title_str, year)
953
+ title_str = f'{title_str} for MARS={mars}'
954
+ title_str = f'{title_str} for {year}'
953
955
  data['title'] = title_str
954
956
  return data
955
957
 
@@ -1067,17 +1069,17 @@ def atr_graph_data(vdf, year,
1067
1069
  # include only percentiles with average income no less than min_avginc
1068
1070
  lines = lines[included]
1069
1071
  # construct dictionary containing plot lines and auto-generated labels
1070
- data = dict()
1072
+ data = {}
1071
1073
  data['lines'] = lines
1072
- data['ylabel'] = '{} Average Tax Rate'.format(atr_str)
1074
+ data['ylabel'] = f'{atr_str} Average Tax Rate'
1073
1075
  xlabel_str = 'Baseline Expanded-Income Percentile'
1074
1076
  if mars != 'ALL':
1075
- xlabel_str = '{} for MARS={}'.format(xlabel_str, mars)
1077
+ xlabel_str = f'{xlabel_str} for MARS={mars}'
1076
1078
  data['xlabel'] = xlabel_str
1077
1079
  title_str = 'Average Tax Rate by Income Percentile'
1078
1080
  if mars != 'ALL':
1079
- title_str = '{} for MARS={}'.format(title_str, mars)
1080
- title_str = '{} for {}'.format(title_str, year)
1081
+ title_str = f'{title_str} for MARS={mars}'
1082
+ title_str = f'{title_str} for {year}'
1081
1083
  data['title'] = title_str
1082
1084
  return data
1083
1085
 
@@ -1148,7 +1150,7 @@ def xtr_graph_plot(data,
1148
1150
  raster graphics file. There is no option to make the bokeh.plotting
1149
1151
  figure generate a vector graphics file such as an EPS file.
1150
1152
  """
1151
- # pylint: disable=too-many-arguments
1153
+ # pylint: disable=too-many-arguments,too-many-positional-arguments
1152
1154
  if title == '':
1153
1155
  title = data['title']
1154
1156
  fig = bp.figure(width=width, height=height, title=title)
@@ -1238,13 +1240,13 @@ def pch_graph_data(vdf, year, pop_quantiles=False):
1238
1240
  # include only percentiles with average income no less than min_avginc
1239
1241
  line = line[included]
1240
1242
  # construct dictionary containing plot line and auto-generated labels
1241
- data = dict()
1243
+ data = {}
1242
1244
  data['line'] = line
1243
1245
  data['ylabel'] = 'Change in After-Tax Expanded Income'
1244
1246
  data['xlabel'] = 'Baseline Expanded-Income Percentile'
1245
1247
  title_str = ('Percentage Change in After-Tax Expanded Income '
1246
1248
  'by Income Percentile')
1247
- title_str = '{} for {}'.format(title_str, year)
1249
+ title_str = f'{title_str} for {year}'
1248
1250
  data['title'] = title_str
1249
1251
  return data
1250
1252
 
@@ -1286,7 +1288,7 @@ def pch_graph_plot(data,
1286
1288
  -----
1287
1289
  See Notes to xtr_graph_plot function.
1288
1290
  """
1289
- # pylint: disable=too-many-arguments
1291
+ # pylint: disable=too-many-arguments,too-many-positional-arguments
1290
1292
  if title == '':
1291
1293
  title = data['title']
1292
1294
  fig = bp.figure(width=width, height=height, title=title)
@@ -1309,8 +1311,13 @@ def pch_graph_plot(data,
1309
1311
  fig.yaxis.axis_label_text_font_size = '12pt'
1310
1312
  fig.yaxis.axis_label_text_font_style = 'normal'
1311
1313
  fig.yaxis[0].formatter = PrintfTickFormatter(format='%.1f')
1312
- # return fig # bokeh 3.4.1 cannot save this figure for some unknown reason
1313
- return None
1314
+ # bokeh cannot save this fig saying:
1315
+ # bokeh.core.serialization.SerializationError:
1316
+ # can't serialize <class 'range'>
1317
+ # so the "return fig" statement is replaced by Python's implicit
1318
+ # "return None" until the above logic can be made compatible with
1319
+ # modern bokeh packages
1320
+ # return fig
1314
1321
 
1315
1322
 
1316
1323
  def write_graph_file(figure, filename, title):
@@ -1471,7 +1478,7 @@ def ce_aftertax_expanded_income(df1, df2,
1471
1478
  cmin = 1000
1472
1479
  # compute aggregate combined tax revenue and aggregate after-tax income
1473
1480
  billion = 1.0e-9
1474
- cedict = dict()
1481
+ cedict = {}
1475
1482
  cedict['tax1'] = weighted_sum(df1, 'combined') * billion
1476
1483
  cedict['tax2'] = weighted_sum(df2, 'combined') * billion
1477
1484
  if require_no_agg_tax_change:
@@ -1495,8 +1502,8 @@ def ce_aftertax_expanded_income(df1, df2,
1495
1502
  ati2 = df2['expanded_income'] - df2['combined']
1496
1503
  # calculate certainty-equivaluent after-tax income in df1 and df2
1497
1504
  cedict['crra'] = crras
1498
- ce1 = list()
1499
- ce2 = list()
1505
+ ce1 = []
1506
+ ce2 = []
1500
1507
  for crra in crras:
1501
1508
  eu1 = expected_utility(ati1, prob, crra, cmin)
1502
1509
  ce1.append(certainty_equivalent(eu1, crra, cmin))
@@ -1517,8 +1524,8 @@ def read_egg_csv(fname, index_col=None):
1517
1524
  path_in_egg = implibres.files('taxcalc').joinpath(fname)
1518
1525
  with implibres.as_file(path_in_egg) as rname:
1519
1526
  vdf = pd.read_csv(rname, index_col=index_col)
1520
- except Exception:
1521
- raise ValueError(f'could not read {fname} data from egg')
1527
+ except Exception as exc:
1528
+ raise ValueError(f'could not read {fname} data from egg') from exc
1522
1529
  # cannot call read_egg_ function in unit tests
1523
1530
  return vdf # pragma: no cover
1524
1531
 
@@ -1531,10 +1538,10 @@ def read_egg_json(fname):
1531
1538
  try:
1532
1539
  path_in_egg = implibres.files('taxcalc').joinpath(fname)
1533
1540
  with implibres.as_file(path_in_egg) as rname:
1534
- vdf = json.loads(rname)
1535
- except Exception:
1536
- raise ValueError(f'could not read {fname} data from egg')
1537
- # cannot call read_egg_ function in unit tests
1541
+ pdict = json.loads(rname)
1542
+ except Exception as exc:
1543
+ raise ValueError(f'could not read {fname} data from package') from exc
1544
+ # cannot call read_egg_ function in pytest unit tests
1538
1545
  return pdict # pragma: no cover
1539
1546
 
1540
1547
 
@@ -1557,7 +1564,7 @@ def bootstrap_se_ci(data, seed, num_samples, statistic, alpha):
1557
1564
  assert isinstance(num_samples, int)
1558
1565
  assert callable(statistic) # function that computes statistic from data
1559
1566
  assert isinstance(alpha, float)
1560
- bsest = dict()
1567
+ bsest = {}
1561
1568
  bsest['seed'] = seed
1562
1569
  np.random.seed(seed)
1563
1570
  dlen = len(data)
@@ -1607,7 +1614,7 @@ def json_to_dict(json_text):
1607
1614
  linenum = 0
1608
1615
  for line in text_lines:
1609
1616
  linenum += 1
1610
- msg += '{:04d}{}'.format(linenum, line) + '\n'
1617
+ msg += f'{linenum:04d}{line}\n'
1611
1618
  msg += bline + '\n'
1612
- raise ValueError(msg)
1619
+ raise ValueError(msg) from valerr
1613
1620
  return ordered_dict
@@ -36,14 +36,10 @@
36
36
  // income of the receiving spouse, if made under a
37
37
  // divorce or separation agreement executed after
38
38
  // Dec. 31, 2018.
39
-
40
- // (4) PT_qbid_limit_switch = false implies TAXSIM35-like QBI deduction logic.
41
39
  {
42
40
  "AMT_child_em_c_age": {"2013": 24},
43
41
 
44
42
  "EITC_excess_InvestIncome_rt": {"2013": 1.0},
45
43
 
46
- "ALD_AlimonyReceived_hc": {"2019": 1.0},
47
-
48
- "PT_qbid_limit_switch": {"2018": false}
44
+ "ALD_AlimonyReceived_hc": {"2019": 1.0}
49
45
  }