taxcalc 4.4.0__py3-none-any.whl → 4.5.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries; it is provided for informational purposes only.
Files changed (57)
  1. taxcalc/__init__.py +1 -1
  2. taxcalc/calcfunctions.py +326 -171
  3. taxcalc/calculator.py +35 -34
  4. taxcalc/cli/tc.py +6 -7
  5. taxcalc/consumption.json +1 -1
  6. taxcalc/consumption.py +9 -4
  7. taxcalc/cps_weights.csv.gz +0 -0
  8. taxcalc/data.py +8 -8
  9. taxcalc/decorators.py +3 -3
  10. taxcalc/growdiff.json +1 -1
  11. taxcalc/growdiff.py +5 -0
  12. taxcalc/growfactors.csv +26 -25
  13. taxcalc/growfactors.py +1 -1
  14. taxcalc/parameters.py +85 -42
  15. taxcalc/policy.py +2 -2
  16. taxcalc/policy_current_law.json +87 -87
  17. taxcalc/puf_ratios.csv +15 -14
  18. taxcalc/puf_weights.csv.gz +0 -0
  19. taxcalc/records.py +1 -0
  20. taxcalc/records_variables.json +6 -0
  21. taxcalc/reforms/ext.json +21 -21
  22. taxcalc/taxcalcio.py +49 -44
  23. taxcalc/tests/cmpi_cps_expect.txt +6 -6
  24. taxcalc/tests/cmpi_puf_expect.txt +6 -6
  25. taxcalc/tests/conftest.py +43 -42
  26. taxcalc/tests/cpscsv_agg_expect.csv +22 -22
  27. taxcalc/tests/puf_var_wght_means_by_year.csv +70 -70
  28. taxcalc/tests/pufcsv_agg_expect.csv +22 -22
  29. taxcalc/tests/test_4package.py +9 -7
  30. taxcalc/tests/test_benefits.py +9 -8
  31. taxcalc/tests/test_calcfunctions.py +55 -38
  32. taxcalc/tests/test_calculator.py +11 -6
  33. taxcalc/tests/test_compare.py +45 -51
  34. taxcalc/tests/test_compatible_data.py +9 -7
  35. taxcalc/tests/test_consumption.py +38 -18
  36. taxcalc/tests/test_cpscsv.py +33 -31
  37. taxcalc/tests/test_data.py +31 -24
  38. taxcalc/tests/test_decorators.py +84 -32
  39. taxcalc/tests/test_growdiff.py +16 -13
  40. taxcalc/tests/test_growfactors.py +8 -8
  41. taxcalc/tests/test_parameters.py +55 -59
  42. taxcalc/tests/test_policy.py +14 -12
  43. taxcalc/tests/test_puf_var_stats.py +14 -14
  44. taxcalc/tests/test_pufcsv.py +40 -40
  45. taxcalc/tests/test_records.py +73 -60
  46. taxcalc/tests/test_reforms.py +35 -32
  47. taxcalc/tests/test_responses.py +4 -4
  48. taxcalc/tests/test_taxcalcio.py +76 -62
  49. taxcalc/tests/test_utils.py +78 -46
  50. taxcalc/utils.py +49 -42
  51. taxcalc/validation/taxsim35/taxsim_emulation.json +1 -5
  52. {taxcalc-4.4.0.dist-info → taxcalc-4.5.0.dist-info}/METADATA +19 -5
  53. {taxcalc-4.4.0.dist-info → taxcalc-4.5.0.dist-info}/RECORD +57 -57
  54. {taxcalc-4.4.0.dist-info → taxcalc-4.5.0.dist-info}/WHEEL +1 -1
  55. {taxcalc-4.4.0.dist-info → taxcalc-4.5.0.dist-info}/LICENSE +0 -0
  56. {taxcalc-4.4.0.dist-info → taxcalc-4.5.0.dist-info}/entry_points.txt +0 -0
  57. {taxcalc-4.4.0.dist-info → taxcalc-4.5.0.dist-info}/top_level.txt +0 -0

taxcalc/tests/test_pufcsv.py
@@ -8,8 +8,6 @@ and from the Census CPS file for the corresponding year. If you have
  acquired from IRS the most recent SOI PUF file and want to execute
  this program, contact the Tax-Calculator development team to discuss
  your options.
-
- Read Tax-Calculator/TESTING.md for details.
  """
  # CODING-STYLE CHECKS:
  # pycodestyle test_pufcsv.py
@@ -20,8 +18,9 @@ import json
  import pytest
  import numpy as np
  import pandas as pd
- # pylint: disable=import-error
- from taxcalc import Policy, Records, Calculator
+ from taxcalc.policy import Policy
+ from taxcalc.records import Records
+ from taxcalc.calculator import Calculator


  START_YEAR = 2017
@@ -57,7 +56,7 @@ def test_agg(tests_path, puf_fullsample):
  if not np.allclose(adt[icol].values, edt[str(icol)].values):
  diffs = True
  if diffs:
- new_filename = '{}{}'.format(aggres_path[:-10], 'actual.csv')
+ new_filename = f'{aggres_path[:-10]}actual.csv'
  adt.to_csv(new_filename, float_format='%.1f')
  msg = 'PUFCSV AGG RESULTS DIFFER FOR FULL-SAMPLE\n'
  msg += '-------------------------------------------------\n'
@@ -118,27 +117,32 @@ def mtr_bin_counts(mtr_data, bin_edges, recid):
  res = ''
  (bincount, _) = np.histogram(mtr_data.round(decimals=4), bins=bin_edges)
  sum_bincount = np.sum(bincount)
- res += '{} :'.format(sum_bincount)
+ res += f'{sum_bincount} :'
  for idx in range(len(bin_edges) - 1):
- res += ' {:6d}'.format(bincount[idx])
+ res += f' {bincount[idx]:6d}'
  res += '\n'
  if sum_bincount < mtr_data.size:
  res += 'WARNING: sum of bin counts is too low\n'
- recinfo = ' mtr={:.2f} for recid={}\n'
  mtr_min = mtr_data.min()
  mtr_max = mtr_data.max()
  bin_min = min(bin_edges)
  bin_max = max(bin_edges)
  if mtr_min < bin_min:
- res += ' min(mtr)={:.2f}\n'.format(mtr_min)
+ res += f' min(mtr)={mtr_min:.2f}\n'
  for idx in range(mtr_data.size):
  if mtr_data[idx] < bin_min:
- res += recinfo.format(mtr_data[idx], recid[idx])
+ res += (
+ f' mtr={mtr_data[idx]:.2f} '
+ f'for recid={recid[idx]}\n'
+ )
  if mtr_max > bin_max:
- res += ' max(mtr)={:.2f}\n'.format(mtr_max)
+ res += f' max(mtr)={mtr_max:.2f}\n'
  for idx in range(mtr_data.size):
  if mtr_data[idx] > bin_max:
- res += recinfo.format(mtr_data[idx], recid[idx])
+ res += (
+ f' mtr={mtr_data[idx]:.2f} '
+ f'for recid={recid[idx]}\n'
+ )
  return res


@@ -169,24 +173,20 @@ def nonsmall_diffs(linelist1, linelist2, small=0.0):
  for line1, line2 in zip(linelist1, linelist2):
  if line1 == line2:
  continue
- else:
- tokens1 = line1.replace(',', '').split()
- tokens2 = line2.replace(',', '').split()
- for tok1, tok2 in zip(tokens1, tokens2):
- tok1_isfloat = isfloat(tok1)
- tok2_isfloat = isfloat(tok2)
- if tok1_isfloat and tok2_isfloat:
- if abs(float(tok1) - float(tok2)) <= smallamt:
- continue
- else:
- return True
- elif not tok1_isfloat and not tok2_isfloat:
- if tok1 == tok2:
- continue
- else:
- return True
- else:
- return True
+ tokens1 = line1.replace(',', '').split()
+ tokens2 = line2.replace(',', '').split()
+ for tok1, tok2 in zip(tokens1, tokens2):
+ tok1_isfloat = isfloat(tok1)
+ tok2_isfloat = isfloat(tok2)
+ if tok1_isfloat and tok2_isfloat:
+ if abs(float(tok1) - float(tok2)) <= smallamt:
+ continue
+ return True
+ if not tok1_isfloat and not tok2_isfloat:
+ if tok1 == tok2:
+ continue
+ return True
+ return True
  return False


@@ -207,7 +207,7 @@ def test_mtr(tests_path, puf_path):
  res += 'MTR computed using NEGATIVE finite_diff '
  else:
  res += 'MTR computed using POSITIVE finite_diff '
- res += 'for tax year {}\n'.format(MTR_TAX_YEAR)
+ res += f'for tax year {MTR_TAX_YEAR}\n'
  # create a Policy object (clp) containing current-law policy parameters
  clp = Policy()
  clp.set_year(MTR_TAX_YEAR)
@@ -216,15 +216,15 @@ def test_mtr(tests_path, puf_path):
  recid = puf.RECID # pylint: disable=no-member
  # create a Calculator object using clp policy and puf records
  calc = Calculator(policy=clp, records=puf)
- res += '{} = {}\n'.format('Total number of data records', puf.array_length)
+ res += f'Total number of data records = {puf.array_length}\n'
  res += 'PTAX mtr histogram bin edges:\n'
- res += ' {}\n'.format(PTAX_MTR_BIN_EDGES)
+ res += f' {PTAX_MTR_BIN_EDGES}\n'
  res += 'ITAX mtr histogram bin edges:\n'
- res += ' {}\n'.format(ITAX_MTR_BIN_EDGES)
+ res += f' {ITAX_MTR_BIN_EDGES}\n'
  variable_header = 'PTAX and ITAX mtr histogram bin counts for'
  # compute marginal tax rate (mtr) histograms for each mtr variable
  for var_str in Calculator.MTR_VALID_VARIABLES:
- zero_out = (var_str == 'e01400')
+ zero_out = var_str == 'e01400'
  (mtr_ptax, mtr_itax, _) = calc.mtr(variable_str=var_str,
  negative_finite_diff=MTR_NEG_DIFF,
  zero_out_calculated_vars=zero_out,
@@ -250,17 +250,17 @@ def test_mtr(tests_path, puf_path):
  # only MARS==2 filing units have valid MTR values
  mtr_ptax = mtr_ptax[calc.array('MARS') == 2]
  mtr_itax = mtr_itax[calc.array('MARS') == 2]
- res += '{} {}:\n'.format(variable_header, var_str)
+ res += f'{variable_header} {var_str}:\n'
  res += mtr_bin_counts(mtr_ptax, PTAX_MTR_BIN_EDGES, recid)
  res += mtr_bin_counts(mtr_itax, ITAX_MTR_BIN_EDGES, recid)
  # check for differences between actual and expected results
  mtrres_path = os.path.join(tests_path, 'pufcsv_mtr_expect.txt')
- with open(mtrres_path, 'r') as expected_file:
+ with open(mtrres_path, 'r', encoding='utf-8') as expected_file:
  txt = expected_file.read()
  expected_results = txt.rstrip('\n\t ') + '\n' # cleanup end of file txt
  if nonsmall_diffs(res.splitlines(True), expected_results.splitlines(True)):
- new_filename = '{}{}'.format(mtrres_path[:-10], 'actual.txt')
- with open(new_filename, 'w') as new_file:
+ new_filename = f'{mtrres_path[:-10]}actual.txt'
+ with open(new_filename, 'w', encoding='utf-8') as new_file:
  new_file.write(res)
  msg = 'PUFCSV MTR RESULTS DIFFER\n'
  msg += '-------------------------------------------------\n'
@@ -372,7 +372,7 @@ def test_puf_availability(tests_path, puf_path):
  pufvars = set(list(pufdf))
  # make set of variable names that are marked as puf.csv available
  rvpath = os.path.join(tests_path, '..', 'records_variables.json')
- with open(rvpath, 'r') as rvfile:
+ with open(rvpath, 'r', encoding='utf-8') as rvfile:
  rvdict = json.load(rvfile)
  recvars = set()
  for vname, vdict in rvdict['read'].items():

taxcalc/tests/test_records.py
@@ -1,39 +1,49 @@
+ """
+ Test Records class and its methods.
+ """
  # CODING-STYLE CHECKS:
  # pycodestyle test_records.py
+ # pylint --disable=locally-disabled test_records.py

  import os
  import json
+ from io import StringIO
  import numpy as np
- from numpy.testing import assert_array_equal
  import pandas as pd
  import pytest
- from io import StringIO
- from taxcalc import GrowFactors, Policy, Records, Calculator
+ from taxcalc import GrowFactors, Policy, Records


- def test_incorrect_Records_instantiation(cps_subsample):
+ def test_incorrect_records_instantiation(cps_subsample, cps_fullsample):
+ """Test docstring"""
  with pytest.raises(ValueError):
- recs = Records(data=list())
+ _ = Records(data=[])
  with pytest.raises(ValueError):
- recs = Records(data=cps_subsample, gfactors=list())
+ _ = Records(data=cps_subsample, gfactors=[])
  with pytest.raises(ValueError):
- recs = Records(data=cps_subsample, gfactors=None, weights=list())
+ _ = Records(data=cps_subsample, gfactors=None, weights=[])
  with pytest.raises(ValueError):
- recs = Records(data=cps_subsample, gfactors=None, weights=None,
- start_year=list())
+ _ = Records(data=cps_subsample, gfactors=None, weights=None,
+ start_year=[])
  with pytest.raises(ValueError):
- recs = Records(data=cps_subsample, gfactors=None, weights=None,
- adjust_ratios=list())
+ _ = Records(data=cps_subsample, gfactors=None, weights=None,
+ adjust_ratios=[])
+ # test error raise when num of records is greater than num of weights
+ wghts_path = os.path.join(Records.CODE_PATH, Records.PUF_WEIGHTS_FILENAME)
+ puf_wghts = pd.read_csv(wghts_path)
+ with pytest.raises(ValueError):
+ _ = Records(data=cps_fullsample, weights=puf_wghts, start_year=2020)


- def test_correct_Records_instantiation(cps_subsample):
+ def test_correct_records_instantiation(cps_subsample):
+ """Test docstring"""
  rec1 = Records.cps_constructor(data=cps_subsample, gfactors=None)
  assert rec1
- assert np.all(rec1.MARS != 0)
- assert rec1.current_year == rec1.data_year
- sum_e00200_in_cps_year = rec1.e00200.sum()
+ assert np.all(getattr(rec1, 'MARS') != 0)
+ assert getattr(rec1, 'current_year') == getattr(rec1, 'data_year')
+ sum_e00200_in_cps_year = getattr(rec1, 'e00200').sum()
  rec1.increment_year()
- sum_e00200_in_cps_year_plus_one = rec1.e00200.sum()
+ sum_e00200_in_cps_year_plus_one = getattr(rec1, 'e00200').sum()
  assert sum_e00200_in_cps_year_plus_one == sum_e00200_in_cps_year
  wghts_path = os.path.join(Records.CODE_PATH, Records.CPS_WEIGHTS_FILENAME)
  wghts_df = pd.read_csv(wghts_path)
@@ -46,80 +56,83 @@ def test_correct_Records_instantiation(cps_subsample):
  adjust_ratios=ratios_df,
  exact_calculations=False)
  assert rec2
- assert np.all(rec2.MARS != 0)
- assert rec2.current_year == rec2.data_year
+ assert np.all(getattr(rec2, 'MARS') != 0)
+ assert getattr(rec2, 'current_year') == getattr(rec2, 'data_year')


  def test_read_cps_data(cps_fullsample):
+ """Test docstring"""
  data = Records.read_cps_data()
  assert data.equals(cps_fullsample)


  @pytest.mark.parametrize("csv", [
  (
- u'RECID,MARS,e00200,e00200p,e00200s\n'
- u'1, 2, 200000, 200000, 0.03\n'
+ 'RECID,MARS,e00200,e00200p,e00200s\n'
+ '1, 2, 200000, 200000, 0.03\n'
  ),
  (
- u'RECID,MARS,e00900,e00900p,e00900s\n'
- u'1, 2, 200000, 200000, 0.03\n'
+ 'RECID,MARS,e00900,e00900p,e00900s\n'
+ '1, 2, 200000, 200000, 0.03\n'
  ),
  (
- u'RECID,MARS,e02100,e02100p,e02100s\n'
- u'1, 2, 200000, 200000, 0.03\n'
+ 'RECID,MARS,e02100,e02100p,e02100s\n'
+ '1, 2, 200000, 200000, 0.03\n'
  ),
  (
- u'RECID,MARS,e00200,e00200p,e00200s\n'
- u'1, 4, 200000, 100000, 100000\n'
+ 'RECID,MARS,e00200,e00200p,e00200s\n'
+ '1, 4, 200000, 100000, 100000\n'
  ),
  (
- u'RECID,MARS,e00900,e00900p,e00900s\n'
- u'1, 4, 200000, 100000, 100000\n'
+ 'RECID,MARS,e00900,e00900p,e00900s\n'
+ '1, 4, 200000, 100000, 100000\n'
  ),
  (
- u'RECID,MARS,e02100,e02100p,e02100s\n'
- u'1, 4, 200000, 100000, 100000\n'
+ 'RECID,MARS,e02100,e02100p,e02100s\n'
+ '1, 4, 200000, 100000, 100000\n'
  ),
  (
- u'RECID,MARS,k1bx14s\n'
- u'1, 4, 0.03\n'
+ 'RECID,MARS,k1bx14s\n'
+ '1, 4, 0.03\n'
  ),
  (
- u'RxCID,MARS\n'
- u'1, 2\n'
+ 'RxCID,MARS\n'
+ '1, 2\n'
  ),
  (
- u'RECID,e00300\n'
- u'1, 456789\n'
+ 'RECID,e00300\n'
+ '1, 456789\n'
  ),
  (
- u'RECID,MARS\n'
- u'1, 6\n'
+ 'RECID,MARS\n'
+ '1, 6\n'
  ),
  (
- u'RECID,MARS,EIC\n'
- u'1, 5, 4\n'
+ 'RECID,MARS,EIC\n'
+ '1, 5, 4\n'
  ),
  (
- u'RECID,MARS,e00600,e00650\n'
- u'1, 1, 8, 9\n'
+ 'RECID,MARS,e00600,e00650\n'
+ '1, 1, 8, 9\n'
  ),
  (
- u'RECID,MARS,e01500,e01700\n'
- u'1, 1, 6, 7\n'
+ 'RECID,MARS,e01500,e01700\n'
+ '1, 1, 6, 7\n'
  ),
  (
- u'RECID,MARS,PT_SSTB_income\n'
- u'1, 1, 2\n'
+ 'RECID,MARS,PT_SSTB_income\n'
+ '1, 1, 2\n'
  )
  ])
  def test_read_data(csv):
+ """Test docstring"""
  df = pd.read_csv(StringIO(csv))
  with pytest.raises(ValueError):
  Records(data=df)


  def test_for_duplicate_names():
+ """Test docstring"""
  records_varinfo = Records(data=None)
  varnames = set()
  for varname in records_varinfo.USABLE_READ_VARS:
@@ -142,15 +155,16 @@ def test_records_variables_content(tests_path):
  """
  Check completeness and consistency of records_variables.json content.
  """
+ # pylint: disable=too-many-locals
+
  # specify test information
  reqkeys = ['type', 'desc', 'form']
  first_year = Policy.JSON_START_YEAR
  last_form_year = 2017
  # read JSON variable file into a dictionary
  path = os.path.join(tests_path, '..', 'records_variables.json')
- vfile = open(path, 'r')
- allvars = json.load(vfile)
- vfile.close()
+ with open(path, 'r', encoding='utf-8') as vfile:
+ allvars = json.load(vfile)
  assert isinstance(allvars, dict)
  # check elements in each variable dictionary
  for iotype in ['read', 'calc']:
@@ -181,20 +195,19 @@ def test_records_variables_content(tests_path):
  indefinite_yrange = False
  eyr = int(yrlist[1])
  if fyr != (prior_eyr + 1):
- msg1 = '{} fyr {}'.format(vname, fyr)
- msg2 = '!= prior_eyr_1 {}'.format(prior_eyr + 1)
+ msg1 = f'{vname} fyr {fyr}'
+ msg2 = f'!= prior_eyr_1 {prior_eyr + 1}'
  assert msg1 == msg2
  if eyr > last_form_year:
- msg1 = '{} eyr {}'.format(vname, eyr)
- msg2 = '> last_form_year {}'.format(last_form_year)
+ msg1 = f'{vname} eyr {eyr}'
+ msg2 = f'> last_form_year {last_form_year}'
  assert msg1 == msg2
  prior_eyr = eyr
  if not indefinite_yrange and len(yranges) > 0:
- prior_ey_ok = (prior_eyr == last_form_year or
- prior_eyr == last_form_year - 1)
+ prior_ey_ok = prior_eyr in (last_form_year, last_form_year - 1)
  if not prior_ey_ok:
- msg1 = '{} prior_eyr {}'.format(vname, prior_eyr)
- msg2 = '!= last_form_year {}'.format(last_form_year)
+ msg1 = f'{vname} prior_eyr {prior_eyr}'
+ msg2 = f'!= last_form_year {last_form_year}'
  assert msg1 == msg2


@@ -206,7 +219,7 @@ def test_csv_input_vars_md_contents(tests_path):
  civ_path = os.path.join(tests_path, '..', 'validation',
  'CSV_INPUT_VARS.md')
  civ_set = set()
- with open(civ_path, 'r') as civfile:
+ with open(civ_path, 'r', encoding='utf-8') as civfile:
  msg = 'DUPLICATE VARIABLE(S) IN CSV_INPUT_VARS.MD FILE:\n'
  found_duplicates = False
  for line in civfile:
@@ -219,7 +232,7 @@ def test_csv_input_vars_md_contents(tests_path):
  continue # skip two lines that are the table head
  if var in civ_set:
  found_duplicates = True
- msg += 'VARIABLE= {}\n'.format(var)
+ msg += f'VARIABLE= {var}\n'
  else:
  civ_set.add(var)
  if found_duplicates:
@@ -230,5 +243,5 @@ def test_csv_input_vars_md_contents(tests_path):
  valid_less_civ = records_varinfo.USABLE_READ_VARS - civ_set
  msg = 'VARIABLE(S) IN USABLE_READ_VARS BUT NOT CSV_INPUT_VARS.MD:\n'
  for var in valid_less_civ:
- msg += 'VARIABLE= {}\n'.format(var)
+ msg += f'VARIABLE= {var}\n' # pylint: disable=consider-using-join
  raise ValueError(msg)

taxcalc/tests/test_reforms.py
@@ -11,8 +11,9 @@ import json
  import pytest
  import numpy as np
  import pandas as pd
- # pylint: disable=import-error
- from taxcalc import Calculator, Policy, Records
+ from taxcalc.policy import Policy
+ from taxcalc.records import Records
+ from taxcalc.calculator import Calculator


  def test_2017_law_reform(tests_path):
@@ -24,7 +25,7 @@ def test_2017_law_reform(tests_path):
  # create pre metadata dictionary for 2017_law.json reform in fyear
  pol = Policy()
  reform_file = os.path.join(tests_path, '..', 'reforms', '2017_law.json')
- with open(reform_file, 'r') as rfile:
+ with open(reform_file, 'r', encoding='utf-8') as rfile:
  rtext = rfile.read()
  pol.implement_reform(Policy.read_json_reform(rtext))
  assert not pol.parameter_warnings
@@ -57,7 +58,7 @@ def test_2017_law_reform(tests_path):
  }
  assert isinstance(pre_expect, dict)
  assert set(pre_expect.keys()).issubset(set(pre_mdata.keys()))
- for name in pre_expect:
+ for name in pre_expect: # pylint: disable=consider-using-dict-items
  aval = pre_mdata[name]
  if aval.ndim == 2:
  act = aval[0][0] # comparing only first item in a vector parameter
@@ -65,11 +66,11 @@ def test_2017_law_reform(tests_path):
  act = aval[0]
  exp = pre_expect[name]['value']
  if pre_expect[name]['relation'] == '<':
- assert act < exp, '{} a={} !< e={}'.format(name, act, exp)
+ assert act < exp, f'{name} a={act} !< e={exp}'
  elif pre_expect[name]['relation'] == '>':
- assert act > exp, '{} a={} !> e={}'.format(name, act, exp)
+ assert act > exp, f'{name} a={act} !> e={exp}'
  elif pre_expect[name]['relation'] == '=':
- assert act == exp, '{} a={} != e={}'.format(name, act, exp)
+ assert act == exp, f'{name} a={act} != e={exp}'


  @pytest.mark.rtr
@@ -88,7 +89,8 @@ def test_round_trip_reforms(fyear, tests_path):
  and subsequent reform files that represent recent legislation are
  specified in a consistent manner.
  """
- # pylint: disable=too-many-locals
+ # pylint: disable=too-many-locals,too-many-statements
+
  # create clp metadata dictionary for current-law policy in fyear
  clp_pol = Policy()
  clp_pol.set_year(fyear)
@@ -143,21 +145,23 @@ def test_round_trip_reforms(fyear, tests_path):
  assert clp_mdata.keys() == rtr_mdata.keys()
  fail_dump = False
  if fail_dump:
- rtr_fails = open('fails_rtr', 'w', encoding='utf-8')
- clp_fails = open('fails_clp', 'w', encoding='utf-8')
+ rtr_fails = open( # pylint: disable=consider-using-with
+ 'fails_rtr', 'w', encoding='utf-8'
+ )
+ clp_fails = open( # pylint: disable=consider-using-with
+ 'fails_clp', 'w', encoding='utf-8'
+ )
  fail_params = []
  msg = '\nRound-trip-reform and current-law-policy param values differ for:'
- for pname in clp_mdata.keys():
+ for pname in clp_mdata.keys(): # pylint: disable=consider-using-dict-items
  rtr_val = rtr_mdata[pname]
  clp_val = clp_mdata[pname]
  if not np.allclose(rtr_val, clp_val):
  fail_params.append(pname)
- msg += '\n {} in {} : rtr={} clp={}'.format(
- pname, fyear, rtr_val, clp_val
- )
+ msg += f'\n {pname} in {fyear} : rtr={rtr_val} clp={clp_val}'
  if fail_dump:
- rtr_fails.write('{} {} {}\n'.format(pname, fyear, rtr_val))
- clp_fails.write('{} {} {}\n'.format(pname, fyear, clp_val))
+ rtr_fails.write(f'{pname} {fyear} {rtr_val}\n')
+ clp_fails.write(f'{pname} {fyear} {clp_val}\n')
  if fail_dump:
  rtr_fails.close()
  clp_fails.close()
@@ -188,7 +192,7 @@ def test_reform_json_and_output(tests_path):
  # varnames AGI STD TaxInc ITAX PTAX
  stats = calc.dataframe(varlist)
  stats['RECID'] = stats['RECID'].astype(int)
- with open(resfilename, 'w') as resfile:
+ with open(resfilename, 'w', encoding='utf-8') as resfile:
  stats.to_csv(resfile, index=False, float_format='%.2f')

  # embedded function used only in test_reform_json_and_output
@@ -217,7 +221,7 @@ def test_reform_json_and_output(tests_path):
  weights=None,
  adjust_ratios=None)
  # specify list of reform failures
- failures = list()
+ failures = []
  # specify current-law-policy Calculator object
  calc = Calculator(policy=Policy(), records=cases, verbose=False)
  calc.advance_to_year(tax_year)
@@ -239,7 +243,7 @@ def test_reform_json_and_output(tests_path):
  if jrf.endswith('ext.json'):
  continue # skip ext.json, which is tested below in test_ext_reform
  # determine reform's baseline by reading contents of jrf
- with open(jrf, 'r') as rfile:
+ with open(jrf, 'r', encoding='utf-8') as rfile:
  jrf_text = rfile.read()
  pre_tcja_baseline = 'Reform_Baseline: 2017_law.json' in jrf_text
  # implement the reform relative to its baseline
@@ -263,7 +267,7 @@ def test_reform_json_and_output(tests_path):
  if failures:
  msg = 'Following reforms have res-vs-out differences:\n'
  for ref in failures:
- msg += '{}\n'.format(os.path.basename(ref))
+ msg += f'{os.path.basename(ref)}\n'
  raise ValueError(msg)


@@ -285,7 +289,7 @@ def reform_results(rid, reform_dict, puf_data, reform_2017_law):
  calc1 = Calculator(policy=pol, records=rec, verbose=False)
  # create reform Calculator object, calc2
  start_year = reform_dict['start_year']
- reform = dict()
+ reform = {}
  for name, value in reform_dict['value'].items():
  reform[name] = {start_year: value}
  pol.implement_reform(reform)
@@ -296,7 +300,7 @@ def reform_results(rid, reform_dict, puf_data, reform_2017_law):
  # calculate baseline and reform output for several years
  output_type = reform_dict['output_type']
  num_years = 4
- results = list()
+ results = []
  for _ in range(0, num_years):
  calc1.calc_all()
  baseline = calc1.array(output_type)
@@ -308,9 +312,9 @@ def reform_results(rid, reform_dict, puf_data, reform_2017_law):
  calc1.increment_year()
  calc2.increment_year()
  # write actual results to actual_str
- actual_str = '{}'.format(rid)
+ actual_str = f'{rid}'
  for iyr in range(0, num_years):
- actual_str += ',{:.1f}'.format(results[iyr])
+ actual_str += f',{results[iyr]:.1f}'
  return actual_str


@@ -329,7 +333,7 @@ def fixture_reforms_dict(tests_path):
  Read reforms.json and convert to dictionary.
  """
  reforms_path = os.path.join(tests_path, 'reforms.json')
- with open(reforms_path, 'r') as rfile:
+ with open(reforms_path, 'r', encoding='utf-8') as rfile:
  rjson = rfile.read()
  return json.loads(rjson)

@@ -338,21 +342,20 @@ NUM_REFORMS = 64 # when changing this also change num_reforms in conftest.py


  @pytest.mark.requires_pufcsv
- @pytest.mark.parametrize('rid', [i for i in range(1, NUM_REFORMS + 1)])
+ @pytest.mark.parametrize('rid', list(range(1, NUM_REFORMS + 1)))
  def test_reforms(rid, test_reforms_init, tests_path, baseline_2017_law,
  reforms_dict, puf_subsample):
  """
  Write actual reform results to files.
  """
- # pylint: disable=too-many-arguments
+ # pylint: disable=too-many-arguments,too-many-positional-arguments
  assert test_reforms_init == NUM_REFORMS
  actual = reform_results(rid, reforms_dict[str(rid)],
  puf_subsample, baseline_2017_law)
- afile_path = os.path.join(tests_path,
- 'reform_actual_{}.csv'.format(rid))
- with open(afile_path, 'w') as afile:
+ afile_path = os.path.join(tests_path, f'reform_actual_{rid}.csv')
+ with open(afile_path, 'w', encoding='utf-8') as afile:
  afile.write('rid,res1,res2,res3,res4\n')
- afile.write('{}\n'.format(actual))
+ afile.write(f'{actual}\n')


  @pytest.mark.extend_tcja
@@ -383,4 +386,4 @@ def test_ext_reform(tests_path):
  iitax_ext = calc_ext.array('iitax')
  rdiff = iitax_ext - iitax_end
  weighted_sum_rdiff = (rdiff * calc_end.array('s006')).sum() * 1.0e-9
- assert np.allclose([weighted_sum_rdiff], [-215.659], rtol=0.0, atol=0.01)
+ assert np.allclose([weighted_sum_rdiff], [-214.11], rtol=0.0, atol=0.01)

taxcalc/tests/test_responses.py
@@ -7,8 +7,8 @@ Test example JSON response assumption files in taxcalc/responses directory

  import os
  import glob
- # pylint: disable=import-error
- from taxcalc import Consumption, GrowDiff
+ from taxcalc.consumption import Consumption
+ from taxcalc.growdiff import GrowDiff


  def test_response_json(tests_path):
@@ -20,8 +20,8 @@ def test_response_json(tests_path):
  responses_path = os.path.join(tests_path, '..', 'responses', '*.json')
  for jpf in glob.glob(responses_path):
  # read contents of jpf (JSON parameter filename)
- jfile = open(jpf, 'r')
- jpf_text = jfile.read()
+ with open(jpf, 'r', encoding='utf-8') as jfile:
+ jpf_text = jfile.read()
  # check that jpf_text can be used to construct objects
  response_file = ('"consumption"' in jpf_text and
  '"growdiff_baseline"' in jpf_text and