lifejacket 0.2.0__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -16,10 +16,10 @@ logging.basicConfig(
 
 def perform_desired_small_sample_correction(
     small_sample_correction,
-    per_user_joint_adaptive_meat_contributions,
-    per_user_classical_meat_contributions,
-    per_user_classical_bread_inverse_contributions,
-    num_users,
+    per_subject_joint_adjusted_meat_contributions,
+    per_subject_classical_meat_contributions,
+    per_subject_classical_bread_inverse_contributions,
+    num_subjects,
     theta_dim,
 ):
 
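From the `axis=0` reductions and the `[:, None, None]` broadcasting later in this file, the per-subject contribution arguments appear to be stacked arrays whose leading axis indexes subjects. A minimal sketch of the assumed shapes (the dimensions here are hypothetical, not taken from the package):

```python
import numpy as np

# Hypothetical sizes; joint_dim would cover all beta and theta parameters.
num_subjects, theta_dim, joint_dim = 25, 3, 10

# Leading axis indexes subjects; trailing axes are square matrix dimensions.
per_subject_classical_meat_contributions = np.zeros((num_subjects, theta_dim, theta_dim))
per_subject_classical_bread_inverse_contributions = np.zeros((num_subjects, theta_dim, theta_dim))
per_subject_joint_adjusted_meat_contributions = np.zeros((num_subjects, joint_dim, joint_dim))
```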
@@ -28,7 +28,7 @@ def perform_desired_small_sample_correction(
     # solving linear systems (discussed more below), we typically don't have
     # issues with the conditioning of just the classical bread.
     classical_bread_inverse_matrix = jnp.mean(
-        per_user_classical_bread_inverse_contributions, axis=0
+        per_subject_classical_bread_inverse_contributions, axis=0
     )
     classical_bread_matrix = invert_matrix_and_check_conditioning(
         classical_bread_inverse_matrix,
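`invert_matrix_and_check_conditioning` is defined elsewhere in the package and its body is not shown in this diff. A hedged sketch of what such a helper might look like (the threshold and the error behavior are assumptions, not the package's actual implementation):

```python
import numpy as np

def invert_matrix_and_check_conditioning(matrix, max_condition_number=1e8):
    # Hypothetical stand-in for the package's helper: refuse to invert a
    # matrix whose condition number suggests an unstable inverse.
    condition_number = np.linalg.cond(matrix)
    if condition_number > max_condition_number:
        raise ValueError(
            f"Matrix is ill-conditioned (condition number {condition_number:.2e})."
        )
    return np.linalg.inv(matrix)
```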
@@ -36,60 +36,62 @@ def perform_desired_small_sample_correction(
 
     # These will hold either corrective matrices or scalar weights depending on
     # what small sample correction is requested.
-    per_user_adaptive_corrections = None
-    per_user_classical_corrections = None
+    per_subject_adjusted_corrections = None
+    per_subject_classical_corrections = None
 
-    per_user_adaptive_correction_weights = np.ones(num_users)
-    per_user_classical_correction_weights = np.ones(num_users)
+    per_subject_adjusted_correction_weights = np.ones(num_subjects)
+    per_subject_classical_correction_weights = np.ones(num_subjects)
     if small_sample_correction == SmallSampleCorrections.NONE:
         logger.info(
-            "No small sample correction requested. Using the raw per-user joint adaptive bread inverse contributions."
+            "No small sample correction requested. Using the raw per-subject joint adjusted bread inverse contributions."
         )
     elif small_sample_correction == SmallSampleCorrections.HC1theta:
         logger.info(
-            "Using HC1 small sample correction at the user trajectory level. Note that we are treating the number of parameters as simply the size of theta, despite the presence of betas."
-        )
-        per_user_adaptive_correction_weights = per_user_classical_correction_weights = (
-            num_users / (num_users - theta_dim) * np.ones(num_users)
+            "Using HC1 small sample correction at the subject trajectory level. Note that we are treating the number of parameters as simply the size of theta, despite the presence of betas."
         )
+        per_subject_adjusted_correction_weights = (
+            per_subject_classical_correction_weights
+        ) = (num_subjects / (num_subjects - theta_dim) * np.ones(num_subjects))
     elif small_sample_correction in {
         SmallSampleCorrections.HC2theta,
         SmallSampleCorrections.HC3theta,
     }:
-        logger.info("Using %s small sample correction at the user trajectory level.")
+        logger.info("Using %s small sample correction at the subject trajectory level.", small_sample_correction)
 
         power = 1 if small_sample_correction == SmallSampleCorrections.HC2theta else 2
 
-        # It turns out to typically not make sense to compute the adaptive analog
-        # of the classical leverages, since this involves correcting the joint adaptive meat matrix
+        # It typically does not make sense to compute the adjusted analog
+        # of the classical leverages, since this involves correcting the joint adjusted meat matrix
         # involving all beta and theta parameters. HC2/HC3 corrections assume that
-        # the number of parameters is smaller than the number of users, which will not typically be
-        # the case if the number of users is small enough for these corrections to be important.
-        # Therefore we also use the "classical" leverages for the adaptive correction weights, which
+        # the number of parameters is smaller than the number of subjects, which will not typically be
+        # the case if the number of subjects is small enough for these corrections to be important.
+        # Therefore we also use the "classical" leverages for the adjusted correction weights, which
        # is sensible, corresponding to only adjusting based on the estimating equations for theta.
 
         # ALSO note that one way to test correctness of the leverages is that they should sum
         # to the number of inference parameters, i.e. the size of theta. I tested that this is
-        # true both for the classical leverages and the larger joint adaptive leverages when they
+        # true both for the classical leverages and the larger joint adjusted leverages when they
         # were still used, lending credence to the below calculations.
 
         # TODO: Write a unit test for some level of the logic here and then rewrite this to not require
         # the classical bread explicitly. May be slower, and probably needs a for loop so that a solver
         # can be used for each matrix multiplication after a QR decomposition of the bread inverse
         # transpose.
-        classical_leverages_per_user = (
+        classical_leverages_per_subject = (
             np.einsum(
                 "nij,ji->n",
-                per_user_classical_bread_inverse_contributions,
+                per_subject_classical_bread_inverse_contributions,
                 classical_bread_matrix,
             )
-            / num_users
+            / num_subjects
         )
-        per_user_classical_correction_weights = 1 / (
-            (1 - classical_leverages_per_user) ** power
+        per_subject_classical_correction_weights = 1 / (
+            (1 - classical_leverages_per_subject) ** power
         )
 
-        per_user_adaptive_correction_weights = per_user_classical_correction_weights
+        per_subject_adjusted_correction_weights = (
+            per_subject_classical_correction_weights
+        )
     else:
         raise ValueError(
             f"Unknown small sample correction: {small_sample_correction}. "
@@ -98,28 +100,28 @@ def perform_desired_small_sample_correction(
 
     # If we used matrix corrections, they will be stored as these corrections.
     # Otherwise, store the scalar weights.
-    if per_user_adaptive_corrections is None:
-        per_user_adaptive_corrections = per_user_adaptive_correction_weights
-    if per_user_classical_corrections is None:
-        per_user_classical_corrections = per_user_classical_correction_weights
+    if per_subject_adjusted_corrections is None:
+        per_subject_adjusted_corrections = per_subject_adjusted_correction_weights
+    if per_subject_classical_corrections is None:
+        per_subject_classical_corrections = per_subject_classical_correction_weights
 
     # The scalar corrections will have computed weights that need to be applied here,
-    # whereas the matrix corrections will have been applied to the per-user
+    # whereas the matrix corrections will have been applied to the per-subject
     # contributions already.
-    joint_adaptive_meat_matrix = jnp.mean(
-        per_user_adaptive_correction_weights[:, None, None]
-        * per_user_joint_adaptive_meat_contributions,
+    joint_adjusted_meat_matrix = jnp.mean(
+        per_subject_adjusted_correction_weights[:, None, None]
+        * per_subject_joint_adjusted_meat_contributions,
         axis=0,
     )
     classical_meat_matrix = jnp.mean(
-        per_user_classical_correction_weights[:, None, None]
-        * per_user_classical_meat_contributions,
+        per_subject_classical_correction_weights[:, None, None]
+        * per_subject_classical_meat_contributions,
         axis=0,
     )
 
     return (
-        joint_adaptive_meat_matrix,
+        joint_adjusted_meat_matrix,
         classical_meat_matrix,
-        per_user_adaptive_corrections,
-        per_user_classical_corrections,
+        per_subject_adjusted_corrections,
+        per_subject_classical_corrections,
     )
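How these outputs are assembled into a variance estimate happens outside this function and is not part of the diff. As a hedged sketch, corrected "meat" matrices of this kind conventionally enter a sandwich estimator together with the inverted "bread" (the function name below is hypothetical):

```python
import numpy as np

def sandwich_variance(bread_matrix, meat_matrix, num_subjects):
    # Illustrative only: the usual sandwich covariance estimate,
    # bread @ meat @ bread^T, scaled by the number of subjects.
    return bread_matrix @ meat_matrix @ bread_matrix.T / num_subjects
```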
@@ -0,0 +1,56 @@
+Metadata-Version: 2.4
+Name: lifejacket
+Version: 1.0.0
+Summary: Consistent standard errors for longitudinal data collected under pooling online decision policies.
+Author-email: Nowell Closser <nowellclosser@gmail.com>
+Requires-Python: >=3.10
+Description-Content-Type: text/markdown
+Requires-Dist: click>=8.0
+Requires-Dist: jax>=0.4.0
+Requires-Dist: jaxlib>=0.4.0
+Requires-Dist: numpy>=1.20.0
+Requires-Dist: pandas>=1.3.0
+Requires-Dist: scipy>=1.7.0
+Requires-Dist: plotext>=5.0.0
+Provides-Extra: dev
+Requires-Dist: pytest>=7.0; extra == "dev"
+Requires-Dist: black>=22.0; extra == "dev"
+Requires-Dist: flake8>=4.0; extra == "dev"
+
+```python
+ _ _ __ _ _ _
+| (_)/ _| (_) | | | |
+| |_| |_ ___ _ __ _ ___| | _____| |_
+| | | _/ _ \ |/ _` |/ __| |/ / _ \ __|
+| | | || __/ | (_| | (__| < __/ |_
+|_|_|_| \___| |\__,_|\___|_|\_\___|\__|
+ _/ |
+|__/
+```
+
+Save your standard errors from pooling in online decision-making algorithms.
+
+## Setup (if not using conda)
+### Create and activate a virtual environment
+- `python3 -m venv .venv && source .venv/bin/activate`
+
+### Adding a package
+- Add it to `requirements.txt` with a specific version, or with no version if you want the latest stable release
+- Run `pip freeze > requirements.txt` to lock the versions of your package and all of its subpackages
+
+## Running the code
+- Set the `PYTHONPATH` environment variable to the absolute path of this repository on your computer
+- Run `./run_local_synthetic.sh`, which outputs to `simulated_data/` by default. See the script code for all of the flags that can be toggled.
+
+## Linting/Formatting
+
+## Testing
+- `python -m pytest`
+- `python -m pytest tests/unit_tests`
+- `python -m pytest tests/integration_tests`
+
+
+
+## TODO
+1. Add precommit hooks (pip freeze, linting, formatting)
+
@@ -0,0 +1,17 @@
+lifejacket/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+lifejacket/after_study_analysis.py,sha256=-6LCfl0dz9eHkP5aLs-J2YqpxVJ57J8Ov4wEWKbZMWA,83315
+lifejacket/arg_threading_helpers.py,sha256=jiPdG-1USYUXqXAEfRPL-3KBKno7uMr4-fCl9GrZ7Fw,17599
+lifejacket/calculate_derivatives.py,sha256=wFMS1pSm5MI8D7FYK2nPEAdvtVJAxKy8OVtUj7giII0,37520
+lifejacket/constants.py,sha256=2L05p6NJ7l3qRZ1hD2KlrvzWF1ReSmWRUkULPIhdvJo,842
+lifejacket/deployment_conditioning_monitor.py,sha256=5PMBfDQfyxwoYXr3qh1pTdMBefnSjjQMFyCj3_sItVY,43526
+lifejacket/form_adjusted_meat_adjustments_directly.py,sha256=ML7FOB_hnDEM1ndC2X4j_oaqdqn-EPr9j-MetlOqSE8,13740
+lifejacket/get_datum_for_blowup_supervised_learning.py,sha256=SjA3H7H8RBZc9GxzgsccqZoLZnAD68mxaPH1E87s2lA,59092
+lifejacket/helper_functions.py,sha256=ldkanFn5b2lwBlni-HtXPVd7QvxEKmzL1UsJMsG6f6g,23478
+lifejacket/input_checks.py,sha256=RhS2tnq74jOU5_pVSXsxthSmuGhlmoMc5eJiourQiS0,47513
+lifejacket/small_sample_corrections.py,sha256=bnbtxjEKZBiFrCSF2WA0vczaVeIqao1NekxJRZdmwCU,5692
+lifejacket/vmap_helpers.py,sha256=pZqYN3p9Ty9DPOeeY9TKbRJXR2AV__HBwwDFOvdOQ84,2688
+lifejacket-1.0.0.dist-info/METADATA,sha256=UGZsalPpzX8K7BnstZ0RDFq-1ZpL5CsXcUd_hYvQWus,1773
+lifejacket-1.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lifejacket-1.0.0.dist-info/entry_points.txt,sha256=4k8ibVIUT-OHxPaaDv-QwWpC64ErzhdemHpTAXCnb8w,67
+lifejacket-1.0.0.dist-info/top_level.txt,sha256=vKl8m7jOQ4pkbzVuHCJsq-8LcXRrOAWnok3bBo9qpsE,11
+lifejacket-1.0.0.dist-info/RECORD,,
@@ -1,100 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: lifejacket
3
- Version: 0.2.0
4
- Summary: A package for after-study analysis of adaptive experiments in which data is pooled across users.
5
- Author-email: Nowell Closser <nowellclosser@gmail.com>
6
- Requires-Python: >=3.10
7
- Description-Content-Type: text/markdown
8
- Requires-Dist: click>=8.0
9
- Requires-Dist: jax>=0.4.0
10
- Requires-Dist: jaxlib>=0.4.0
11
- Requires-Dist: numpy>=1.20.0
12
- Requires-Dist: pandas>=1.3.0
13
- Requires-Dist: scipy>=1.7.0
14
- Requires-Dist: plotext>=5.0.0
15
- Provides-Extra: dev
16
- Requires-Dist: pytest>=7.0; extra == "dev"
17
- Requires-Dist: black>=22.0; extra == "dev"
18
- Requires-Dist: flake8>=4.0; extra == "dev"
19
-
20
- ```python
21
- _ _ __ _ _ _
22
- | (_)/ _| (_) | | | |
23
- | |_| |_ ___ _ __ _ ___| | _____| |_
24
- | | | _/ _ \ |/ _` |/ __| |/ / _ \ __|
25
- | | | || __/ | (_| | (__| < __/ |_
26
- |_|_|_| \___| |\__,_|\___|_|\_\___|\__|
27
- _/ |
28
- |__/
29
- ```
30
-
31
- Save your standard errors from pooling in adaptive experiments.
32
-
33
- ## Setup (if not using conda)
34
- ### Create and activate a virtual environment
35
- - `python3 -m venv .venv; source /.venv/bin/activate`
36
-
37
- ### Adding a package
38
- - Add to `requirements.txt` with a specific version or no version if you want the latest stable
39
- - Run `pip freeze > requirements.txt` to lock the versions of your package and all its subpackages
40
-
41
- ## Running the code
42
- - `export PYTHONPATH to the absolute path of this repository on your computer
43
- - `./run_local_synthetic.sh`, which outputs to `simulated_data/` by default. See all the possible flags to be toggled in the script code.
44
-
45
- ## Linting/Formatting
46
-
47
- ## Testing
48
- python -m pytest
49
- python -m pytest tests/unit_tests
50
- python -m pytest tests/integration_tests
51
-
52
-
53
- # Talk about gitignored cluster simulation scripts
54
-
55
-
56
-
57
-
58
-
59
-
60
- ### Important Large-Scale Simulations
61
-
62
- #### No adaptivity
63
- sbatch --array=[0-999] -t 0-5:00 --mem=50G run_and_analysis_parallel_synthetic --T=10 --n=50000 --recruit_n=50000 --steepness=0.0 --alg_state_feats=intercept,past_reward --action_centering_RL=0 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_no_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_no_action_centering.py"
64
-
65
- #### No adaptivity, 5 batches incremental recruitment
66
- sbatch --array=[0-999] -t 0-5:00 --mem=50G run_and_analysis_parallel_synthetic --T=10 --n=50000 --recruit_n=10000 --steepness=0.0 --alg_state_feats=intercept,past_reward --action_centering_RL=0 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_no_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_no_action_centering.py"
67
-
68
- #### Some adaptivity, no action_centering
69
- sbatch --array=[0-999] -t 0-5:00 --mem=50G run_and_analysis_parallel_synthetic --T=10 --n=50000 --recruit_n=50000 --steepness=3.0 --alg_state_feats=intercept,past_reward --action_centering_RL=0 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_no_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_no_action_centering.py"
70
-
71
- #### Some adaptivity, no action_centering, 5 batches incremental recruitment
72
- sbatch --array=[0-999] -t 0-5:00 --mem=50G run_and_analysis_parallel_synthetic --T=10 --n=50000 --recruit_n=10000 --steepness=3.0 --alg_state_feats=intercept,past_reward --action_centering_RL=0 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_no_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_no_action_centering.py"
73
-
74
- #### More adaptivity, no action_centering
75
- sbatch --array=[0-999] -t 0-5:00 --mem=50G run_and_analysis_parallel_synthetic --T=10 --n=50000 --recruit_n=50000 --steepness=5.0 --alg_state_feats=intercept,past_reward --action_centering_RL=0 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_no_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_no_action_centering.py"
76
-
77
- #### Even more adaptivity, no action_centering
78
- sbatch --array=[0-999] -t 0-5:00 --mem=50G run_and_analysis_parallel_synthetic --T=10 --n=50000 --recruit_n=50000 --steepness=10.0 --alg_state_feats=intercept,past_reward --action_centering_RL=0 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_no_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_no_action_centering.py"
79
-
80
- #### Some adaptivity, RL action_centering, no inference action centering
81
- sbatch --array=[0-999] -t 0-5:00 --mem=50G run_and_analysis_parallel_synthetic --T=10 --n=50000 --recruit_n=50000 --steepness=3.0 --alg_state_feats=intercept,past_reward --action_centering_RL=1 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_no_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_no_action_centering.py"
82
-
83
- #### Some adaptivity, inference action_centering, no RL action centering
84
- sbatch --array=[0-999] -t 0-5:00 --mem=50G run_and_analysis_parallel_synthetic --T=10 --n=50000 --recruit_n=50000 --steepness=3.0 --alg_state_feats=intercept,past_reward --action_centering_RL=0 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_action_centering.py"
85
-
86
- #### Some adaptivity, inference and RL action_centering
87
- sbatch --array=[0-999] -t 0-5:00 --mem=50G run_and_analysis_parallel_synthetic --T=10 --n=50000 --recruit_n=50000 --steepness=3.0 --alg_state_feats=intercept,past_reward --action_centering_RL=1 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_action_centering.py"
88
-
89
- #### Some adaptivity, inference and RL action_centering, even more T
90
- sbatch --array=[0-999] -t 1-00:00 --mem=50G run_and_analysis_parallel_synthetic --T=25 --n=50000 --recruit_n=50000 --steepness=3.0 --alg_state_feats=intercept,past_reward --action_centering_RL=1 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_no_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_no_action_centering.py"
91
-
92
- #### Some adaptivity, inference and RL action_centering, even more T, 5 batches incremental recruitment
93
- sbatch --array=[0-999] -t 1-00:00 --mem=50G run_and_analysis_parallel_synthetic --T=25 --n=50000 --recruit_n=10000 --steepness=3.0 --alg_state_feats=intercept,past_reward --action_centering_RL=1 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_no_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_no_action_centering.py"
94
-
95
-
96
-
97
- ## TODO
98
- 1. Add precommit hooks (pip freeze, linting, formatting)
99
- 2. Run tests on PRs
100
-
@@ -1,17 +0,0 @@
1
- lifejacket/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
- lifejacket/after_study_analysis.py,sha256=_Weeca51EXWlQkx3IWd2t1jImBxODOTe9-8gtKBSlus,82168
3
- lifejacket/arg_threading_helpers.py,sha256=7HV7qkiJtm-E-cpAhtv4n_BpCVjz9tC0nGENbc090h8,17282
4
- lifejacket/calculate_derivatives.py,sha256=SceXFWtK56uCCdXGD7v8JijgYz0UCBKzcnrPH_nAqNE,37536
5
- lifejacket/constants.py,sha256=2L05p6NJ7l3qRZ1hD2KlrvzWF1ReSmWRUkULPIhdvJo,842
6
- lifejacket/form_adaptive_meat_adjustments_directly.py,sha256=bSLrVYLZR1-Qlm5yIdktzv8ZQTVhHTlhVVL2wEjLTmw,13737
7
- lifejacket/get_datum_for_blowup_supervised_learning.py,sha256=V8H4PE49dQwsKjj93QEu2BKbhwPr56QMtx2jhan39-c,58357
8
- lifejacket/helper_functions.py,sha256=xOhRG-Cm4ZdRNm-O0faHna583d74pLWY5_jfnokegWc,23295
9
- lifejacket/input_checks.py,sha256=KcDdfsdCVCKKcx07FfOKJb3KVX6xFuWwAufGJ3msAuc,46972
10
- lifejacket/small_sample_corrections.py,sha256=f8jmg9U9ZN77WadJud70tt6NMxCTsSGtlsdF_-mfws4,5543
11
- lifejacket/trial_conditioning_monitor.py,sha256=qNTHh0zY2P7zJxox_OwhEEK8Ed1l0TPOjGDqNxMNoIQ,42164
12
- lifejacket/vmap_helpers.py,sha256=pZqYN3p9Ty9DPOeeY9TKbRJXR2AV__HBwwDFOvdOQ84,2688
13
- lifejacket-0.2.0.dist-info/METADATA,sha256=XzZQXRKzohTg0FOv0sIh7Ii6Snj2m5HVIdeXVYUphe4,7274
14
- lifejacket-0.2.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
15
- lifejacket-0.2.0.dist-info/entry_points.txt,sha256=4k8ibVIUT-OHxPaaDv-QwWpC64ErzhdemHpTAXCnb8w,67
16
- lifejacket-0.2.0.dist-info/top_level.txt,sha256=vKl8m7jOQ4pkbzVuHCJsq-8LcXRrOAWnok3bBo9qpsE,11
17
- lifejacket-0.2.0.dist-info/RECORD,,