lifejacket 0.2.1__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -16,80 +16,80 @@ logging.basicConfig(
16
16
 
17
17
  def perform_desired_small_sample_correction(
18
18
  small_sample_correction,
19
- per_user_joint_adaptive_meat_contributions,
20
- per_user_classical_meat_contributions,
21
- per_user_classical_bread_inverse_contributions,
22
- num_users,
19
+ per_subject_joint_adjusted_meat_contributions,
20
+ per_subject_classical_meat_contributions,
21
+ per_subject_classical_bread_contributions,
22
+ num_subjects,
23
23
  theta_dim,
24
24
  ):
25
25
 
26
- # We first compute the classical inverse bread matrix and invert it. While
26
+ # We first compute the classical bread matrix and invert it. While
27
27
  # it is possible to avoid this inversion using a QR decomposition and
28
28
  # solving linear systems (discussed more below), we typically don't have
29
29
  # issues with the conditioning of just the classical bread.
30
- classical_bread_inverse_matrix = jnp.mean(
31
- per_user_classical_bread_inverse_contributions, axis=0
32
- )
30
+ classical_bread_matrix = jnp.mean(per_subject_classical_bread_contributions, axis=0)
33
31
  classical_bread_matrix = invert_matrix_and_check_conditioning(
34
- classical_bread_inverse_matrix,
32
+ classical_bread_matrix,
35
33
  )[0]
36
34
 
37
35
  # These will hold either corrective matrices or scalar weights depending on
38
36
  # what small sample correction is requested.
39
- per_user_adaptive_corrections = None
40
- per_user_classical_corrections = None
37
+ per_subject_adjusted_corrections = None
38
+ per_subject_classical_corrections = None
41
39
 
42
- per_user_adaptive_correction_weights = np.ones(num_users)
43
- per_user_classical_correction_weights = np.ones(num_users)
40
+ per_subject_adjusted_correction_weights = np.ones(num_subjects)
41
+ per_subject_classical_correction_weights = np.ones(num_subjects)
44
42
  if small_sample_correction == SmallSampleCorrections.NONE:
45
43
  logger.info(
46
- "No small sample correction requested. Using the raw per-user joint adaptive bread inverse contributions."
44
+ "No small sample correction requested. Using the raw per-subject joint adjusted bread contributions."
47
45
  )
48
- elif small_sample_correction == SmallSampleCorrections.HC1theta:
46
+ elif small_sample_correction == SmallSampleCorrections.Z1theta:
49
47
  logger.info(
50
- "Using HC1 small sample correction at the user trajectory level. Note that we are treating the number of parameters as simply the size of theta, despite the presence of betas."
51
- )
52
- per_user_adaptive_correction_weights = per_user_classical_correction_weights = (
53
- num_users / (num_users - theta_dim) * np.ones(num_users)
48
+ "Using HC1 small sample correction at the subject trajectory level. Note that we are treating the number of parameters as simply the size of theta, despite the presence of betas."
54
49
  )
50
+ per_subject_adjusted_correction_weights = (
51
+ per_subject_classical_correction_weights
52
+ ) = (num_subjects / (num_subjects - theta_dim) * np.ones(num_subjects))
55
53
  elif small_sample_correction in {
56
- SmallSampleCorrections.HC2theta,
57
- SmallSampleCorrections.HC3theta,
54
+ SmallSampleCorrections.Z2theta,
55
+ SmallSampleCorrections.Z3theta,
58
56
  }:
59
- logger.info("Using %s small sample correction at the user trajectory level.")
57
+ logger.info("Using %s small sample correction at the subject trajectory level.")
60
58
 
61
- power = 1 if small_sample_correction == SmallSampleCorrections.HC2theta else 2
59
+ power = 1 if small_sample_correction == SmallSampleCorrections.Z2theta else 2
62
60
 
63
- # It turns out to typically not make sense to compute the adaptive analog
64
- # of the classical leverages, since this involves correcting the joint adaptive meat matrix
61
+ # It turns out to typically not make sense to compute the adjusted analog
62
+ # of the classical leverages, since this involves correcting the joint adjusted meat matrix
65
63
  # involving all beta and theta parameters. HC2/HC3 corrections assume that
66
- # the number of parameters is smaller than the number of users, which will not typically be
67
- # the case if the number of users is small enough for these corrections to be important.
68
- # Therefore we also use the "classical" leverages for the adaptive correction weights, which
64
+ # the number of parameters is smaller than the number of subjects, which will not typically be
65
+ # the case if the number of subjects is small enough for these corrections to be important.
66
+ # Therefore we also use the "classical" leverages for the adjusted correction weights, which
69
67
  # is sensible, corresponding to only adjusting based on the estimating equations for theta.
70
68
 
71
69
  # ALSO note that one way to test correctness of the leverages is that they should sum
72
70
  # to the number of inference parameters, ie the size of theta. I tested that this is
73
- # true both for the classical leverages and the larger joint adaptive leverages when they
71
+ # true both for the classical leverages and the larger joint adjusted leverages when they
74
72
  # were still used, lending credence to the below calculations.
75
73
 
76
74
  # TODO: Write a unit test for some level of logic here and then rewrite this to not require
77
75
  # the classical bread explicitly. May be slower, probably needs a for loop so that can use
78
- # a solver for each matrix multiplication after a QR decomposition of the bread inverse
76
+ # a solver for each matrix multiplication after a QR decomposition of the bread
79
77
  # transpose.
80
- classical_leverages_per_user = (
78
+ classical_leverages_per_subject = (
81
79
  np.einsum(
82
80
  "nij,ji->n",
83
- per_user_classical_bread_inverse_contributions,
81
+ per_subject_classical_bread_contributions,
84
82
  classical_bread_matrix,
85
83
  )
86
- / num_users
84
+ / num_subjects
87
85
  )
88
- per_user_classical_correction_weights = 1 / (
89
- (1 - classical_leverages_per_user) ** power
86
+ per_subject_classical_correction_weights = 1 / (
87
+ (1 - classical_leverages_per_subject) ** power
90
88
  )
91
89
 
92
- per_user_adaptive_correction_weights = per_user_classical_correction_weights
90
+ per_subject_adjusted_correction_weights = (
91
+ per_subject_classical_correction_weights
92
+ )
93
93
  else:
94
94
  raise ValueError(
95
95
  f"Unknown small sample correction: {small_sample_correction}. "
@@ -98,28 +98,28 @@ def perform_desired_small_sample_correction(
98
98
 
99
99
  # If we used matrix corrections, they will be stored as these corrections.
100
100
  # Otherwise, store the scalar weights.
101
- if per_user_adaptive_corrections is None:
102
- per_user_adaptive_corrections = per_user_adaptive_correction_weights
103
- if per_user_classical_corrections is None:
104
- per_user_classical_corrections = per_user_classical_correction_weights
101
+ if per_subject_adjusted_corrections is None:
102
+ per_subject_adjusted_corrections = per_subject_adjusted_correction_weights
103
+ if per_subject_classical_corrections is None:
104
+ per_subject_classical_corrections = per_subject_classical_correction_weights
105
105
 
106
106
  # The scalar corrections will have computed weights that need to be applied here,
107
- # whereas the matrix corrections will have been applied to the per-user
107
+ # whereas the matrix corrections will have been applied to the per-subject
108
108
  # contributions already.
109
- joint_adaptive_meat_matrix = jnp.mean(
110
- per_user_adaptive_correction_weights[:, None, None]
111
- * per_user_joint_adaptive_meat_contributions,
109
+ joint_adjusted_meat_matrix = jnp.mean(
110
+ per_subject_adjusted_correction_weights[:, None, None]
111
+ * per_subject_joint_adjusted_meat_contributions,
112
112
  axis=0,
113
113
  )
114
114
  classical_meat_matrix = jnp.mean(
115
- per_user_classical_correction_weights[:, None, None]
116
- * per_user_classical_meat_contributions,
115
+ per_subject_classical_correction_weights[:, None, None]
116
+ * per_subject_classical_meat_contributions,
117
117
  axis=0,
118
118
  )
119
119
 
120
120
  return (
121
- joint_adaptive_meat_matrix,
121
+ joint_adjusted_meat_matrix,
122
122
  classical_meat_matrix,
123
- per_user_adaptive_corrections,
124
- per_user_classical_corrections,
123
+ per_subject_adjusted_corrections,
124
+ per_subject_classical_corrections,
125
125
  )
@@ -0,0 +1,56 @@
1
+ Metadata-Version: 2.4
2
+ Name: lifejacket
3
+ Version: 1.0.2
4
+ Summary: Consistent standard errors for longitudinal data collected under pooling online decision policies.
5
+ Author-email: Nowell Closser <nowellclosser@gmail.com>
6
+ Requires-Python: >=3.10
7
+ Description-Content-Type: text/markdown
8
+ Requires-Dist: click>=8.0
9
+ Requires-Dist: jax>=0.4.0
10
+ Requires-Dist: jaxlib>=0.4.0
11
+ Requires-Dist: numpy>=1.20.0
12
+ Requires-Dist: pandas>=1.3.0
13
+ Requires-Dist: scipy>=1.7.0
14
+ Requires-Dist: plotext>=5.0.0
15
+ Provides-Extra: dev
16
+ Requires-Dist: pytest>=7.0; extra == "dev"
17
+ Requires-Dist: black>=22.0; extra == "dev"
18
+ Requires-Dist: flake8>=4.0; extra == "dev"
19
+
20
+ ```python
21
+ _ _ __ _ _ _
22
+ | (_)/ _| (_) | | | |
23
+ | |_| |_ ___ _ __ _ ___| | _____| |_
24
+ | | | _/ _ \ |/ _` |/ __| |/ / _ \ __|
25
+ | | | || __/ | (_| | (__| < __/ |_
26
+ |_|_|_| \___| |\__,_|\___|_|\_\___|\__|
27
+ _/ |
28
+ |__/
29
+ ```
30
+
31
+ Save your standard errors from pooling in online decision-making algorithms.
32
+
33
+ ## Setup (if not using conda)
34
+ ### Create and activate a virtual environment
35
+ - `python3 -m venv .venv; source .venv/bin/activate`
36
+
37
+ ### Adding a package
38
+ - Add to `requirements.txt` with a specific version or no version if you want the latest stable
39
+ - Run `pip freeze > requirements.txt` to lock the versions of your package and all its subpackages
40
+
41
+ ## Running the code
42
+ - `export PYTHONPATH` to the absolute path of this repository on your computer
43
+ - `./run_local_synthetic.sh`, which outputs to `simulated_data/` by default. See all the possible flags to be toggled in the script code.
44
+
45
+ ## Linting/Formatting
46
+
47
+ ## Testing
48
+ python -m pytest
49
+ python -m pytest tests/unit_tests
50
+ python -m pytest tests/integration_tests
51
+
52
+
53
+
54
+ ## TODO
55
+ 1. Add precommit hooks (pip freeze, linting, formatting)
56
+
@@ -0,0 +1,17 @@
1
+ lifejacket/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ lifejacket/arg_threading_helpers.py,sha256=jiPdG-1USYUXqXAEfRPL-3KBKno7uMr4-fCl9GrZ7Fw,17599
3
+ lifejacket/calculate_derivatives.py,sha256=Jv-N54lbUSi36H9hTKtupy0ReoXKzI8mFlgqmQddL5U,37476
4
+ lifejacket/constants.py,sha256=cQjA2-YRwsGDMjsndkbFK7m56xbVpUo-XCyyg9gY1BA,313
5
+ lifejacket/deployment_conditioning_monitor.py,sha256=Go4YhJZfkwj9g0mP0vYCVROKS397CIviHUtprdLNYTk,43813
6
+ lifejacket/form_adjusted_meat_adjustments_directly.py,sha256=AVlGOuw_FgVDcVnhQs1GorxtBMKUXvmYCQZgqG675k4,13539
7
+ lifejacket/get_datum_for_blowup_supervised_learning.py,sha256=sCH-PlrFlLJgCYpTmdeasiHwHYSEy9wxspkOTDuDPuY,58594
8
+ lifejacket/helper_functions.py,sha256=SdAbUwXNx-3JFsyTfLyliQ7kUOm0eABaiNgoYLR8NG0,16967
9
+ lifejacket/input_checks.py,sha256=q7HFZq5n18edQU8X5laONsBgWSMidLRy6Nhqdw5FpOw,47084
10
+ lifejacket/post_deployment_analysis.py,sha256=XdKObve0hOXVwPWSDD2lEEfrWAdcZK-c-uh53HIrKLM,82664
11
+ lifejacket/small_sample_corrections.py,sha256=aB6qi-r3ANoBMgf2Oo5-lCXCy_L4H3FlBffGwPcfXkg,5610
12
+ lifejacket/vmap_helpers.py,sha256=pZqYN3p9Ty9DPOeeY9TKbRJXR2AV__HBwwDFOvdOQ84,2688
13
+ lifejacket-1.0.2.dist-info/METADATA,sha256=L48u8IMsEXMTwtBJlFwh4mJ-pZUD8NcQEuqgjV0zkjo,1773
14
+ lifejacket-1.0.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
15
+ lifejacket-1.0.2.dist-info/entry_points.txt,sha256=CZ9AUPN0xfnpYqwtGTr6n9l5mpyEOddsXX8fnxKRB6U,71
16
+ lifejacket-1.0.2.dist-info/top_level.txt,sha256=vKl8m7jOQ4pkbzVuHCJsq-8LcXRrOAWnok3bBo9qpsE,11
17
+ lifejacket-1.0.2.dist-info/RECORD,,
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ lifejacket = lifejacket.post_deployment_analysis:cli
@@ -1,100 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: lifejacket
3
- Version: 0.2.1
4
- Summary: A package for after-study analysis of adaptive experiments in which data is pooled across users.
5
- Author-email: Nowell Closser <nowellclosser@gmail.com>
6
- Requires-Python: >=3.10
7
- Description-Content-Type: text/markdown
8
- Requires-Dist: click>=8.0
9
- Requires-Dist: jax>=0.4.0
10
- Requires-Dist: jaxlib>=0.4.0
11
- Requires-Dist: numpy>=1.20.0
12
- Requires-Dist: pandas>=1.3.0
13
- Requires-Dist: scipy>=1.7.0
14
- Requires-Dist: plotext>=5.0.0
15
- Provides-Extra: dev
16
- Requires-Dist: pytest>=7.0; extra == "dev"
17
- Requires-Dist: black>=22.0; extra == "dev"
18
- Requires-Dist: flake8>=4.0; extra == "dev"
19
-
20
- ```python
21
- _ _ __ _ _ _
22
- | (_)/ _| (_) | | | |
23
- | |_| |_ ___ _ __ _ ___| | _____| |_
24
- | | | _/ _ \ |/ _` |/ __| |/ / _ \ __|
25
- | | | || __/ | (_| | (__| < __/ |_
26
- |_|_|_| \___| |\__,_|\___|_|\_\___|\__|
27
- _/ |
28
- |__/
29
- ```
30
-
31
- Save your standard errors from pooling in online decision-making algorithms.
32
-
33
- ## Setup (if not using conda)
34
- ### Create and activate a virtual environment
35
- - `python3 -m venv .venv; source /.venv/bin/activate`
36
-
37
- ### Adding a package
38
- - Add to `requirements.txt` with a specific version or no version if you want the latest stable
39
- - Run `pip freeze > requirements.txt` to lock the versions of your package and all its subpackages
40
-
41
- ## Running the code
42
- - `export PYTHONPATH to the absolute path of this repository on your computer
43
- - `./run_local_synthetic.sh`, which outputs to `simulated_data/` by default. See all the possible flags to be toggled in the script code.
44
-
45
- ## Linting/Formatting
46
-
47
- ## Testing
48
- python -m pytest
49
- python -m pytest tests/unit_tests
50
- python -m pytest tests/integration_tests
51
-
52
-
53
- # Talk about gitignored cluster simulation scripts
54
-
55
-
56
-
57
-
58
-
59
-
60
- ### Important Large-Scale Simulations
61
-
62
- #### No adaptivity
63
- sbatch --array=[0-999] -t 0-5:00 --mem=50G run_and_analysis_parallel_synthetic --T=10 --n=50000 --recruit_n=50000 --steepness=0.0 --alg_state_feats=intercept,past_reward --action_centering_RL=0 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_no_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_no_action_centering.py"
64
-
65
- #### No adaptivity, 5 batches incremental recruitment
66
- sbatch --array=[0-999] -t 0-5:00 --mem=50G run_and_analysis_parallel_synthetic --T=10 --n=50000 --recruit_n=10000 --steepness=0.0 --alg_state_feats=intercept,past_reward --action_centering_RL=0 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_no_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_no_action_centering.py"
67
-
68
- #### Some adaptivity, no action_centering
69
- sbatch --array=[0-999] -t 0-5:00 --mem=50G run_and_analysis_parallel_synthetic --T=10 --n=50000 --recruit_n=50000 --steepness=3.0 --alg_state_feats=intercept,past_reward --action_centering_RL=0 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_no_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_no_action_centering.py"
70
-
71
- #### Some adaptivity, no action_centering, 5 batches incremental recruitment
72
- sbatch --array=[0-999] -t 0-5:00 --mem=50G run_and_analysis_parallel_synthetic --T=10 --n=50000 --recruit_n=10000 --steepness=3.0 --alg_state_feats=intercept,past_reward --action_centering_RL=0 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_no_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_no_action_centering.py"
73
-
74
- #### More adaptivity, no action_centering
75
- sbatch --array=[0-999] -t 0-5:00 --mem=50G run_and_analysis_parallel_synthetic --T=10 --n=50000 --recruit_n=50000 --steepness=5.0 --alg_state_feats=intercept,past_reward --action_centering_RL=0 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_no_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_no_action_centering.py"
76
-
77
- #### Even more adaptivity, no action_centering
78
- sbatch --array=[0-999] -t 0-5:00 --mem=50G run_and_analysis_parallel_synthetic --T=10 --n=50000 --recruit_n=50000 --steepness=10.0 --alg_state_feats=intercept,past_reward --action_centering_RL=0 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_no_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_no_action_centering.py"
79
-
80
- #### Some adaptivity, RL action_centering, no inference action centering
81
- sbatch --array=[0-999] -t 0-5:00 --mem=50G run_and_analysis_parallel_synthetic --T=10 --n=50000 --recruit_n=50000 --steepness=3.0 --alg_state_feats=intercept,past_reward --action_centering_RL=1 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_no_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_no_action_centering.py"
82
-
83
- #### Some adaptivity, inference action_centering, no RL action centering
84
- sbatch --array=[0-999] -t 0-5:00 --mem=50G run_and_analysis_parallel_synthetic --T=10 --n=50000 --recruit_n=50000 --steepness=3.0 --alg_state_feats=intercept,past_reward --action_centering_RL=0 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_action_centering.py"
85
-
86
- #### Some adaptivity, inference and RL action_centering
87
- sbatch --array=[0-999] -t 0-5:00 --mem=50G run_and_analysis_parallel_synthetic --T=10 --n=50000 --recruit_n=50000 --steepness=3.0 --alg_state_feats=intercept,past_reward --action_centering_RL=1 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_action_centering.py"
88
-
89
- #### Some adaptivity, inference and RL action_centering, even more T
90
- sbatch --array=[0-999] -t 1-00:00 --mem=50G run_and_analysis_parallel_synthetic --T=25 --n=50000 --recruit_n=50000 --steepness=3.0 --alg_state_feats=intercept,past_reward --action_centering_RL=1 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_no_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_no_action_centering.py"
91
-
92
- #### Some adaptivity, inference and RL action_centering, even more T, 5 batches incremental recruitment
93
- sbatch --array=[0-999] -t 1-00:00 --mem=50G run_and_analysis_parallel_synthetic --T=25 --n=50000 --recruit_n=10000 --steepness=3.0 --alg_state_feats=intercept,past_reward --action_centering_RL=1 --inference_loss_func_filename="functions_to_pass_to_analysis/get_least_squares_loss_inference_no_action_centering.py" --theta_calculation_func_filename="functions_to_pass_to_analysis/estimate_theta_least_squares_no_action_centering.py"
94
-
95
-
96
-
97
- ## TODO
98
- 1. Add precommit hooks (pip freeze, linting, formatting)
99
- 2. Run tests on PRs
100
-
@@ -1,17 +0,0 @@
1
- lifejacket/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
- lifejacket/after_study_analysis.py,sha256=jTOPDhDWThlE0pyVtprkYBJ_W7SHzeloKFrnI_WLBks,82288
3
- lifejacket/arg_threading_helpers.py,sha256=7HV7qkiJtm-E-cpAhtv4n_BpCVjz9tC0nGENbc090h8,17282
4
- lifejacket/calculate_derivatives.py,sha256=SceXFWtK56uCCdXGD7v8JijgYz0UCBKzcnrPH_nAqNE,37536
5
- lifejacket/constants.py,sha256=2L05p6NJ7l3qRZ1hD2KlrvzWF1ReSmWRUkULPIhdvJo,842
6
- lifejacket/form_adaptive_meat_adjustments_directly.py,sha256=bSLrVYLZR1-Qlm5yIdktzv8ZQTVhHTlhVVL2wEjLTmw,13737
7
- lifejacket/get_datum_for_blowup_supervised_learning.py,sha256=V8H4PE49dQwsKjj93QEu2BKbhwPr56QMtx2jhan39-c,58357
8
- lifejacket/helper_functions.py,sha256=xOhRG-Cm4ZdRNm-O0faHna583d74pLWY5_jfnokegWc,23295
9
- lifejacket/input_checks.py,sha256=KcDdfsdCVCKKcx07FfOKJb3KVX6xFuWwAufGJ3msAuc,46972
10
- lifejacket/small_sample_corrections.py,sha256=f8jmg9U9ZN77WadJud70tt6NMxCTsSGtlsdF_-mfws4,5543
11
- lifejacket/trial_conditioning_monitor.py,sha256=qNTHh0zY2P7zJxox_OwhEEK8Ed1l0TPOjGDqNxMNoIQ,42164
12
- lifejacket/vmap_helpers.py,sha256=pZqYN3p9Ty9DPOeeY9TKbRJXR2AV__HBwwDFOvdOQ84,2688
13
- lifejacket-0.2.1.dist-info/METADATA,sha256=vFb90EnjvF_CxGN2XKlk6b8s1iK-aqF0p4Wr0dEIKxA,7287
14
- lifejacket-0.2.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
15
- lifejacket-0.2.1.dist-info/entry_points.txt,sha256=4k8ibVIUT-OHxPaaDv-QwWpC64ErzhdemHpTAXCnb8w,67
16
- lifejacket-0.2.1.dist-info/top_level.txt,sha256=vKl8m7jOQ4pkbzVuHCJsq-8LcXRrOAWnok3bBo9qpsE,11
17
- lifejacket-0.2.1.dist-info/RECORD,,
@@ -1,2 +0,0 @@
1
- [console_scripts]
2
- lifejacket = lifejacket.after_study_analysis:cli