cccpm-0.2.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
cccpm/utils.py ADDED
@@ -0,0 +1,386 @@
+ import os
+ import numpy as np
+ import pandas as pd
+
+ from sklearn.utils import check_X_y
+ from sklearn.impute import SimpleImputer
+
+ from scipy.stats import ConstantInputWarning, NearConstantInputWarning
+
+ import seaborn as sns
+ import matplotlib.pyplot as plt
+
+ import warnings
+
+ import logging
+
+ from cccpm.reporting.plots.plots import pairplot_flexible
+
+
+ logger = logging.getLogger(__name__)
+
+ warnings.filterwarnings("ignore", category=ConstantInputWarning)
+ warnings.filterwarnings("ignore", category=NearConstantInputWarning)
+
+
+ def train_test_split(train, test, X, y, covariates):
+     # Slice features, target, and covariates into train/test subsets by index arrays.
+     return X[train], X[test], y[train], y[test], covariates[train], covariates[test]
+
+
+ def matrix_to_upper_triangular_vector(matrix):
+     """
+     Convert a 2D square matrix to a vector containing only the elements
+     of the strictly upper triangular part (excluding the diagonal).
+
+     Parameters:
+         matrix (np.ndarray): Input 2D square matrix of shape (n, n).
+
+     Returns:
+         np.ndarray: A vector containing the strictly upper triangular elements.
+     """
+     if not (matrix.ndim == 2 and matrix.shape[0] == matrix.shape[1]):
+         raise ValueError("Input must be a 2D square matrix.")
+
+     n = matrix.shape[0]
+     # Get the indices of the strictly upper triangular part
+     row_indices, col_indices = np.triu_indices(n, k=1)
+     # Extract the elements at these indices
+     upper_triangular_elements = matrix[row_indices, col_indices]
+
+     return upper_triangular_elements
+
+
+ def vector_to_upper_triangular_matrix(vector):
+     """
+     Convert a vector containing strictly upper triangular elements back
+     to a 2D square matrix.
+
+     Parameters:
+         vector (np.ndarray): A vector containing the strictly upper triangular elements.
+
+     Returns:
+         np.ndarray: The reconstructed symmetric 2D square matrix with zero diagonal.
+     """
+     # Calculate the size of the matrix from the vector length
+     size = int((np.sqrt(8 * vector.size + 1) - 1) / 2) + 1
+     if size * (size - 1) // 2 != vector.size:
+         raise ValueError("Vector size does not match the number of elements for a valid square matrix.")
+
+     matrix = np.zeros((size, size))
+     # Get the indices of the strictly upper triangular part
+     row_indices, col_indices = np.triu_indices(size, k=1)
+     # Place the elements into both triangles to keep the matrix symmetric
+     matrix[row_indices, col_indices] = vector
+     matrix[col_indices, row_indices] = vector
+     return matrix
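+
+ # Usage sketch (illustrative, not from the original docs): the two helpers above
+ # are inverses for symmetric matrices with a zero diagonal.
+ # >>> m = np.array([[0., 1., 2.], [1., 0., 3.], [2., 3., 0.]])
+ # >>> matrix_to_upper_triangular_vector(m)
+ # array([1., 2., 3.])
+ # >>> np.allclose(vector_to_upper_triangular_matrix(np.array([1., 2., 3.])), m)
+ # True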
+
+
+ def matrix_to_vector_3d(matrix_3d):
+     """
+     Convert a 3D connectivity matrix to a 2D array of upper-triangular vectors.
+
+     Parameters
+     ----------
+     matrix_3d: np.ndarray
+         Input 3D array of shape (n_samples, n, n), where each 2D matrix is square.
+
+     Returns
+     -------
+     upper: np.ndarray
+         2D array of shape (n_samples, n*(n - 1)/2) containing strictly upper-triangular elements of each matrix.
+     """
+     n_samples, n, _ = matrix_3d.shape
+     row_idx, col_idx = np.triu_indices(n, k=1)
+     flat = matrix_3d.reshape(n_samples, n * n)
+     upper = flat[:, np.ravel_multi_index((row_idx, col_idx), (n, n))]
+     return upper
+
+
+ def vector_to_matrix_3d(vector_2d, shape):
+     """
+     Convert a vector containing strictly upper triangular parts back to a 3D matrix.
+
+     Parameters:
+         vector_2d (np.ndarray): A 2D array where each row is a vector of the strictly upper triangular part of a 2D matrix.
+         shape (tuple): The shape of the original 3D matrix, (n_samples, n, n).
+
+     Returns:
+         np.ndarray: The reconstructed 3D matrix of shape (n_samples, n, n).
+     """
+     n_samples, n, _ = shape
+     # Create an empty 3D matrix to fill
+     matrix_3d = np.zeros((n_samples, n, n))
+
+     # Indices of the strictly upper triangular part; must match matrix_to_vector_3d
+     row_indices, col_indices = np.triu_indices(n, k=1)  # k=1 excludes the diagonal
+     upper_tri_indices = np.ravel_multi_index((row_indices, col_indices), (n, n))
+
+     # Flatten the 3D matrix along the last two dimensions
+     flat_matrix = matrix_3d.reshape(n_samples, -1)
+
+     # Place the strictly upper triangular elements into the corresponding positions
+     np.put_along_axis(flat_matrix, upper_tri_indices[None, :], vector_2d, axis=1)
+
+     return matrix_3d
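+
+ # Illustrative round trip (added for clarity): vectorize a stack of symmetric
+ # connectomes, then restore their strictly upper triangles.
+ # >>> mats = np.random.rand(5, 4, 4)
+ # >>> mats = (mats + mats.transpose(0, 2, 1)) / 2
+ # >>> vecs = matrix_to_vector_3d(mats)        # shape (5, 6)
+ # >>> rec = vector_to_matrix_3d(vecs, mats.shape)
+ # >>> np.allclose(np.triu(rec, k=1), np.triu(mats, k=1))
+ # True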
+
+
+ def get_colors_from_colormap(n_colors, colormap_name='tab10'):
+     """
+     Get a set of distinct colors from a specified colormap.
+
+     Parameters:
+         n_colors (int): Number of distinct colors needed.
+         colormap_name (str): Name of the colormap to use (e.g., 'tab10').
+
+     Returns:
+         list: A list of RGBA color tuples.
+     """
+     cmap = plt.get_cmap(colormap_name)
+     # Guard against division by zero when a single color is requested
+     colors = [cmap(i / max(n_colors - 1, 1)) for i in range(n_colors)]
+     return colors
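+
+ # e.g. get_colors_from_colormap(4) returns four evenly spaced RGBA tuples
+ # sampled from matplotlib's 'tab10' colormap.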
+
+
+ def check_data(X, y, covariates, impute_missings: bool = False):
+     """
+     Validate and format input data for modeling.
+
+     Parameters
+     ----------
+     X: array-like
+         Feature data of shape (n_samples, n_features) or
+         connectivity matrices of shape (n_samples, n, n). 3D matrices are vectorized.
+     y: array-like
+         Target values; 1D array of shape (n_samples,) or
+         2D array of shape (n_samples, 1) to be squeezed.
+     covariates: array-like or pandas.Series or pandas.DataFrame
+         Covariate data. Series are converted to 2D; DataFrames are one-hot encoded.
+     impute_missings: bool, default=False
+         If True, allow NaNs in X for imputation; NaNs in y always raise an error.
+
+     Returns
+     -------
+     X_checked: np.ndarray
+         2D array of validated (and vectorized) feature data.
+     y_checked: np.ndarray
+         1D array of target values.
+     cov_arr: np.ndarray
+         2D array of covariates.
+     """
+     # Convert to numpy for dimension checks
+     X_arr = np.asarray(X)
+     # Handle 3D connectivity matrices
+     if X_arr.ndim == 3:
+         X_arr = matrix_to_vector_3d(X_arr)
+     elif X_arr.ndim != 2:
+         raise ValueError(f"X must be 2D or 3D, got shape {X_arr.shape}")
+
+     # Ensure y is a 1D vector
+     y_arr = np.asarray(y)
+     if y_arr.ndim == 2:
+         if 1 in y_arr.shape:
+             y_arr = y_arr.ravel()
+         else:
+             raise ValueError(f"y must be a vector, got shape {y_arr.shape}")
+     elif y_arr.ndim != 1:
+         raise ValueError(f"y must be 1D array, got shape {y_arr.shape}")
+
+     # Validate X and y with sklearn
+     if impute_missings:
+         try:
+             X_checked, y_checked = check_X_y(
+                 X_arr, y_arr,
+                 ensure_all_finite='allow-nan',
+                 allow_nd=True,
+                 y_numeric=True
+             )
+         except ValueError:
+             logger.info(
+                 "y contains NaN values. Only missing values in X and covariates can be imputed."
+             )
+             raise
+     else:
+         try:
+             X_checked, y_checked = check_X_y(
+                 X_arr, y_arr,
+                 ensure_all_finite=True,
+                 allow_nd=True,
+                 y_numeric=True
+             )
+         except ValueError:
+             logger.info(
+                 "Your input contains NaN values. Fix NaNs or use impute_missing_values=True."
+             )
+             raise
+
+     # Process covariates
+     if isinstance(covariates, pd.Series):
+         cov_df = covariates.to_frame()
+     elif isinstance(covariates, pd.DataFrame):
+         cov_df = pd.get_dummies(covariates, drop_first=True)
+     else:
+         cov_df = covariates
+
+     if isinstance(cov_df, (pd.Series, pd.DataFrame)):
+         cov_arr = cov_df.to_numpy()
+     else:
+         cov_arr = np.asarray(cov_df)
+
+     # Ensure covariates are 2D
+     if cov_arr.ndim == 1:
+         cov_arr = cov_arr.reshape(-1, 1)
+     elif cov_arr.ndim != 2:
+         raise ValueError(f"covariates must be 1D or 2D, got shape {cov_arr.shape}")
+
+     return X_checked, y_checked, cov_arr
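+
+ # Illustrative call (variable names are examples): a (n_samples, n, n) stack of
+ # connectomes is vectorized to its strictly upper triangles before validation.
+ # >>> X2d, y1d, cov2d = check_data(conn_matrices, scores, age_sex_df,
+ # ...                              impute_missings=True)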
+
+
+ def impute_missing_values(X_train, X_test, cov_train, cov_test):
+     # Initialize imputers with chosen strategy (e.g., mean, median, most_frequent)
+     x_imputer = SimpleImputer(strategy='mean')
+     cov_imputer = SimpleImputer(strategy='mean')
+
+     # Fit on training data and transform both training and test data
+     X_train = x_imputer.fit_transform(X_train)
+     X_test = x_imputer.transform(X_test)
+     cov_train = cov_imputer.fit_transform(cov_train)
+     cov_test = cov_imputer.transform(cov_test)
+     return X_train, X_test, cov_train, cov_test
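+
+ # Illustrative use inside a CV fold: imputers are fit on the training split only
+ # and then applied to the held-out split, which avoids test-set leakage.
+ # >>> X_tr, X_te, cov_tr, cov_te = impute_missing_values(X_tr, X_te, cov_tr, cov_te)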
+
+
+ def select_stable_edges(stability_edges, stability_threshold):
+     # Keep only edges whose selection frequency across folds exceeds the threshold
+     return {'positive': np.where(stability_edges['positive'] > stability_threshold)[0],
+             'negative': np.where(stability_edges['negative'] > stability_threshold)[0]}
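+
+ # Example (illustrative): with per-edge selection frequencies
+ # >>> select_stable_edges({'positive': np.array([0.9, 0.2]),
+ # ...                      'negative': np.array([0.1, 0.95])}, 0.8)
+ # {'positive': array([0]), 'negative': array([1])}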
+
+
+ def generate_data_insights(X, y, covariates, results_directory):
+     """
+     Generate summary statistics and diagnostic plots about the input data.
+     Saves outputs to a subfolder in results_directory.
+
+     Handles both pandas DataFrames and NumPy arrays.
+     """
+     # Create output folder
+     output_dir = os.path.join(results_directory, "data_insights")
+     os.makedirs(output_dir, exist_ok=True)
+
+     X_names, y_name, covariates_names = get_variable_names(X, y, covariates)
+     pd.Series(X_names).to_csv(os.path.join(output_dir, "X_names.csv"), index=False, header=False)
+     pd.Series([y_name]).to_csv(os.path.join(output_dir, "y_name.csv"), index=False, header=False)
+     pd.Series(covariates_names).to_csv(os.path.join(output_dir, "covariate_names.csv"), index=False, header=False)
+
+     # Convert X to DataFrame if needed
+     if isinstance(X, np.ndarray):
+         X = pd.DataFrame(X, columns=[f"feature {i + 1}" for i in range(X.shape[1])])
+
+     # Convert y to Series
+     if isinstance(y, np.ndarray):
+         y = pd.Series(np.squeeze(y), name="target")
+     elif isinstance(y, pd.DataFrame):
+         y = y.iloc[:, 0]
+         y.name = y.name or "target"
+     elif isinstance(y, pd.Series):
+         y.name = y.name or "target"
+
+     # Convert covariates to DataFrame
+     if covariates is not None:
+         if isinstance(covariates, np.ndarray):
+             if covariates.ndim == 1:
+                 covariates = pd.DataFrame(covariates, columns=["covariate 1"])
+             else:
+                 covariates = pd.DataFrame(covariates, columns=[f"covariate {i + 1}" for i in range(covariates.shape[1])])
+         elif isinstance(covariates, pd.Series):
+             # to_frame(name=...) also covers unnamed Series (the column would otherwise be 0)
+             covariates = covariates.to_frame(name=covariates.name or "covariate 1")
+
+     # --- Combine all data to check for missing values ---
+     full_data = pd.concat([X, y.rename("target"), covariates], axis=1)
+     missing_total = full_data.isnull().sum().sum()
+
+     # --- Summary ---
+     summary = {
+         "Number of samples": len(X),
+         "Number of features (connectivity values)": X.shape[1],
+         "Number of covariates": covariates.shape[1] if covariates is not None else 0,
+         "Total missing values": missing_total
+     }
+     summary_df = pd.DataFrame.from_dict(summary, orient="index", columns=["Value"])
+     summary_df.to_csv(os.path.join(output_dir, "summary.csv"))
+
+     # --- Target Histogram ---
+     plt.figure(figsize=(4, 3))
+     sns.histplot(y, bins=30, color="gray", edgecolor="white")
+     plt.title("Distribution of Target Variable")
+     plt.xlabel(y.name)
+     plt.ylabel("Count")
+     plt.tight_layout()
+     plt.savefig(os.path.join(output_dir, "target_distribution.png"), dpi=300)
+     plt.close()
+
+     # --- Scatter Matrix: Covariates and Target ---
+     if covariates is not None and not covariates.empty:
+         cov_y = pd.concat([covariates, y.rename("target")], axis=1)
+         pairplot_flexible(cov_y, os.path.join(output_dir, "scatter_matrix.png"))
+
+     # --- Optional: Missing Values Heatmap ---
+     if missing_total > 0:
+         plt.figure(figsize=(10, 6))
+         sns.heatmap(full_data.isnull(), cbar=False, yticklabels=False)
+         plt.title("Missing Values Heatmap")
+         plt.tight_layout()
+         plt.savefig(os.path.join(output_dir, "missing_values_heatmap.png"), dpi=300)
+         plt.close()
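+
+ # Typical call (illustrative): writes summary.csv, the name CSVs, a target
+ # histogram, a covariate/target scatter matrix and, if NaNs are present, a
+ # missing-values heatmap into <results_directory>/data_insights/.
+ # >>> generate_data_insights(X, y, covariates, "results/")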
+
+
+ def get_variable_names(X, y, covariates):
+     """
+     Generate names for features, target, and covariates based on input types.
+
+     Parameters
+     ----------
+     X : array-like or pandas.DataFrame
+         Feature data. If DataFrame, column names are returned; otherwise,
+         generic names "feature_{i}" are generated for each feature (i from
+         0 to n_features - 1).
+     y : array-like, pandas.Series, or pandas.DataFrame
+         Target vector. If Series, its name is used; if DataFrame, the first
+         column name is used; otherwise, the default name "target" is returned.
+     covariates : array-like, pandas.Series, or pandas.DataFrame
+         Covariate data. If Series, its name is returned as a single-element
+         list; if DataFrame, its column names are returned; otherwise, generic
+         names "covariate_{i}" are generated for each covariate column.
+
+     Returns
+     -------
+     X_names : list of str
+         Names for each feature column.
+     y_name : str
+         Name for the target variable.
+     covar_names : list of str
+         Names for each covariate column.
+     """
+     # Features
+     X_names = list(X.columns) if isinstance(X, pd.DataFrame) else [
+         f"feature_{i}" for i in range(X.shape[1])
+     ]
+
+     # Target
+     if isinstance(y, (pd.Series, pd.DataFrame)):
+         y_name = y.name if isinstance(y, pd.Series) else y.columns[0]
+     else:
+         y_name = "target"
+
+     # Covariates
+     if isinstance(covariates, (pd.Series, pd.DataFrame)):
+         covar_names = (
+             [covariates.name]
+             if isinstance(covariates, pd.Series)
+             else list(covariates.columns)
+         )
+     else:
+         if covariates.ndim == 1:
+             covar_names = ["covariate_0"]
+         else:
+             covar_names = [
+                 f"covariate_{i}" for i in range(covariates.shape[1])
+             ]
+
+     return X_names, y_name, covar_names
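+
+ # Example (illustrative):
+ # >>> get_variable_names(np.zeros((10, 3)), pd.Series(range(10), name="age"),
+ # ...                    np.zeros((10, 2)))
+ # (['feature_0', 'feature_1', 'feature_2'], 'age', ['covariate_0', 'covariate_1'])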
cccpm-0.2.1.dist-info/METADATA ADDED
@@ -0,0 +1,105 @@
+ Metadata-Version: 2.4
+ Name: cccpm
+ Version: 0.2.1
+ Summary: Confound-Corrected Connectome-based Predictive Modeling Python Package
+ License: MIT
+ Author: Nils Winter
+ Author-email: nils.r.winter@uni-muenster.de
+ Requires-Python: >=3.10,<4.0
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Programming Language :: Python :: 3.14
+ Requires-Dist: arakawa
+ Requires-Dist: bleach
+ Requires-Dist: netplotbrain
+ Requires-Dist: networkx
+ Requires-Dist: nilearn
+ Requires-Dist: numpy
+ Requires-Dist: pandas
+ Requires-Dist: plotly
+ Requires-Dist: pygam
+ Requires-Dist: scikit-image
+ Requires-Dist: scikit-learn (>=1.6)
+ Requires-Dist: tinycss2
+ Requires-Dist: tqdm
+ Description-Content-Type: text/markdown
+
+ [![GitHub Workflow Status](https://github.com/wwu-mmll/confound_corrected_cpm/actions/workflows/test.yml/badge.svg)](https://github.com/wwu-mmll/confound_corrected_cpm/actions/workflows/test.yml)
+ [![Coverage Status](https://coveralls.io/repos/github/wwu-mmll/confound_corrected_cpm/badge.svg)](https://coveralls.io/github/wwu-mmll/confound_corrected_cpm)
+ [![Github Contributors](https://img.shields.io/github/contributors-anon/wwu-mmll/cpm_python?color=blue)](https://github.com/wwu-mmll/cpm_python/graphs/contributors)
+ [![Github Commits](https://img.shields.io/github/commit-activity/y/wwu-mmll/cpm_python)](https://github.com/wwu-mmll/cpm_python/commits/main)
+
+ # Confound-Corrected Connectome-Based Predictive Modelling in Python
+ **Confound-Corrected Connectome-Based Predictive Modelling** is a Python package for performing connectome-based predictive modeling (CPM). This toolbox is designed for researchers in neuroscience and psychiatry, providing robust methods for building predictive models based on structural or functional connectome data. It emphasizes replicability, interpretability, and flexibility, making it a valuable tool for analyzing brain connectivity and its relationship to behavior or clinical outcomes.
+
+ ---
+
+ ## What is Connectome-Based Predictive Modeling?
+
+ Connectome-based predictive modeling (CPM) is a machine learning framework that leverages the brain's connectivity patterns to predict individual differences in behavior, cognition, or clinical status. By identifying key edges in the connectome, CPM creates models that link connectivity metrics with target variables (e.g., clinical scores). This approach is particularly suited for studying complex relationships in neuroimaging data and developing interpretable predictive models.
+
+ ---
+
+ ## Key Features
+
+ - **Univariate Edge Selection**: Supports methods like `pearson`, `spearman`, and their partial correlation counterparts, with options for p-threshold optimization and FDR correction.
+ - **Cross-Validation**: Implements nested cross-validation for robust model evaluation.
+ - **Edge Stability**: Selects stable edges across folds to improve model reliability.
+ - **Confound Adjustment**: Controls for covariates during edge selection and modeling.
+ - **Permutation Testing**: Assesses the statistical significance of models using robust permutation-based methods.
+ ---
56
+
57
+ ## Documentation
58
+
59
+ For detailed instructions on installation, usage, and advanced configurations, visit the [documentation website](https://wwu-mmll.github.io/confound_corrected_cpm/).
60
+
61
+ ---
62
+
63
+ ## Installation
64
+
65
+ Install the package from GitHub:
66
+
67
+ ```bash
68
+ git clone https://github.com/mmll/confound_corrected_cpm.git
69
+ cd cpm_python
70
+ pip install .
71
+ ```
72
+
73
+ ## Quick Example
74
+ Here's a quick overview of how to run a CPM analysis:
75
+
76
+ ```python
77
+ from src.cccpm.cpm_analysis import CPMRegression
78
+ from src.cccpm.edge_selection import UnivariateEdgeSelection, PThreshold
79
+ from sklearn.model_selection import KFold
80
+
81
+ # Configure edge selection
82
+ univariate_edge_selection = UnivariateEdgeSelection(
83
+ edge_statistic=["pearson"],
84
+ edge_selection=[PThreshold(threshold=[0.05], correction=["fdr_by"])]
85
+ )
86
+
87
+ # Create the CPMRegression object
88
+ cpm = CPMRegression(
89
+ results_directory="results/",
90
+ cv=KFold(n_splits=10, shuffle=True, random_state=42),
91
+ edge_selection=univariate_edge_selection,
92
+ n_permutations=100
93
+ )
94
+
95
+ # Run the analysis
96
+ X = ... # Connectome data
97
+ y = ... # Target variable
98
+ covariates = ... # Covariates
99
+ cpm.run(X, y, covariates)
100
+ ```
+
+ ## Contributing
+ Contributions are welcome! If you have ideas, feedback, or feature requests, feel free to open an issue or submit a pull request on the GitHub repository.
+
+
cccpm-0.2.1.dist-info/RECORD ADDED
@@ -0,0 +1,26 @@
+ cccpm/__init__.py,sha256=zwS8tQC5rU4GvlxSN1W67D3CqkMzrsvTEyFx-C_goFg,44
+ cccpm/cpm_analysis.py,sha256=gNvLQT6gAAHetd9m21hKW0VyTgDWEx_G_-fo4Hea5gk,13341
+ cccpm/edge_selection.py,sha256=00ZpDU1d0njcw8TIJG5Z_gtx_bI-ftX9AVb2ip3Uo3M,9578
+ cccpm/fold.py,sha256=43CgjrtjGgbTUccwq9jjM7XWCXzbGtZYpqMxm7QZS7s,2129
+ cccpm/logging.py,sha256=xo3MSBrom1PDhH68icALsOTFGI-ccJzSqEhuNQf1yR4,1375
+ cccpm/models.py,sha256=FMQplQ6JGnKRlizasu6IpzzISZP9w-9wnUu2N9q6Mp4,5867
+ cccpm/more_models.py,sha256=JC1NYdFMi3Mw661pSosLVQlbsZnk1mrIEaJcPGDs-4k,8194
+ cccpm/reporting/__init__.py,sha256=wqXR2WsP6-ZjM9H1eqx-eHn22US9aNGmupt96DilQWc,37
+ cccpm/reporting/assets/CCCPM.png,sha256=3jyQZeHUdxK0BNewHKu-0kgGMpZcyJXLgId6PdvKxtU,4253
+ cccpm/reporting/html_report.py,sha256=57UXwwDPQbtPJBxcM_0VrsDqe-0jyS4UqtoXlb1SuNo,17784
+ cccpm/reporting/plots/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ cccpm/reporting/plots/chord_v2.py,sha256=ROcoaN9NmPjl8bRBDWbfgwaFAVBNXJtYEpjfJLk_Ikw,36149
+ cccpm/reporting/plots/cpm_chord_plot.py,sha256=ekR2O4Iq4lmKuQLYP5Q3JXbop7KEjsfYVshP_8OTb0Q,5714
+ cccpm/reporting/plots/plots.py,sha256=gpCVNrAuKKZYzL7sip80AGK1XaRn5hkIK3jwvLv0b50,10858
+ cccpm/reporting/plots/utils.py,sha256=AqzXa2yPw-mp4kMTczZ_a-339aZjeIcksj0zdUuBK4U,505
+ cccpm/reporting/reporting_utils.py,sha256=XxYxylkZqB5WHw7XxGA3aX7n29QFlb0YKAvUQLNgpVY,4203
+ cccpm/results_manager.py,sha256=ZwHFUqNlhTPlRmM7xKz4vPOgNIut_ZGuyS2xu68QCVk,21000
+ cccpm/scoring.py,sha256=O2RPTjj-IoDS89SmaVgQls6SJejtnXHuUNtbXRDLvMI,1603
+ cccpm/simulation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ cccpm/simulation/simulate_multivariate.py,sha256=q59-UGCoXF9y92PVXv1-3PbDxKFBYR2xsxJVHECHT2M,8492
+ cccpm/simulation/simulate_sem.py,sha256=cL_ZvWjKA6UnqePEneQPPAxwGbFhl3ic1qKDeadSa7M,10019
+ cccpm/simulation/simulate_simple.py,sha256=jsv8kij1T2QWOXn38f7mJcLT5t53oGoMv1J83pIxDds,1306
+ cccpm/utils.py,sha256=xoDpWMKKCOJGBORGGy0hbVu5UksonGhyGIsFSLHZ1A0,13684
+ cccpm-0.2.1.dist-info/METADATA,sha256=Omde6xQpY1AM2QJ-sFccYBmYACHOXUVW5HCW3wpXcpA,4608
+ cccpm-0.2.1.dist-info/WHEEL,sha256=3ny-bZhpXrU6vSQ1UPG34FoxZBp3lVcvK0LkgUz6VLk,88
+ cccpm-0.2.1.dist-info/RECORD,,
cccpm-0.2.1.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: poetry-core 2.3.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any