csa-common-lib 2.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36) hide show
  1. csa_common_lib-2.0.0/PKG-INFO +6 -0
  2. csa_common_lib-2.0.0/README.md +61 -0
  3. csa_common_lib-2.0.0/csa_common_lib/__init__.py +15 -0
  4. csa_common_lib-2.0.0/csa_common_lib/classes/__init__.py +6 -0
  5. csa_common_lib-2.0.0/csa_common_lib/classes/prediction_options.py +253 -0
  6. csa_common_lib-2.0.0/csa_common_lib/classes/prediction_receipt.py +86 -0
  7. csa_common_lib-2.0.0/csa_common_lib/classes/prediction_results.py +82 -0
  8. csa_common_lib-2.0.0/csa_common_lib/enum_types/__init__.py +2 -0
  9. csa_common_lib-2.0.0/csa_common_lib/enum_types/exit_flags.py +49 -0
  10. csa_common_lib-2.0.0/csa_common_lib/enum_types/functions.py +32 -0
  11. csa_common_lib-2.0.0/csa_common_lib/enum_types/job_types.py +36 -0
  12. csa_common_lib-2.0.0/csa_common_lib/enum_types/results.py +47 -0
  13. csa_common_lib-2.0.0/csa_common_lib/helpers/__init__.py +0 -0
  14. csa_common_lib-2.0.0/csa_common_lib/helpers/_arrays.py +7 -0
  15. csa_common_lib-2.0.0/csa_common_lib/helpers/_conversions.py +128 -0
  16. csa_common_lib-2.0.0/csa_common_lib/helpers/_os.py +120 -0
  17. csa_common_lib-2.0.0/csa_common_lib/helpers/_vault.py +134 -0
  18. csa_common_lib-2.0.0/csa_common_lib/toolbox/__init__.py +0 -0
  19. csa_common_lib-2.0.0/csa_common_lib/toolbox/_notifier.py +173 -0
  20. csa_common_lib-2.0.0/csa_common_lib/toolbox/_validate.py +395 -0
  21. csa_common_lib-2.0.0/csa_common_lib/toolbox/classes/__init__.py +0 -0
  22. csa_common_lib-2.0.0/csa_common_lib/toolbox/classes/utilities.py +57 -0
  23. csa_common_lib-2.0.0/csa_common_lib/toolbox/concurrency/__init__.py +0 -0
  24. csa_common_lib-2.0.0/csa_common_lib/toolbox/concurrency/parallel_executor.py +111 -0
  25. csa_common_lib-2.0.0/csa_common_lib/toolbox/concurrency/parallel_helpers.py +119 -0
  26. csa_common_lib-2.0.0/csa_common_lib/toolbox/database/__init__.py +0 -0
  27. csa_common_lib-2.0.0/csa_common_lib/toolbox/database/information.py +68 -0
  28. csa_common_lib-2.0.0/csa_common_lib/toolbox/npz/__init__.py +0 -0
  29. csa_common_lib-2.0.0/csa_common_lib/toolbox/npz/io_operations.py +91 -0
  30. csa_common_lib-2.0.0/csa_common_lib.egg-info/PKG-INFO +6 -0
  31. csa_common_lib-2.0.0/csa_common_lib.egg-info/SOURCES.txt +34 -0
  32. csa_common_lib-2.0.0/csa_common_lib.egg-info/dependency_links.txt +1 -0
  33. csa_common_lib-2.0.0/csa_common_lib.egg-info/requires.txt +6 -0
  34. csa_common_lib-2.0.0/csa_common_lib.egg-info/top_level.txt +1 -0
  35. csa_common_lib-2.0.0/setup.cfg +4 -0
  36. csa_common_lib-2.0.0/setup.py +33 -0
@@ -0,0 +1,6 @@
1
+ Metadata-Version: 2.1
2
+ Name: csa_common_lib
3
+ Version: 2.0.0
4
+ Summary: csa_common_lib is a shared library designed to provide utility modules, class definitions, enumerations, and helper functions for the CSA Prediction Engine Python client. It standardizes and simplifies complex operations across different parts of the CSA Prediction Engine.
5
+ Author: Cambridge Sports Analytics
6
+ Author-email: prediction@csanalytics.io
@@ -0,0 +1,61 @@
1
+ # PSR_LIBRARY
2
+
3
+ The PSR Library is the core framework for calculating relevance-based prediction models and managing interactions with AWS Lambda functions. It includes several modules ranging from core math functions to end-user APIs and internal tools to support the operation of Cambridge Sports Analytics' prediction models.
4
+
5
+ ## Repository Structure
6
+
7
+ The repository is structured into several key components:
8
+
9
+ ### Key Directories
10
+
11
+ - **`_aws_layers/`**: Contains the AWS Lambda layers needed for dependencies such as Python packages to be used in Lambda functions.
12
+
13
+ - **`csa_common_lib/`**: A collection of common utilities and helper functions that can be reused across different modules in the repository, including shared classes, validation utilities, and enumerations.
14
+
15
+ - **`csanalytics/`**: The package intended for the end-user interaction, primarily to interface with Cambridge Sports Analytics' prediction engine API. It provides functions to call predictive models, fetch results, and handle user data.
16
+
17
+ - **`csanalytics_local/`**: Internal tools used for development and infrastructure management, meant for use by CSA engineers.
18
+
19
+ - **`lambda_functions/`**: Contains various AWS Lambda functions that handle job processing, submission, and results retrieval. These functions facilitate interaction with the PSR models through serverless operations.
20
+
21
+ - **`accessid_usage_handler/`**: Lambda function for handling access ID usage.
22
+ - **`filter_response/`**: Lambda function for filtering responses from job results.
23
+ - **`get_accessid_usage/`**: Retrieves access ID usage statistics.
24
+ - **`get_apikey_usage/`**: Retrieves API key usage statistics.
25
+ - **`get_job_results/`**: Fetches results of a job from the server.
26
+ - **`post_job/`**: Submits a job to the PSR prediction engine.
27
+ - **`process_job/`**: Processes a job and manages its state.
28
+ - **`start_state_machine_psr/`**: Starts an AWS Step Functions state machine to manage long-running jobs.
29
+
30
+ - **`psr/`**: The main library where core mathematical modules are implemented. This is the heart of the PSR system, containing the math and algorithms for relevance-based predictions.
31
+
32
+ - **`psr_lambda/`**: Helper functions and utilities for managing AWS Lambda functions specifically for PSR-related tasks. This package helps with the deployment and orchestration of Lambda functions for running predictions.
33
+
34
+ ## Getting Started
35
+
36
+ ### Cloning the Repository
37
+ To get started, clone this repository using:
38
+
39
+ ```bash
40
+ git clone https://github.com/CambridgeSportsAnalytics/PSR_LIBRARY.git
41
+ ```
42
+
43
+ ### Setting Up Your Environment
44
+
45
+ The repository includes various packages and functions that require dependencies for AWS Lambda and Python packages. Make sure you have the necessary Python environment set up, and install required dependencies using:
46
+
47
+ ```bash
48
+ pip install -r requirements.txt
49
+ ```
50
+
51
+ ### Running Lambda Functions
52
+
53
+ To interact with AWS Lambda functions, you can navigate to the lambda_functions/ directory and deploy the functions using the AWS CLI or your preferred deployment method (e.g., AWS SAM or Serverless Framework).
54
+
55
+ ## Documentation
56
+
57
+ For full documentation on each module and function, refer to the inline docstrings and module-specific README files located within each subdirectory. You can contact Cel Kulasekaran or Logan Waien for technical inquiries.
58
+
59
+ ## License
60
+
61
+ (c) 2023 - 2024 Cambridge Sports Analytics, LLC. All rights reserved.
@@ -0,0 +1,15 @@
1
+ """CSA Common Library
2
+ Shared option classes, result containers, and receipt utilities for the CSA Prediction Engine Python client.
3
+
4
+
5
+ """
6
+
7
+
8
+ # Options classes available for Optimal Variable Grid prediction,
9
+ # Max Fit prediction, and relevance-based prediction.
10
+ from .classes.prediction_options import GridOptions
11
+ from .classes.prediction_options import MaxFitOptions
12
+ from .classes.prediction_options import PredictionOptions
13
+
14
+ from .classes.prediction_results import PredictionResults
15
+ from .classes.prediction_receipt import PredictionReceipt
@@ -0,0 +1,6 @@
1
+ from .prediction_options import PredictionOptions
2
+ from .prediction_options import MaxFitOptions
3
+ from .prediction_options import GridOptions
4
+
5
+
6
+ from .prediction_results import PredictionResults
@@ -0,0 +1,253 @@
1
+ import copy
2
+ import numpy as np
3
+
4
+
5
class PredictionOptions:
    """A configurable options class for relevance-based predictions, including
    predict, maxfit, and grid models. This class provides a comprehensive
    list of all possible input parameters, ensuring flexibility across
    different prediction models. While some parameters are shared across
    inherited models, setting an unused option for a specific model
    will have no effect, ensuring compatibility and ease of use.

    Parameters
    ----------
    threshold : float or ndarray [1-by-T], optional (default=[0])
        Evaluation threshold to determine whether observations will be
        included or excluded from the censor function in the
        partial-sample regression. If threshold = None, the model
        will evaluate across thresholds from [0, 0.90) in 0.10 increments.
    is_threshold_percent : bool, optional (default=True)
        Specify whether threshold is in percentage (decimal) units.
    most_eval : bool, optional (default=True)
        Specify the direction of the censor evaluation of the threshold.
        True: [eval_type] score > threshold
        False: [eval_type] score < threshold
    eval_type : str, optional (default="both")
        Specify evaluation censor type, relevance, similarity, or both.
    adj_fit_multiplier : str, optional (default='K')
        Adjusted fit multiplier. Specify either 'log', 'K', or '1'.
    cov_inv : ndarray [K-by-K], optional (default=None)
        Inverse covariance matrix, specify for speed.

    Returns
    -------
    PredictionOptions
        Options class to organize and persist parameters used in the
        prediction models.

    Raises
    ------
    AttributeError
        When attempting to set or get an attribute that does not
        exist in the options dictionary.
    """

    def __init__(self, **kwargs):
        # Baseline defaults shared by all prediction models.
        self.options = {
            'threshold': [0],
            'is_threshold_percent': True,
            'most_eval': True,
            'eval_type': 'both',
            'adj_fit_multiplier': 'K',
            'cov_inv': None,
        }

        # Overlay any caller-supplied options on top of the defaults.
        self.options.update(kwargs)

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails; route reads
        # through the options dictionary.
        if name in self.__dict__:
            return self.__dict__[name]

        # Check if 'options' is in self.__dict__ to avoid KeyError during
        # unpickling / before __init__ has run.
        if 'options' in self.__dict__ and name in self.__dict__['options']:
            return self.__dict__['options'][name]

        raise AttributeError(f"'PredictionOptions' object has no attribute '{name}'")

    def __setattr__(self, name, value):
        # 'options' itself is a real instance attribute; every known option
        # key is stored inside the options dict; anything else is rejected.
        if name == "options":
            super().__setattr__(name, value)
        elif 'options' in self.__dict__ and name in self.options:
            self.options[name] = value
        else:
            raise AttributeError(f"'PredictionOptions' object has no attribute '{name}'")

    def display(self):
        """Print every option key/value pair to stdout."""
        for key, value in self.options.items():
            print(f"{key}: {value}")

    def init_from_dict(self, inputs):
        """Update this options object in place from a dictionary of inputs.

        Essentially an update method: keys that match an existing option
        overwrite it; unknown keys are silently ignored.

        Parameters
        ----------
        inputs : dict
            Dictionary of inputs, e.g. deconstructed in an AWS Lambda
            function.
        """
        for key, value in inputs.items():
            if hasattr(self, key):
                # BUGFIX: route through our __setattr__ so the options dict
                # is actually updated. The previous super().__setattr__ call
                # created a shadowing instance attribute and left
                # self.options (and display()) showing stale values.
                setattr(self, key, value)

    def clone_with(self, **kwargs):
        """Return a clone of this options object with user-specified
        attribute overwrites (via key/value pairs).

        Parameters
        ----------
        **kwargs
            Option names and values to overwrite in the cloned object.

        Returns
        -------
        PredictionOptions
            A new, independent options object of the same concrete class.
        """
        # Create a fresh instance of the concrete class (avoids the
        # recursive loop copy.deepcopy would hit via __getattr__).
        new_copy = self.__class__()

        # BUGFIX: deep-copy each attribute value. Previously the clone
        # shared the *same* options dict as the original, so overwriting
        # an option on the clone silently mutated the source object.
        for attr, value in self.__dict__.items():
            setattr(new_copy, attr, copy.deepcopy(value))

        # Apply the requested overwrites (unknown keys raise AttributeError,
        # matching __setattr__'s contract).
        for key, value in kwargs.items():
            setattr(new_copy, key, value)

        return new_copy
134
+
135
+
136
class MaxFitOptions(PredictionOptions):
    """Options for the max-fit prediction model.

    Inherits every shared option from PredictionOptions and layers on
    parameters specific to max-fit problems.

    Parameters
    ----------
    threshold : not applicable
        Max fit solves for the optimal threshold that maximizes the
        fit (or adjusted fit) value; the default is therefore None.
    threshold_range : tuple or ndarray, optional (default=(0, 0.20, 0.50, 0.80))
        Min/max range for evaluating the max-fit threshold. If an
        ndarray is passed in, max fit evaluates over the specified
        threshold values in the ndarray.
    stepsize : float, optional (default=0.20)
        Stepsize to evaluate the range of thresholds to solve for max
        fit. Decreasing stepsize increases the grid resolution.
    most_eval : bool, optional (default=True)
        Direction of threshold evaluation on the censor score (which is
        determined by eval_type). True: censor score > threshold;
        False: censor score < threshold.
    eval_type : str, optional (default="both")
        Censor threshold type: relevance, similarity, or both.
    cov_inv : ndarray [K-by-K], optional (default=None)
        Inverse covariance matrix, specify for speed.
    objective : str, optional (default="adjusted_fit")
        Objective function to optimize, either fit or adjusted_fit.

    Raises
    ------
    AttributeError
        When attempting to set or get an attribute that does not exist
        in the options dictionary.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Max-fit specific defaults layered on top of the shared baseline.
        maxfit_defaults = {
            'threshold': None,
            'threshold_range': np.array((0, 0.20, 0.50, 0.80), dtype='float32'),
            'stepsize': 0.20,
            'objective': 'adjusted_fit',
        }
        self.options.update(maxfit_defaults)

        # Re-apply caller overrides so they win over the defaults above.
        self.options.update(kwargs)
188
+
189
+
190
class GridOptions(MaxFitOptions):
    """Options for the grid (and grid singularity) prediction models.

    Inherits every option from MaxFitOptions and adds grid-search
    specific parameters.

    Parameters
    ----------
    threshold_range : tuple or ndarray, optional (default=(0, 0.20, 0.50, 0.80))
        Min/max range for evaluating the max-fit threshold. If an
        ndarray is passed in, the grid evaluates over the specified
        threshold values in the ndarray.
    stepsize : float, optional (default=0.20)
        Stepsize to evaluate the range of thresholds. Decreasing
        stepsize increases the granularity of the search. Not
        applicable if threshold_range is an ndarray.
    most_eval : bool, optional (default=True)
        Direction of threshold evaluation on the censor score (which is
        determined by eval_type). True: censor score > threshold;
        False: censor score < threshold.
    eval_type : str, optional (default="both")
        Censor threshold type: relevance, similarity, or both.
    cov_inv : ndarray [K-by-K], optional (default=None)
        Inverse covariance matrix, specify for speed.
    objective : str, optional (default="adjusted_fit")
        Objective function to optimize, either fit or adjusted_fit.
    attribute_combi : ndarray [Q-by-K], optional (default=None)
        Matrix of binary row vectors indicating variable choices; each
        row is a combination of variables to evaluate. If not
        specified, all possible combinations are evaluated.
    max_iter : int, optional (default=1_000_000)
        Maximum number of grid cells to evaluate. Since this is O(n^K)
        computational time, balance computation time and memory with
        the maximum number of cells to evaluate.
    k : int, optional (default=1)
        Lower bound for the number of variables to include for any
        combination Q.
    _is_retain_all_grid_objects : bool, optional (default=False)
        Saves and returns the weights grid for all censors — the
        largest matrix in yhat_details. Typically set to True only for
        audit or deep research and development purposes.

    Raises
    ------
    AttributeError
        When attempting to set or get an attribute that does not exist
        in the options dictionary.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Grid-search specific defaults layered on top of the max-fit set.
        grid_defaults = {
            'attribute_combi': None,
            'max_iter': 1_000_000,
            'k': 1,
            # True retains memory-expensive objects for audits or deep R&D.
            '_is_retain_all_grid_objects': False,
        }
        self.options.update(grid_defaults)

        # Re-apply caller overrides so they win over the defaults above.
        self.options.update(kwargs)
@@ -0,0 +1,86 @@
1
+ import numpy as np
2
+ import pickle
3
+ import json
4
+ import uuid
5
+ from datetime import datetime
6
+ from csa_common_lib.helpers._conversions import convert_ndarray_to_list
7
+ from csa_common_lib.helpers._os import is_valid_path, calc_crc64
8
+
9
class PredictionReceipt:
    """Saves and organizes input dimensions, prediction durations,
    timestamps, input options and more. This is meant to assist in
    the validation process of prediction results.

    Parameters
    ----------
    model_type : any
        Prediction model that was run (stored as str).
    y, X, theta : ndarray
        Model inputs; only their shapes and CRC-64 checksums are stored.
    options : PredictionOptions
        Input options object; its options dict is stored (ndarrays
        converted to lists).
    yhat : any
        Prediction output to record.
    prediction_duration : float, optional (default=None)
        Time to run a prediction, in seconds. Stored rounded to 3
        decimal places, or None when not measured.
    seed : int, optional (default=-1)
        User-provided seed, if applicable.

    Raises
    ------
    AttributeError
        When attempting to set or get an attribute that does not
        exist in the receipt dictionary.
    """

    def __init__(self, model_type, y, X, theta, options, yhat,
                 prediction_duration=None, seed: int = -1):
        self.prediction_id = str(uuid.uuid4())  # Unique id for the prediction request
        self.timestamp = str(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))  # Timestamp of the receipt
        # BUGFIX: the documented default (None) used to crash round(None, 3);
        # keep None when no duration was measured.
        self.prediction_duration = (round(prediction_duration, 3)
                                    if prediction_duration is not None else None)
        self.model_type = str(model_type)  # Prediction model that was run
        self.X_dim = X.shape  # Save input dimensions
        self.y_dim = y.shape  # Save input dimensions
        self.theta_dim = theta.shape  # Save input dimensions
        self.options = convert_ndarray_to_list(options.options)  # Save input options
        self.yhat = yhat  # Save output info
        # CRC-64 checksums of the pickled inputs for cross-checking requests.
        self.y_checksum = calc_crc64(pickle.dumps(y))
        self.X_checksum = calc_crc64(pickle.dumps(X))
        self.theta_checksum = calc_crc64(pickle.dumps(theta))
        self.seed = seed  # User provided (if applicable). Otherwise defaults to -1

    def display(self, detail: bool = False):
        """Display basic validation info.

        Parameters
        ----------
        detail : bool, optional (default=False)
            When False, lengthy members (input options and the yhat
            array) are excluded from the printout.
        """
        attributes = dir(self)

        # If the user does not request a detailed display(), remove
        # input options and the yhat array.
        if detail is False:
            remove_attributes = ['options', 'yhat']
            attributes = [attr for attr in attributes if attr not in remove_attributes]

        # Print out a menu of accessible attributes in the receipt.
        for attr in attributes:
            if not attr.startswith('__') and not callable(getattr(self, attr)):
                print(f"{attr}: {getattr(self, attr)}")

    def save_receipt(self, path: str = '', file_name: str = None):
        """Save the prediction receipt as a .json file.

        Parameters
        ----------
        path : str, optional (default='')
            Destination directory (must include a trailing separator,
            since it is concatenated directly with the file name).
        file_name : str, optional (default=None)
            Base file name; derived from the receipt timestamp when
            not supplied.
        """
        # Convert timestamp to filename if not supplied.
        if file_name is None:
            file_name = self.timestamp.replace(" ", "_").replace(":", "-")

        # Validate that the user supplied a valid path before saving .json.
        try:
            if path != '':
                is_valid_path(path)
        except (FileNotFoundError, PermissionError) as e:
            # BUGFIX: abort instead of falling through and attempting to
            # write to a path we just determined is invalid.
            print(f"Error: {e}")
            return

        # BUGFIX: build a JSON-serializable copy of the receipt instead of
        # permanently converting the live object's ndarray attributes to
        # lists as a save-time side effect.
        obj_dict = {
            key: (value.tolist() if isinstance(value, np.ndarray) else value)
            for key, value in self.__dict__.items()
        }

        # Save to a JSON file.
        with open(f'{path}{file_name}.json', 'w') as json_file:
            json.dump(obj_dict, json_file)
@@ -0,0 +1,82 @@
1
+ import numpy as np
2
+
3
class PredictionResults:
    """Stores an array of dictionaries containing prediction results
    and flattens specific keys (output_details' attributes)
    into their respective per-key list attributes.

    Parameters
    ----------
    results : dict or list of dict
        Raw prediction result payload(s). A single dict is wrapped
        into a one-element list.

    Raises
    ------
    TypeError
        If items in raw_data are not dictionaries.
    """

    def __init__(self, results):
        self.raw_data = results
        self._initialize_attributes()

        # Compute weights concentration (std-dev per result row).
        # BUGFIX: guard against empty results or payloads without a
        # 'weights' key, which previously raised AttributeError here.
        weights = getattr(self, 'weights', [])
        self.weights_concentration = [np.std(row) for row in weights]

    def _initialize_attributes(self):
        """Flatten raw result dicts into one list attribute per key."""
        # Nothing to do for an empty payload.
        if not self.raw_data:
            return

        # Normalize a single dict payload into a one-element list.
        if isinstance(self.raw_data, dict):
            first_item = self.raw_data
            self.raw_data = [self.raw_data]
        else:
            first_item = self.raw_data[0]

        if not isinstance(first_item, dict):
            raise TypeError("PredictionResults: Items in raw_data must be dictionaries")

        # Keys of the first result define which attributes get captured.
        allowed_keys = list(first_item.keys())

        for key in allowed_keys:
            values = []
            for item in self.raw_data:
                if key in item:
                    value = item[key]
                    # Collapse 1x1 ndarrays to plain scalars.
                    if isinstance(value, np.ndarray) and value.shape == (1, 1):
                        value = value[0][0]
                    values.append(value)
            setattr(self, key, values)

    def attributes(self):
        """Display a list of accessible attributes of the class.

        Returns
        -------
        list
            List of accessible attributes of the class.
        """
        attribute_list = [key for key in self.__dict__.keys() if not key.startswith('__')]
        return attribute_list

    def display(self):
        """Display key-value pairs of all accessible attributes of the class."""
        for attr in dir(self):
            if not attr.startswith('__') and not callable(getattr(self, attr)):
                print(f"{attr}: {getattr(self, attr)}")

    def __repr__(self):
        """Display a list of all accessible result keys."""
        # BUGFIX: an empty payload previously raised IndexError here.
        if not self.raw_data:
            return "\nResults:\n--------- \n(empty)\n--------- "
        attributes = "\n".join(f"- {key}" for key in self.raw_data[0].keys())
        return f"\nResults:\n--------- \n{attributes}\n--------- "
@@ -0,0 +1,2 @@
1
+ from .functions import PSRFunction
2
+ from .results import PSRResult
@@ -0,0 +1,49 @@
1
+ from enum import Enum
2
+
3
+ # http return codes
4
+ # from http import HTTPStatus
5
+
6
class AccessIDStatus(Enum):
    """Enumeration of Access ID status flags.

    Each member pairs a numeric status code with a human-readable
    message; int()/float() expose the code and str() the message.
    """

    VALID = (0, 'Access ID verified.')
    EXPIRED = (1, 'Access ID expired.')
    INVALID = (2, 'Invalid Access ID or Key.')

    def __int__(self):
        code, _ = self.value
        return code

    def __float__(self):
        return float(int(self))

    def __str__(self):
        _, message = self.value
        return message
26
+
27
+
28
class UserTokenStatus(Enum):
    """Enumeration of user token status flags.

    Each member pairs a numeric status code with a human-readable
    message; int()/float() expose the code and str() the message.
    """

    VALID = (0, 'Token verified.')
    INVALID = (1, 'Invalid token.')
    EXPIRED_ACCESS = (2, 'Expired token.')
    MAX_TOKEN = (3, 'Invalid token: Maximum number of tokens reached.')
    NON_EXISTENT = (4, 'Token does not exist.')

    def __int__(self):
        code, _ = self.value
        return code

    def __float__(self):
        return float(int(self))

    def __str__(self):
        _, message = self.value
        return message
@@ -0,0 +1,32 @@
1
+ from enum import Enum
2
+
3
+
4
class PSRFunction(Enum):
    """Enumeration of PSR library function types.

    Each member pairs a numeric identifier with the function's string
    label; int()/float() expose the identifier and str() the label.
    """

    PSR = (0, 'psr')
    MAXFIT = (1, 'maxfit')
    GRID = (2, 'grid')
    GRID_SINGULARITY = (3, 'grid_singularity')
    RELEVANCE = (4, 'relevance')
    SIMILARITY = (5, 'similarity')
    INFORMATIVENESS = (6, 'informativeness')
    FIT = (7, 'fit')
    ADJUSTED_FIT = (8, 'adjusted_fit')
    ASYMMETRY = (9, 'asymmetry')
    CO_OCCURENCE = (10, 'co-occurence')

    def __int__(self):
        code, _ = self.value
        return code

    def __float__(self):
        return float(int(self))

    def __str__(self):
        _, label = self.value
        return label
@@ -0,0 +1,36 @@
1
+ from enum import Enum
2
+
3
+
4
class JobType(Enum):
    """Enumeration of prediction task types.

    Provides the task types for the different prediction job modes.
    Each member pairs a numeric identifier with a string label;
    int()/float() expose the identifier and str() the label.

    Attributes
    ----------
    SINGLE : tuple
        Single prediction task type, identifier 0.
    MULTI_Y : tuple
        Multi-y prediction task type, identifier 1.
    MULTI_THETA : tuple
        Multi-theta prediction task type, identifier 2.
    """

    SINGLE = (0, 'single')
    MULTI_Y = (1, 'multi_y')
    MULTI_THETA = (2, 'multi_theta')

    def __int__(self):
        """Return the numerical identifier of the task type."""
        code, _ = self.value
        return code

    def __float__(self):
        """Return the numerical identifier of the task type as a float."""
        return float(int(self))

    def __str__(self):
        """Return the string label of the task type."""
        _, label = self.value
        return label
@@ -0,0 +1,47 @@
1
+ from enum import Enum
2
+
3
+ # 'weights': weights,
4
+ # 'relevance': r,
5
+ # 'similarity': simlr_x,
6
+ # 'info_x': info_x,
7
+ # 'info_theta': info_theta,
8
+ # 'include': include,
9
+ # 'lambda_sq': verify_row_vector(lambda_sq),
10
+ # 'n': verify_row_vector(n),
11
+ # 'phi': verify_row_vector(phi),
12
+ # 'r_star': verify_row_vector(r_star),
13
+ # 'r_star_percent': verify_row_vector(r_star_percent/100),
14
+ # 'most_eval': repmat(np.array([most_eval]), 1, r_star.size)
15
+
16
class PSRResult(Enum):
    """Enumeration of PSR library result types.

    Each member pairs a numeric identifier with the result key's
    string label; int()/float() expose the identifier and str() the
    label.
    """

    YHAT = (0, 'y_hat')
    FIT = (1, 'fit')
    WEIGHTS = (2, 'weights')
    RELEVANCE = (3, 'relevance')
    SIMILARITY = (4, 'similarity')
    INFO_X = (5, 'info_X')
    INFO_THETA = (6, 'info_theta')
    INCLUDE = (7, 'include')
    LAMBDA_SQ = (8, 'lambda_sq')
    N = (9, 'n')
    PHI = (10, 'phi')
    R_STAR = (11, 'r_star')
    R_STAR_PERCENT = (12, 'r_star_percent')
    ALL = (13, 'all')

    def __int__(self):
        code, _ = self.value
        return code

    def __float__(self):
        return float(int(self))

    def __str__(self):
        _, label = self.value
        return label
47
+