algomancy-scenario 0.3.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,37 @@
1
+ from algomancy_utils.baseparameterset import (
2
+ BaseParameterSet,
3
+ IntegerParameter,
4
+ StringParameter,
5
+ EnumParameter,
6
+ FloatParameter,
7
+ BooleanParameter,
8
+ )
9
+ from .algorithmfactory import AlgorithmFactory
10
+ from .keyperformanceindicator import KpiError, BaseKPI, BASE_KPI, ImprovementDirection
11
+ from .result import BaseScenarioResult, BASE_RESULT_BOUND, ScenarioResult
12
+ from .scenario import Scenario, ScenarioStatus
13
+ from .scenariomanager import ScenarioManager
14
+ from .basealgorithm import ALGORITHM, BaseAlgorithm
15
+
16
+
17
+ __all__ = [
18
+ "BaseParameterSet",
19
+ "IntegerParameter",
20
+ "StringParameter",
21
+ "EnumParameter",
22
+ "FloatParameter",
23
+ "BooleanParameter",
24
+ "BaseAlgorithm",
25
+ "ALGORITHM",
26
+ "AlgorithmFactory",
27
+ "ScenarioStatus",
28
+ "ImprovementDirection",
29
+ "KpiError",
30
+ "BaseKPI",
31
+ "BASE_KPI",
32
+ "BaseScenarioResult",
33
+ "BASE_RESULT_BOUND",
34
+ "ScenarioResult",
35
+ "Scenario",
36
+ "ScenarioManager",
37
+ ]
@@ -0,0 +1,53 @@
1
+ from algomancy_utils.baseparameterset import EmptyParameters
2
+ from typing import Dict, Any, List, Type, Generic
3
+
4
+ from algomancy_utils.logger import Logger
5
+ from .basealgorithm import ALGORITHM
6
+
7
+
8
+ class AlgorithmFactory(Generic[ALGORITHM]):
9
+ """
10
+ Creates algorithm instances from registered templates
11
+ """
12
+
13
+ def __init__(self, templates: Dict[str, Type[ALGORITHM]], logger: Logger = None):
14
+ self._templates: Dict[str, Type[ALGORITHM]] = templates
15
+ self._logger = logger
16
+
17
+ @property
18
+ def available_algorithms(self) -> List[str]:
19
+ return [str(key) for key in self._templates.keys()]
20
+
21
+ @property
22
+ def templates(self) -> Dict[str, Type[ALGORITHM]]:
23
+ return self._templates
24
+
25
+ def create(self, input_name: str, input_params: Dict[str, Any]) -> ALGORITHM:
26
+ """
27
+ Creates a configured algorithm instance from a registered template.
28
+ :param input_name: Name of the registered algorithm template to instantiate.
29
+ :param input_params: Raw parameter values to validate and apply to the algorithm's parameter set.
30
+ :raises AssertionError: If the algorithm template is not found or parameter validation fails.
31
+ :return: The instantiated algorithm.
32
+ """
33
+ template: Type[ALGORITHM] = (
34
+ self._templates[input_name] if input_name in self._templates else None
35
+ )
36
+ assert template, f"Algorithm template '{input_name}' not found."
37
+
38
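+ # Start from the template's default parameter set, then overlay the validated user-supplied values.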
+ algo_params = template.initialize_parameters()
39
+ algo_params.set_validated_values(input_params)
40
+
41
+ return template(algo_params)
42
+
43
+ def get_parameters(self, algo_name: str):
44
+ template: Type[ALGORITHM] = (
45
+ self._templates[algo_name] if algo_name in self._templates else None
46
+ )
47
+ assert template, f"Algorithm template '{algo_name}' not found."
48
+
49
+ algo_params = template.initialize_parameters()
50
+
51
+ data_params = EmptyParameters()
52
+
53
+ return algo_params, data_params
@@ -0,0 +1,74 @@
1
+ from abc import ABC, abstractmethod
2
+ from typing import TypeVar
3
+
4
+ from algomancy_data import BASE_DATA_BOUND
5
+ from .result import BASE_RESULT_BOUND
6
+ from algomancy_utils.baseparameterset import BASE_PARAMS_BOUND
7
+
8
+
9
+ class BaseAlgorithm(ABC):
10
+ def __init__(self, name: str, params: BASE_PARAMS_BOUND):
11
+ self._name: str = name
12
+ self.description = str(params.serialize())
13
+ self._params: BASE_PARAMS_BOUND = params
14
+ self._progress: float = 0
15
+
16
+ def __str__(self):
17
+ return f"{self.name} [{self._progress:.0f}%]: {self.description}"
18
+
19
+ @property
20
+ def params(self):
21
+ return self._params
22
+
23
+ @property
24
+ def get_progress(self) -> float:
25
+ return self._progress
26
+
27
+ @property
28
+ def name(self) -> str:
29
+ return self._name
30
+
31
+ def set_progress(self, progress: float):
32
+ assert 0 <= progress <= 100, "progress must be between 0 and 100"
33
+ self._progress = progress
34
+
35
+ def is_complete(self):
36
+ return self._progress == 100
37
+
38
+ def to_dict(self):
39
+ return {
40
+ "name": self.name,
41
+ "parameters": self._params.serialize(),
42
+ }
43
+
44
+ def healthcheck(self) -> bool:
45
+ return True
46
+
47
+ @staticmethod
48
+ @abstractmethod
49
+ def initialize_parameters() -> BASE_PARAMS_BOUND:
50
+ """
51
+ Initializes parameters for the derived Algorithm, which is necessary
52
+ for the GUI logic. It should simply return a default object of the
53
+ associated AlgorithmParameters class.
54
+
55
+ Example:
56
+ @staticmethod
57
+ def initialize_parameters() -> ExampleAlgorithmParams:
58
+ return ExampleAlgorithmParams()
59
+
60
+ Raises:
61
+ NotImplementedError: If the method is not overridden.
62
+
63
+ Returns:
64
+ BASE_PARAMS_BOUND: The initialized set of parameters, derived
65
+ from the BaseParameterSet class.
66
+ """
67
+ raise NotImplementedError("Abstract method")
68
+
69
+ @abstractmethod
70
+ def run(self, data: BASE_DATA_BOUND) -> BASE_RESULT_BOUND:
71
+ raise NotImplementedError("Abstract method")
72
+
73
+
74
+ ALGORITHM = TypeVar("ALGORITHM", bound=BaseAlgorithm)
@@ -0,0 +1,136 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ from typing import Any, Dict, List, Type
5
+
6
+ from algomancy_data import InputFileConfiguration, BASE_DATA_BOUND
7
+ from algomancy_scenario import AlgorithmFactory, ALGORITHM, BASE_KPI
8
+
9
+
10
+ class CoreConfiguration:
11
+ """
12
+ Core configuration shared by GUI and CLI.
13
+
14
+ Contains only fields that are not GUI- or CLI-specific.
15
+ Validation is executed on construction.
16
+ """
17
+
18
+ def __init__(
19
+ self,
20
+ # === path specifications ===
21
+ data_path: str = "data",
22
+ # === data manager configuration ===
23
+ has_persistent_state: bool = False,
24
+ save_type: str | None = "json",
25
+ data_object_type: type[BASE_DATA_BOUND] | None = None,
26
+ # === scenario manager configuration ===
27
+ etl_factory: Any | None = None,
28
+ kpi_templates: Dict[str, Type[BASE_KPI]] | None = None,
29
+ algo_templates: Dict[str, Type[ALGORITHM]] | None = None,
30
+ input_configs: List[InputFileConfiguration] | None = None,
31
+ # === auto start/create features ===
32
+ autocreate: bool | None = None,
33
+ default_algo: str | None = None,
34
+ default_algo_params_values: Dict[str, Any] | None = None,
35
+ autorun: bool | None = None,
36
+ # === misc (core) ===
37
+ title: str = "Algomancy",
38
+ **_: Any,
39
+ ) -> None:
40
+ # paths
41
+ self.data_path = data_path
42
+
43
+ # data / scenario manager
44
+ self.has_persistent_state = has_persistent_state
45
+ self.save_type = save_type
46
+ self.data_object_type = data_object_type
47
+ self.etl_factory = etl_factory
48
+ self.kpi_templates = kpi_templates
49
+ self.algo_templates = algo_templates
50
+ self.input_configs = input_configs
51
+ self.autocreate = autocreate
52
+ self.default_algo = default_algo
53
+ self.default_algo_params_values = default_algo_params_values
54
+ self.autorun = autorun
55
+
56
+ # misc
57
+ self.title = title
58
+
59
+ self._validate_core()
60
+
61
+ # ----- public API -----
62
+ def as_dict(self) -> Dict[str, Any]:
63
+ return {
64
+ "data_path": self.data_path,
65
+ "has_persistent_state": self.has_persistent_state,
66
+ "save_type": self.save_type,
67
+ "data_object_type": self.data_object_type,
68
+ "etl_factory": self.etl_factory,
69
+ "kpi_templates": self.kpi_templates,
70
+ "algo_templates": self.algo_templates,
71
+ "input_configs": self.input_configs,
72
+ "autocreate": self.autocreate,
73
+ "default_algo": self.default_algo,
74
+ "default_algo_params_values": self.default_algo_params_values,
75
+ "autorun": self.autorun,
76
+ "title": self.title,
77
+ }
78
+
79
+ # ----- validation -----
80
+ def _validate_core(self) -> None:
81
+ self._validate_paths_core()
82
+ self._validate_values_core()
83
+ self._validate_algorithm_parameters_core()
84
+
85
+ def _validate_paths_core(self) -> None:
86
+ if self.has_persistent_state:
87
+ if self.data_path is None or self.data_path == "":
88
+ raise ValueError("data_path must be provided")
89
+ if not os.path.isdir(self.data_path):
90
+ raise ValueError(
91
+ f"data_path does not exist or is not a directory: {self.data_path}"
92
+ )
93
+
94
+ def _validate_values_core(self) -> None:
95
+ # required non-null entries for scenario/data managers
96
+ required_fields = {
97
+ "etl_factory": self.etl_factory,
98
+ "kpi_templates": self.kpi_templates,
99
+ "algo_templates": self.algo_templates,
100
+ "input_configs": self.input_configs,
101
+ "data_object_type": self.data_object_type,
102
+ }
103
+ missing = [k for k, v in required_fields.items() if v is None]
104
+ if missing:
105
+ raise ValueError(
106
+ f"Missing required configuration fields: {', '.join(missing)}"
107
+ )
108
+
109
+ # booleans may be False, but must not be left unset (None)
110
+ for name, val in {
111
+ "has_persistent_state": self.has_persistent_state,
112
+ "autocreate": self.autocreate,
113
+ "autorun": self.autorun,
114
+ }.items():
115
+ if val is None:
116
+ raise ValueError(
117
+ f"Boolean configuration '{name}' must be set to True or False, not None"
118
+ )
119
+
120
+ # save type
121
+ if self.save_type is None:
122
+ raise ValueError("save_type must be set to 'json' or 'parquet'")
123
+ if self.save_type not in {"json", "parquet"}:
124
+ raise ValueError("save_type must be either 'json' or 'parquet'")
125
+
126
+ # title
127
+ if not isinstance(self.title, str) or self.title.strip() == "":
128
+ raise ValueError("title must be a non-empty string")
129
+
130
+ def _validate_algorithm_parameters_core(self) -> None:
131
+ if self.autocreate:
132
+ tmp_factory = AlgorithmFactory(self.algo_templates)
133
+ test_algorithm = tmp_factory.create(
134
+ self.default_algo, self.default_algo_params_values
135
+ )
136
+ assert test_algorithm.healthcheck(), "Failed to create default algorithm"
@@ -0,0 +1,136 @@
1
+ from abc import ABC, abstractmethod
2
+ from enum import StrEnum, auto
3
+ from typing import TypeVar
4
+
5
+ from .result import BASE_RESULT_BOUND
6
+ from algomancy_utils.unit import BaseMeasurement, Measurement, Unit
7
+
8
+
9
+ class ImprovementDirection(StrEnum):
10
+ HIGHER = auto()
11
+ LOWER = auto()
12
+ AT_LEAST = auto()
13
+ AT_MOST = auto()
14
+
15
+
16
+ class KpiError(Exception):
17
+ def __init__(self, message: str) -> None:
18
+ self.message = message
19
+ super().__init__(self.message)
20
+
21
+
22
+ class BaseKPI(ABC):
23
+ def __init__(
24
+ self,
25
+ name: str,
26
+ better_when: ImprovementDirection,
27
+ base_measurement: BaseMeasurement,
28
+ threshold: float | None = None,
29
+ ) -> None:
30
+ self._name = name
31
+ self._better_when = better_when
32
+ self._measurement = Measurement(base_measurement)
33
+ self._threshold = (
34
+ Measurement(base_measurement, threshold) if threshold is not None else None
35
+ )
36
+
37
+ def __str__(self):
38
+ return self.pretty()
39
+
40
+ @property
41
+ def measurement(self) -> Measurement:
42
+ return self._measurement
43
+
44
+ @property
45
+ def name(self) -> str:
46
+ return self._name
47
+
48
+ @property
49
+ def better_when(self) -> ImprovementDirection:
50
+ return self._better_when
51
+
52
+ @property
53
+ def value(self) -> float | None:
54
+ return self._measurement.value
55
+
56
+ @property
57
+ def is_binary_kpi(self) -> bool:
58
+ return self._better_when in [
59
+ ImprovementDirection.AT_MOST,
60
+ ImprovementDirection.AT_LEAST,
61
+ ]
62
+
63
+ @property
64
+ def success(self) -> bool:
65
+ # Check the validity of the call
66
+ if not self.is_binary_kpi:
67
+ raise ValueError(f"KPI success is not defined for {self.name}")
68
+ if self._threshold is None:
69
+ raise ValueError(f"KPI threshold is not defined for {self.name}")
70
+
71
+ # Compare with threshold and return
72
+ if self._better_when == ImprovementDirection.AT_MOST:
73
+ return self._measurement.value <= self._threshold.value
74
+ return self._measurement.value >= self._threshold.value
75
+
76
+ @value.setter
77
+ def value(self, value: float):
78
+ self._measurement.value = value
79
+
80
+ def get_threshold_str(self, unit: Unit | None = None) -> str:
81
+ if unit:
82
+ return str(self._threshold.scale_to_unit(unit))
83
+ else:
84
+ return self._threshold.pretty()
85
+
86
+ def pretty(self, unit: Unit | None = None) -> str:
87
+ if self.is_binary_kpi:
88
+ return "✓" if self.success else "✗"
89
+ return self.details(unit)
90
+
91
+ def get_pretty_unit(self) -> Unit:
92
+ return self._measurement.scale().unit
93
+
94
+ def details(self, unit: Unit | None = None) -> str | None:
95
+ if unit:
96
+ return str(self._measurement.scale_to_unit(unit))
97
+ else:
98
+ return self._measurement.pretty()
99
+
100
+ @abstractmethod
101
+ def compute(self, result: BASE_RESULT_BOUND) -> float:
102
+ raise NotImplementedError("Abstract method")
103
+
104
+ def compute_and_check(self, result: BASE_RESULT_BOUND) -> None:
105
+ """
106
+ Computes the key performance indicator (KPI) value from the provided result data
107
+ by invoking the KPI's compute method.
108
+
109
+ If an exception occurs during computation, an error message naming the
110
+ KPI is printed and a KpiError is raised to signal the failure. The computed
111
+ value must be numeric; otherwise a KpiError is raised as well.
112
+
113
+ :param result: The result data of the type required for KPI computation.
114
+ :type result: Derived from BaseScenarioResult
115
+ :raises KpiError: If an error occurs during the KPI computation.
116
+ """
117
+ try:
118
+ value = self.compute(result)
119
+ if not isinstance(value, (int, float)):
120
+ raise KpiError("KPI callback must return a numeric value.")
121
+ self.value = value
122
+ except Exception as e:
123
+ print(f"Error computing KPI {self.name}: {e}")
124
+ raise KpiError(f"Error computing KPI {self.name}")
125
+
126
+ def to_dict(self):
127
+ return {
128
+ "name": self.name,
129
+ "better_when": self.better_when.name,
130
+ "basis": self._measurement.base_measurement,
131
+ "value": self.value,
132
+ "threshold": self._threshold,
133
+ }
134
+
135
+
136
+ BASE_KPI = TypeVar("BASE_KPI", bound=BaseKPI)
@@ -0,0 +1,49 @@
1
+ from typing import Generic, Dict, Type, List
2
+
3
+ from .keyperformanceindicator import BASE_KPI
4
+
5
+
6
+ class KpiFactory(Generic[BASE_KPI]):
7
+ """
8
+ Factory class for creating KPI instances.
9
+
10
+ This class provides a mechanism to register multiple KPI templates and create
11
+ instances of those templates dynamically. It enables flexibility and reuse
12
+ of KPI-related components in a structured and modular way.
13
+
14
+ Attributes:
15
+ _templates (Dict[str, Type[BASE_KPI]]): Dictionary mapping template names
16
+ to their corresponding KPI classes.
17
+ """
18
+
19
+ def __init__(self, templates: Dict[str, Type[BASE_KPI]]):
20
+ self._templates = templates
21
+
22
+ def create_all(self):
23
+ """
24
+ Creates a dictionary of KPIs using predefined templates.
25
+
26
+ Returns:
27
+ dict: A dictionary where the keys are the original keys from the
28
+ `templates` dictionary, and the values are the results of calling
29
+ the template functions.
30
+ """
31
+ return {name: template() for name, template in self._templates.items()}
32
+
33
+ def create(self, subset: List[str]):
34
+ """
35
+ Creates a dictionary of template instances filtered by the specified subset.
36
+
37
+ Args:
38
+ subset (List[str]): A list of template names to include in the resulting
39
+ dictionary.
40
+
41
+ Returns:
42
+ dict: A dictionary where the keys are the names of the templates from the
43
+ subset and the values are their corresponding instantiated templates.
44
+ """
45
+ return {
46
+ name: template()
47
+ for name, template in self._templates.items()
48
+ if name in subset
49
+ }
File without changes
@@ -0,0 +1,27 @@
1
+ from abc import ABC, abstractmethod
2
+ from datetime import datetime
3
+ from typing import TypeVar
4
+
5
+
6
+ class BaseScenarioResult(ABC):
7
+ def __init__(self, data_id: str):
8
+ self.data_id = data_id
9
+ self.completed_at = datetime.now()
10
+
11
+ @abstractmethod
12
+ def to_dict(self):
13
+ raise NotImplementedError("Abstract method")
14
+
15
+
16
+ BASE_RESULT_BOUND = TypeVar("BASE_RESULT_BOUND", bound=BaseScenarioResult)
17
+
18
+
19
+ class ScenarioResult(BaseScenarioResult):
20
+ def __init__(self, data_id: str):
21
+ super().__init__(data_id)
22
+
23
+ def to_dict(self):
24
+ return {
25
+ "scenario_id": self.data_id,
26
+ "completed_at": self.completed_at,
27
+ }
@@ -0,0 +1,175 @@
1
+ """
2
+ scenario.py - Scenario Management
3
+
4
+ This module defines the Scenario class and related enums for managing simulation scenarios.
5
+ It provides functionality for creating, processing, and analyzing scenarios with different
6
+ algorithms and parameters.
7
+ """
8
+
9
+ import uuid
10
+ from enum import StrEnum, auto
11
+ from typing import Dict, Generic
12
+
13
+ from algomancy_utils.logger import Logger
14
+ from algomancy_data import BASE_DATA_BOUND
15
+ from .basealgorithm import ALGORITHM
16
+ from .keyperformanceindicator import BASE_KPI
17
+
18
+
19
+ class ScenarioStatus(StrEnum):
20
+ """
21
+ Constants representing the possible states of a scenario.
22
+ """
23
+
24
+ CREATED = auto()
25
+ QUEUED = auto()
26
+ PROCESSING = auto()
27
+ COMPLETE = auto()
28
+ FAILED = auto()
29
+
30
+
31
+ class Scenario(Generic[BASE_KPI]):
32
+ """
33
+ Represents a scenario with input data, algorithm, and results.
34
+
35
+ A scenario encapsulates the input data, processing algorithm, parameters,
36
+ and results of a simulation or analysis run.
37
+ """
38
+
39
+ def __init__(
40
+ self,
41
+ tag: str,
42
+ input_data: BASE_DATA_BOUND,
43
+ kpis: Dict[str, BASE_KPI],
44
+ algorithm: ALGORITHM,
45
+ provided_id: str = None,
46
+ ):
47
+ """
48
+ Initializes a new Scenario with the specified parameters.
49
+
50
+ Args:
51
+ tag (str): A user-defined label for the scenario
52
+ input_data (BASE_DATA_BOUND): The data source to use for the scenario. Derived from BaseDataSource.
53
+ kpis (Dict[str, BASE_KPI]): A dictionary of KPIs to compute for the scenario
54
+ algorithm (ALGORITHM): The algorithm instance to use for processing
55
+ provided_id (str): An optional unique identifier for the scenario. If not provided, a UUID will be generated.
56
+ """
57
+ self.id = provided_id if provided_id else str(uuid.uuid4())
58
+ self.tag = tag # user-defined label
59
+ self._input_data = input_data # includes raw or preprocessed data
60
+ self._kpis = kpis
61
+ self._algorithm = algorithm
62
+
63
+ self.status = ScenarioStatus.CREATED
64
+ self.result = None
65
+
66
+ def __str__(self):
67
+ return f"Scenario: {self.tag} ({str(self._algorithm)}"
68
+
69
+ @property
70
+ def input_data_key(self) -> str:
71
+ return self._input_data.name
72
+
73
+ @property
74
+ def data_source(self) -> BASE_DATA_BOUND:
75
+ return self._input_data
76
+
77
+ @property
78
+ def algorithm_description(self) -> str:
79
+ return self._algorithm.description
80
+
81
+ @property
82
+ def kpis(self) -> Dict[str, BASE_KPI]:
83
+ return self._kpis
84
+
85
+ @property
86
+ def progress(self) -> float:
87
+ return self._algorithm.get_progress
88
+
89
+ def set_queued(self):
90
+ self.status = ScenarioStatus.QUEUED
91
+
92
+ def process(self, logger: Logger = None):
93
+ """
94
+ Processes the scenario using the specified algorithm.
95
+
96
+ This method runs the algorithm synchronously, updates the scenario status,
97
+ and computes KPIs based on the results.
98
+
99
+ Exceptions during processing are caught, and the scenario status is set to FAILED.
100
+ """
101
+ if not (
102
+ self.status == ScenarioStatus.CREATED
103
+ or self.status == ScenarioStatus.QUEUED
104
+ ):
105
+ return
106
+
107
+ self.status = ScenarioStatus.PROCESSING
108
+ try:
109
+ self.result = self._algorithm.run(self._input_data)
110
+ self.compute_kpis()
111
+ self.status = ScenarioStatus.COMPLETE
112
+ except Exception as e:
113
+ self.status = ScenarioStatus.FAILED
114
+ if logger:
115
+ logger.error(f"Scenario '{self.tag}' failed to process: {str(e)}")
116
+ self.result = {"error": str(e)}
117
+
118
+ def cancel(self, logger: Logger = None):
119
+ if logger:
120
+ logger.warning(f"Not Yet Implemented: Scenario {self.tag} cancel")
121
+ pass
122
+
123
+ def refresh(self, logger: Logger = None):
124
+ self.status = ScenarioStatus.CREATED
125
+ self.result = None
126
+ if logger:
127
+ logger.log(f"Refreshed scenario {self.tag}")
128
+
129
+ def compute_kpis(self):
130
+ """
131
+ Calculates key performance indicators (KPIs) for the given scenario.
132
+
133
+ Raises:
134
+ ValueError: If there is no result available for the scenario.
135
+ KpiError: If one or more KPI calculations fail.
136
+ """
137
+ if not self.result:
138
+ raise ValueError("Scenario result is not available")
139
+
140
+ for kpi in self._kpis.values():
141
+ kpi.compute_and_check(self.result)
142
+
143
+ def to_dict(self) -> dict:
144
+ """
145
+ Converts the attributes of the instance into a dictionary representation.
146
+
147
+ This method creates a dictionary containing the key attributes of the instance by
148
+ converting them into a serializable format. Attributes that have a `to_dict` method
149
+ are recursively processed. If some attributes do not exist or cannot be accessed,
150
+ they may return `None`.
151
+
152
+ Returns:
153
+ dict: A dictionary representation of the instance's attributes.
154
+ """
155
+ return {
156
+ "id": self.id,
157
+ "tag": self.tag,
158
+ "input_data_id": self._input_data.id
159
+ if hasattr(self._input_data, "id")
160
+ else None,
161
+ "kpis": {
162
+ k: v.to_dict() if hasattr(v, "to_dict") else v
163
+ for k, v in self._kpis.items()
164
+ },
165
+ "algorithm": self._algorithm.to_dict()
166
+ if hasattr(self._algorithm, "to_dict")
167
+ else self._algorithm,
168
+ "status": self.status,
169
+ "result": self.result.to_dict()
170
+ if hasattr(self.result, "to_dict")
171
+ else self.result,
172
+ }
173
+
174
+ def is_completed(self) -> bool:
175
+ return self.status == ScenarioStatus.COMPLETE
@@ -0,0 +1,76 @@
1
+ from typing import Dict, List, Optional, Type
2
+
3
+ from algomancy_utils.logger import Logger
4
+ from algomancy_data import DataManager
5
+
6
+ from .algorithmfactory import AlgorithmFactory
7
+ from .basealgorithm import ALGORITHM
8
+ from .keyperformanceindicator import BASE_KPI
9
+ from .kpifactory import KpiFactory
10
+ from .scenario import Scenario
11
+
12
+
13
+ class ScenarioFactory:
14
+ """
15
+ Creates scenarios, builds algorithms and KPIs, and performs parameter validation.
16
+ """
17
+
18
+ def __init__(
19
+ self,
20
+ kpi_templates: Dict[str, Type[BASE_KPI]],
21
+ algo_templates: Dict[str, Type[ALGORITHM]],
22
+ data_manager: DataManager,
23
+ logger: Logger | None = None,
24
+ ):
25
+ self.logger = logger
26
+ self._kpi_factory = KpiFactory(kpi_templates)
27
+ self._algorithm_factory = AlgorithmFactory(algo_templates, logger)
28
+ self._data_manager = data_manager
29
+
30
+ @property
31
+ def available_algorithms(self) -> List[str]:
32
+ return self._algorithm_factory.available_algorithms
33
+
34
+ @property
35
+ def algo_templates(self) -> Dict[str, Type[ALGORITHM]]:
36
+ return self._algorithm_factory.templates
37
+
38
+ def log(self, msg: str):
39
+ if self.logger:
40
+ self.logger.log(msg)
41
+
42
+ def create(
43
+ self,
44
+ tag: str,
45
+ dataset_key: str,
46
+ algo_name: str,
47
+ algo_params: Optional[dict] = None,
48
+ ) -> Scenario:
49
+ if algo_params is None:
50
+ algo_params = {}
51
+
52
+ assert (
53
+ algo_name in self.available_algorithms
54
+ ), f"Algorithm '{algo_name}' not found."
55
+ assert (
56
+ dataset_key in self._data_manager.get_data_keys()
57
+ ), f"Data '{dataset_key}' not found."
58
+
59
+ algorithm = self._algorithm_factory.create(
60
+ input_name=algo_name,
61
+ input_params=algo_params,
62
+ )
63
+
64
+ kpi_dict = self._kpi_factory.create_all()
65
+
66
+ scenario = Scenario(
67
+ tag=tag,
68
+ input_data=self._data_manager.get_data(dataset_key),
69
+ kpis=kpi_dict,
70
+ algorithm=algorithm,
71
+ )
72
+ self.log(f"Scenario '{scenario.tag}' created.")
73
+ return scenario
74
+
75
+ def get_associated_parameters(self, algo_name: str):
76
+ return self._algorithm_factory.get_parameters(algo_name)
@@ -0,0 +1,373 @@
1
+ from typing import Any, Dict, List, Optional, TypeVar, Type
2
+
3
+ from algomancy_data import (
4
+ ETLFactory,
5
+ InputFileConfiguration,
6
+ StatefulDataManager,
7
+ StatelessDataManager,
8
+ BASE_DATA_BOUND,
9
+ )
10
+ from algomancy_utils.logger import Logger, MessageStatus
11
+ from .basealgorithm import ALGORITHM
12
+ from algomancy_utils.baseparameterset import BASE_PARAMS_BOUND
13
+
14
+ from .keyperformanceindicator import BASE_KPI
15
+ from .scenario import Scenario
16
+ from .scenarioregistry import ScenarioRegistry
17
+ from .scenariofactory import ScenarioFactory
18
+ from .scenarioprocessor import ScenarioProcessor
19
+
20
+
21
+ class ScenarioManager:
22
+ """
23
+ Facade that coordinates data management, scenario creation/registry, and processing.
24
+ """
25
+
26
+ E = TypeVar("E", bound=ETLFactory)
27
+
28
+ @classmethod
29
+ def from_config(cls, cfg) -> "ScenarioManager":
30
+ return cls(
31
+ etl_factory=cfg.etl_factory,
32
+ kpi_templates=cfg.kpi_templates,
33
+ algo_templates=cfg.algo_templates,
34
+ input_configs=cfg.input_configs,
35
+ data_object_type=cfg.data_object_type,
36
+ data_folder=cfg.data_path,
37
+ has_persistent_state=cfg.has_persistent_state,
38
+ save_type=cfg.save_type,
39
+ autocreate=cfg.autocreate,
40
+ default_algo_name=cfg.default_algo,
41
+ default_param_values=cfg.default_algo_params_values,
42
+ autorun=cfg.autorun,
43
+ )
44
+
45
+ def __init__(
46
+ self,
47
+ etl_factory: type[E],
48
+ kpi_templates: Dict[str, Type[BASE_KPI]],
49
+ algo_templates: Dict[str, Type[ALGORITHM]],
50
+ input_configs: List[InputFileConfiguration],
51
+ data_object_type: type[BASE_DATA_BOUND], # for extensions of datasource
52
+ data_folder: str = None,
53
+ logger: Logger = None,
54
+ scenario_save_location: str = "scenarios.json",
55
+ has_persistent_state: bool = False,
56
+ save_type: str = "json", # adjusts the format
57
+ autocreate: bool = False,
58
+ default_algo_name: str = None,
59
+ default_param_values: Dict[str, Any] = None,
60
+ autorun: bool = False,
61
+ ) -> None:
62
+ self.logger = logger if logger else Logger()
63
+ self.scenario_save_location = scenario_save_location
64
+ self._has_persistent_state = has_persistent_state
65
+ self._auto_create_scenario = autocreate
66
+ self._default_algo_name = default_algo_name
67
+ self._default_param_values = default_param_values
68
+
69
+ assert save_type in ["json"], "Save type must be parquet or json."
70
+ self._save_type = save_type
71
+
72
+ # Components
73
+ if self._has_persistent_state:
74
+ assert data_folder, (
75
+ "Data folder must be specified if data manager has state."
76
+ )
77
+ self._dm = StatefulDataManager(
78
+ etl_factory=etl_factory,
79
+ input_configs=input_configs,
80
+ data_folder=data_folder,
81
+ save_type=save_type,
82
+ data_object_type=data_object_type,
83
+ logger=self.logger,
84
+ )
85
+ else:
86
+ self._dm = StatelessDataManager(
87
+ etl_factory=etl_factory,
88
+ input_configs=input_configs,
89
+ save_type=save_type,
90
+ logger=self.logger,
91
+ data_object_type=data_object_type,
92
+ )
93
+
94
+ self._registry = ScenarioRegistry(logger=self.logger)
95
+ self._factory = ScenarioFactory(
96
+ kpi_templates=kpi_templates,
97
+ algo_templates=algo_templates,
98
+ data_manager=self._dm,
99
+ logger=self.logger,
100
+ )
101
+ self._processor = ScenarioProcessor(logger=self.logger)
102
+ self.toggle_autorun(autorun)
103
+
104
+ # Keep inputs for accessors
105
+ # self._algo_templates = algo_templates
106
+ self._input_configs = input_configs
107
+
108
+ # Load initial data
109
+ try:
110
+ self._dm.startup()
111
+ if self._auto_create_scenario:
112
+ self.auto_create_scenarios(self._dm.get_data_keys())
113
+ except Exception as e:
114
+ self.log(f"Error loading initial data: {e}", status=MessageStatus.ERROR)
115
+
116
+ self.log("ScenarioManager initialized.")
117
+
118
+ # Logging
119
+ def log(self, message: str, status: MessageStatus = MessageStatus.INFO) -> None:
120
+ if self.logger:
121
+ self.logger.log(message, status)
122
+
123
+ @property
124
+ def has_persistent_state(self):
125
+ return self._has_persistent_state
126
+
127
+ # Accessors
128
+ @property
129
+ def save_type(self):
130
+ return self._save_type
131
+
132
+ @property
133
+ def input_configurations(self):
134
+ return self._input_configs
135
+
136
+ @property
137
+ def available_algorithms(self):
138
+ return self._factory.available_algorithms
139
+
140
+ @property
141
+ def auto_run_scenarios(self):
142
+ return self._processor.auto_run_scenarios
143
+
144
+ @property
145
+ def currently_processing(self) -> Optional[Scenario]:
146
+ return self._processor.currently_processing
147
+
148
+ def get_algorithm_parameters(self, key) -> BASE_PARAMS_BOUND:
149
+ return self._factory.algo_templates.get(key).initialize_parameters()
150
+
151
+ # Data operations (delegated)
152
+ def get_data_keys(self) -> List[str]:
153
+ return self._dm.get_data_keys()
154
+
155
+ def get_data(self, data_key):
156
+ return self._dm.get_data(data_key)
157
+
158
+ def set_data(self, data_key, data):
159
+ self._dm.set_data(data_key, data)
160
+
161
+ def derive_data(self, derive_from_key: str, new_data_key: str) -> None:
162
+ self._dm.derive_data(derive_from_key, new_data_key)
163
+ if self._auto_create_scenario:
164
+ self.auto_create_scenarios([new_data_key])
165
+
166
+ def delete_data(
167
+ self, data_key: str, prevent_masterdata_removal: bool = False
168
+ ) -> None:
169
+ # prevent delete if used by scenarios
170
+ assert data_key not in self._registry.used_datasets(), (
171
+ "Cannot delete data used in scenarios."
172
+ )
173
+ self._dm.delete_data(data_key, prevent_masterdata_removal)
174
+
175
+ def store_data(self, dataset_name: str, data):
176
+ if isinstance(self._dm, StatefulDataManager):
177
+ self._dm.store_data(dataset_name, data)
178
+ else:
179
+ if self.logger:
180
+ self.logger.error(
181
+ "Store data is not supported for stateless data manager. "
182
+ )
183
+ pass
184
+
185
+ def toggle_autorun(self, value: bool = None) -> None:
186
+ if value is None:
187
+ self._processor.auto_run_scenarios = not self._processor.auto_run_scenarios
188
+ else:
189
+ self._processor.auto_run_scenarios = value
190
+ self.log(f"Auto-run scenarios set to {self._processor.auto_run_scenarios}")
191
+
192
+ # Processing operations (delegated)
193
+ def process_scenario_async(self, scenario):
194
+ self._processor.enqueue(scenario)
195
+
196
+ def wait_for_processing(self):
197
+ self._processor.wait_for_processing()
198
+
199
+ def shutdown_processing(self):
200
+ self._processor.shutdown()
201
+
202
+ # Scenario creation/registry
203
+ def get_associated_parameters(self, algo_name: str):
204
+ return self._factory.get_associated_parameters(algo_name)
205
+
206
+ def create_scenario(
207
+ self,
208
+ tag: str,
209
+ dataset_key: str = "Master data",
210
+ algo_name: str = "",
211
+ algo_params=None,
212
+ ) -> Scenario:
213
+ if self._registry.has_tag(tag):
214
+ self.log(f"Scenario with tag '{tag}' already exists. Skipping creation.")
215
+ raise ValueError(f"A scenario with tag '{tag}' already exists.")
216
+
217
+ scenario = self._factory.create(
218
+ tag=tag,
219
+ dataset_key=dataset_key,
220
+ algo_name=algo_name,
221
+ algo_params=algo_params,
222
+ )
223
+ self._registry.add(scenario)
224
+
225
+ if self._processor.auto_run_scenarios:
226
+ self._processor.enqueue(scenario)
227
+ return scenario
228
+
229
+ def get_by_id(self, scenario_id: str) -> Optional[Scenario]:
230
+ return self._registry.get_by_id(scenario_id)
231
+
232
+ def get_by_tag(self, tag: str) -> Optional[Scenario]:
233
+ return self._registry.get_by_tag(tag)
234
+
235
+ def delete_scenario(self, scenario_id: str) -> bool:
236
+ return self._registry.delete(scenario_id)
237
+
238
+ def list_scenarios(self) -> List[Scenario]:
239
+ return self._registry.list()
240
+
241
+ def list_ids(self):
242
+ return self._registry.list_ids()
243
+
244
+ def toggle_autocreate(
245
+ self, value: bool = None, default_algo_name: str = ""
246
+ ) -> None:
247
+ if value is None:
248
+ self._auto_create_scenario = not self._auto_create_scenario
249
+ else:
250
+ self._auto_create_scenario = value
251
+ self._default_algo_name = (
252
+ default_algo_name if self._auto_create_scenario else None
253
+ )
257
+ self.log(f"Auto-create scenarios set to {self._auto_create_scenario}")
258
+
259
+ def add_datasource_from_json(self, json_string):
260
+ # Create data source from JSON
261
+ datasource = self._dm.data_object_type.from_json(json_string)
262
+
263
+ # Add data source to datamanager
264
+ self._dm.add_data_source(datasource)
265
+
266
+ # create scenario if auto-create is enabled
267
+ if self._auto_create_scenario:
268
+ self.auto_create_scenarios([datasource.name])
269
+
270
+ def etl_data(self, files, dataset_name: str) -> None:
271
+ # Process the files
272
+ self._dm.etl_data(files, dataset_name)
273
+
274
+ # create scenario if auto-create is enabled
275
+ if self._auto_create_scenario:
276
+ self.auto_create_scenarios([dataset_name])
277
+
278
+ def auto_create_scenarios(self, keys: List[str] = None):
279
+ for key in keys or []:
280
+ self.create_scenario(
281
+ tag=f"{key} [auto]",
282
+ dataset_key=key,
283
+ algo_name=self._default_algo_name,
284
+ algo_params=self._default_param_values,
285
+ )
286
+
287
+ def get_data_as_json(self, key: str) -> str:
288
+ return self._dm.get_data(key).to_json()
289
+
290
+ def store_data_as_json(self, set_name):
291
+ if isinstance(self._dm, StatefulDataManager):
292
+ self._dm.store_data_source_as_json(set_name)
293
+ else:
294
+ raise AttributeError(
295
+ "Stateless data manager does not support internal serialization."
296
+ )
297
+
298
+ def debug_load_data(self, dataset_name: str) -> None:
299
+ if isinstance(self._dm, StatefulDataManager):
300
+ self._dm.load_data_from_dir(dataset_name)
301
+ elif isinstance(self._dm, StatelessDataManager):
302
+ raise NotImplementedError(
303
+ "Todo: implement loading for stateless data manager."
304
+ )
305
+ else:
306
+ raise Exception("Data manager not initialized.")
307
+
308
+ def debug_create_and_run_scenario(
309
+ self,
310
+ scenario_tag: str,
311
+ dataset_key: str,
312
+ algo_name: str,
313
+ algo_params: Dict[str, Any],
314
+ ) -> Scenario:
315
+ """
316
+ Creates and runs a scenario for debugging purposes. The method uses a factory to create a
317
+ scenario instance, registers it, enqueues it for processing, and waits for the processing to
318
+ complete. Returns the fully processed scenario.
319
+
320
+ Parameters:
321
+ scenario_tag (str): A unique identifier for the scenario being created and run.
322
+ dataset_key (str): The key for the dataset to be used in the scenario.
323
+ algo_name (str): The name of the algorithm to be applied in the scenario.
324
+ algo_params (Dict): Additional parameters for the algorithm.
325
+
326
+ Returns:
327
+ Scenario: The fully processed scenario created and executed within this method.
328
+ """
329
+ scenario = self._factory.create(
330
+ tag=scenario_tag,
331
+ dataset_key=dataset_key,
332
+ algo_name=algo_name,
333
+ algo_params=algo_params,
334
+ )
335
+ self._registry.add(scenario)
336
+ self._processor.enqueue(scenario)
337
+ self.wait_for_processing()
338
+ return scenario
339
+
340
+ def debug_etl_data(self, dataset_name: str) -> None:
341
+ """
342
+ Debugging utility to run ETL on a directory as if loaded on startup.
343
+ """
344
+ # Retrieve files from directory
345
+ if isinstance(self._dm, StatefulDataManager):
346
+ self._dm.load_data_from_dir(dataset_name)
347
+ else:
348
+ raise AttributeError(
349
+ "Stateless data manager does not support internal ETL."
350
+ )
351
+
352
+ def debug_load_serialized_data(self, file_name: str):
353
+ """
354
+ Debugging utility to upload a file as if loaded on startup.
355
+ """
356
+ if isinstance(self._dm, StatefulDataManager):
357
+ self._dm.load_data_from_file(file_name)
358
+ else:
359
+ raise AttributeError(
360
+ "Stateless data manager does not support internal deserialization."
361
+ )
362
+
363
+ def debug_import_data(self, directory: str) -> None:
364
+ """
365
+ Debugging utility to import data from a directory.
366
+ """
367
+ raise NotImplementedError("todo: write import data method")
368
+
369
+ def debug_upload_data(self, file_name: str) -> None:
370
+ """
371
+ Debugging utility to upload data from a file.
372
+ """
373
+ raise NotImplementedError("todo: write upload data method")
@@ -0,0 +1,66 @@
1
+ import queue
2
+ import threading
3
+ from typing import Optional
4
+
5
+ from algomancy_utils.logger import Logger
6
+
7
+ from .scenario import Scenario
8
+
9
+
10
+ class ScenarioProcessor:
11
+ """
12
+ Manages the processing queue, runs scenarios asynchronously, and tracks status.
13
+ """
14
+
15
+ def __init__(self, logger: Logger | None = None):
16
+ self.logger = logger
17
+ self._process_queue: queue.Queue[Scenario | None] = queue.Queue()
18
+ self._worker_thread = threading.Thread(
19
+ target=self._process_scenarios_worker, daemon=True
20
+ )
21
+ self._currently_processing: Optional[Scenario] = None
22
+ self._auto_run_scenarios = False
23
+ self._worker_thread.start()
24
+
25
+ # Properties
26
+ @property
27
+ def auto_run_scenarios(self) -> bool:
28
+ return self._auto_run_scenarios
29
+
30
+ @auto_run_scenarios.setter
31
+ def auto_run_scenarios(self, value: bool):
32
+ self._auto_run_scenarios = value
33
+
34
+ @property
35
+ def currently_processing(self) -> Optional[Scenario]:
36
+ return self._currently_processing
37
+
38
+ # Worker
39
+ def _process_scenarios_worker(self):
40
+ while True:
41
+ scenario = self._process_queue.get()
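+ # A None item is the shutdown sentinel posted by shutdown(); exit the worker loop.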
42
+ if scenario is None:
43
+ break
44
+
45
+ if self.logger:
46
+ self.logger.log(f"Processing scenario '{scenario.tag}'...")
47
+ self._currently_processing = scenario
48
+
49
+ scenario.process(logger=self.logger)
50
+
51
+ if self.logger:
52
+ self.logger.log(f"Scenario '{scenario.tag}' completed.")
53
+ self._currently_processing = None
54
+ self._process_queue.task_done()
55
+
56
+ # API
57
+ def enqueue(self, scenario: Scenario):
58
+ scenario.set_queued()
59
+ self._process_queue.put(scenario)
60
+
61
+ def wait_for_processing(self):
62
+ self._process_queue.join()
63
+
64
+ def shutdown(self):
65
+ self._process_queue.put(None)
66
+ self._worker_thread.join()
@@ -0,0 +1,54 @@
1
+ from typing import Dict, List, Optional
2
+
3
+ from algomancy_utils.logger import Logger
4
+ from .scenario import Scenario
5
+
6
+
7
+ class ScenarioRegistry:
8
+ """
9
+ Stores and retrieves scenarios and maintains indices.
10
+ """
11
+
12
+ def __init__(self, logger: Logger | None = None):
13
+ self.logger = logger
14
+ self._scenarios: Dict[str, Scenario] = {}
15
+ self._tag_index: Dict[str, str] = {}
16
+
17
+ def log(self, msg: str):
18
+ if self.logger:
19
+ self.logger.log(msg)
20
+
21
+ # CRUD
22
+ def add(self, scenario: Scenario) -> None:
23
+ self._scenarios[scenario.id] = scenario
24
+ self._tag_index[scenario.tag] = scenario.id
25
+ self.log(f"Registered scenario '{scenario.tag}'.")
26
+
27
+ def get_by_id(self, scenario_id: str) -> Optional[Scenario]:
28
+ return self._scenarios.get(scenario_id)
29
+
30
+ def get_by_tag(self, tag: str) -> Optional[Scenario]:
31
+ scenario_id = self._tag_index.get(tag)
32
+ return self.get_by_id(scenario_id) if scenario_id else None
33
+
34
+ def delete(self, scenario_id: str) -> bool:
35
+ if scenario_id in self._scenarios:
36
+ tag = self._scenarios[scenario_id].tag
37
+ del self._scenarios[scenario_id]
38
+ if tag in self._tag_index:
39
+ del self._tag_index[tag]
40
+ self.log(f"Deleted scenario '{tag}'.")
41
+ return True
42
+ return False
43
+
44
+ def list(self) -> List[Scenario]:
45
+ return list(self._scenarios.values())
46
+
47
+ def list_ids(self) -> List[str]:
48
+ return list(self._scenarios.keys())
49
+
50
+ def has_tag(self, tag: str) -> bool:
51
+ return tag in self._tag_index
52
+
53
+ def used_datasets(self) -> List[str]:
54
+ return [s.input_data_key for s in self._scenarios.values()]
@@ -0,0 +1,88 @@
1
+ Metadata-Version: 2.3
2
+ Name: algomancy-scenario
3
+ Version: 0.3.13
4
+ Summary: Scenario management model for the Algomancy library
5
+ Author: Pepijn Wissing
6
+ Author-email: Pepijn Wissing <Wsg@cqm.nl>
7
+ Requires-Dist: algomancy-utils
8
+ Requires-Dist: algomancy-data
9
+ Requires-Python: >=3.14
10
+ Description-Content-Type: text/markdown
11
+
12
+ ### algomancy-scenario
13
+
14
+ Scenario modeling utilities for Algomancy: define algorithms and parameters, run scenarios against data, and compute KPIs.
15
+
16
+ #### Features
17
+ - `Scenario` lifecycle with statuses (`CREATED`, `QUEUED`, `PROCESSING`, `COMPLETE`, `FAILED`)
18
+ - `BaseAlgorithm` and parameter classes to define pluggable algorithms
19
+ - KPI framework (`BaseKPI`) to compute metrics from algorithm results
20
+ - Works with `algomancy-data` data sources and can be orchestrated from the GUI
21
+
22
+ #### Installation
23
+ ```
24
+ pip install -e packages/algomancy-scenario
25
+ ```
26
+
27
+ Requires Python >= 3.14.
28
+
29
+ #### Quick start
30
+ Define a simple algorithm and KPI, then run a `Scenario`:
31
+
32
+ ```python
33
+ from algomancy_scenario import (
34
+ Scenario, ScenarioStatus,
35
+ BaseAlgorithm, BaseParameterSet, BaseKPI,
36
+ )
37
+ from algomancy_data import DataSource, DataClassification
38
+
39
+
40
+ # Minimal parameters type
41
+ class ExampleParams(BaseParameterSet):
42
+ def serialize(self) -> dict:
43
+ return {"hello": "world"}
44
+
45
+
46
+ # Minimal algorithm
47
+ class ExampleAlgorithm(BaseAlgorithm):
48
+ def __init__(self):
49
+ super().__init__(name="Example", params=ExampleParams())
50
+
51
+ @staticmethod
52
+ def initialize_parameters() -> ExampleParams: # used by GUI tooling
53
+ return ExampleParams()
54
+
55
+ def run(self, data: DataSource) -> dict:
56
+ # do something with data and return a result dictionary
57
+ self.set_progress(100)
58
+ return {"count_tables": len(data.list_tables())}
59
+
60
+
61
+ # Minimal KPI
62
+ class CountTablesKPI(BaseKPI):
63
+ def __init__(self):
64
+ super().__init__(name="Tables", improvement_direction=None)
65
+
66
+ def compute_and_check(self, result: dict):
67
+ self.value = result["count_tables"]
68
+
69
+
70
+ # Prepare data
71
+ ds = DataSource(ds_type=DataClassification.MASTER_DATA, name="warehouse")
72
+
73
+ # Build and run scenario
74
+ scenario = Scenario(
75
+ tag="demo",
76
+ input_data=ds,
77
+ kpis={"tables": CountTablesKPI()},
78
+ algorithm=ExampleAlgorithm(),
79
+ )
80
+
81
+ scenario.process()
82
+ assert scenario.status == ScenarioStatus.COMPLETE
83
+ print("Tables KPI:", scenario.kpis["tables"].value)
84
+ ```
85
+
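+ #### Asynchronous processing
+ The wheel also contains a `ScenarioProcessor` (in `algomancy_scenario/scenarioprocessor.py`, not re-exported from the package root) that runs scenarios on a background worker thread. A minimal sketch, reusing the `scenario` object from the quick start above:
+
+ ```python
+ from algomancy_scenario.scenarioprocessor import ScenarioProcessor
+
+ processor = ScenarioProcessor()   # starts a daemon worker thread
+ processor.enqueue(scenario)       # marks the scenario QUEUED and adds it to the queue
+ processor.wait_for_processing()   # block until the queue has been drained
+ processor.shutdown()              # post the shutdown sentinel and join the worker
+
+ print(scenario.status)            # ScenarioStatus.COMPLETE on success
+ ```
+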
86
+ #### Related docs and examples
87
+ - Example app demonstrates scenario wiring: `example/pages/ScenarioPageContent.py`
88
+ - Algorithm/KPI examples: `example/templates/algorithm/` and `example/templates/kpi/`
@@ -0,0 +1,16 @@
1
+ algomancy_scenario/__init__.py,sha256=m6FLBac2tEwg8KGBG1eXSmOO84tQBsMfaMtP5YBnxPQ,952
2
+ algomancy_scenario/algorithmfactory.py,sha256=HXobbXLkny1gIxPLda_NI7Bur6EeSYewFV-y-5lU4NA,1711
3
+ algomancy_scenario/basealgorithm.py,sha256=5YatHK7ym9BHaqyowIs38_7k4QZrs8AAQTJxQOlzkKY,2160
4
+ algomancy_scenario/core_configuration.py,sha256=F-kMj08x9YgNW7RBk28QAOc9TTx3pZQDPNBWz3yk58w,5126
5
+ algomancy_scenario/keyperformanceindicator.py,sha256=PbuENywT9jLEaC1p_hbINaRl-815Wpx5NiFQo8Dagro,4296
6
+ algomancy_scenario/kpifactory.py,sha256=cthdu7hCmy8XJD29jfOXlC3_nBgULkOL1mRlpbUPOBY,1682
7
+ algomancy_scenario/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
8
+ algomancy_scenario/result.py,sha256=8g_SlhZ1j36yaVXSNnsElnmkX7cE7x0-vhlikjsGn00,667
9
+ algomancy_scenario/scenario.py,sha256=VnCVDXiCfj727IwylzKtPBRUhXTe0UT8K_vfG9JMNsw,5680
10
+ algomancy_scenario/scenariofactory.py,sha256=HKP6mDDzmiOlOSecRHDkqTfQzeN_lBDdGIpr1-5amS4,2243
11
+ algomancy_scenario/scenariomanager.py,sha256=LEFCPSd8jbCWBJBeGcQBD376ipxneaosJf7PstHsHm8,13150
12
+ algomancy_scenario/scenarioprocessor.py,sha256=iXQnV0SCG1Mdz4AwgLJmeIGTDTyXzlhD_SOdCb9_eKM,1898
13
+ algomancy_scenario/scenarioregistry.py,sha256=VMtZUQTsX_n_Sd8-cLilK3GhyQ7NzLVZqT_tiMAEd20,1708
14
+ algomancy_scenario-0.3.13.dist-info/WHEEL,sha256=XjEbIc5-wIORjWaafhI6vBtlxDBp7S9KiujWF1EM7Ak,79
15
+ algomancy_scenario-0.3.13.dist-info/METADATA,sha256=8TEa4yXPWSWeOpr6Y8qYT3cGSau11ASfi1ubqIkDVRs,2556
16
+ algomancy_scenario-0.3.13.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: uv 0.9.25
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any