mcprojsim 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mcprojsim/__init__.py ADDED
@@ -0,0 +1,24 @@
1
+ """Monte Carlo Project Simulator - A probabilistic project estimation tool."""
2
+
3
+ from importlib.metadata import PackageNotFoundError, version
4
+ from pathlib import Path
5
+ import tomllib
6
+
7
+ from mcprojsim.models.project import Project, Task, Risk
8
+ from mcprojsim.simulation.engine import SimulationEngine
9
+
10
+
11
+ def _resolve_version() -> str:
12
+ try:
13
+ return version("mcprojsim")
14
+ except PackageNotFoundError:
15
+ pyproject_path = Path(__file__).resolve().parents[2] / "pyproject.toml"
16
+ try:
17
+ with pyproject_path.open("rb") as pyproject_file:
18
+ return tomllib.load(pyproject_file)["tool"]["poetry"]["version"]
19
+ except FileNotFoundError, KeyError, tomllib.TOMLDecodeError:
20
+ return "0.0.0+unknown"
21
+
22
+
23
# Computed once at import time so consumers can read mcprojsim.__version__.
__version__ = _resolve_version()
# Public API re-exported from this package's submodules.
__all__ = ["Project", "Task", "Risk", "SimulationEngine"]
@@ -0,0 +1,7 @@
1
+ """Analysis components."""
2
+
3
+ from mcprojsim.analysis.statistics import StatisticalAnalyzer
4
+ from mcprojsim.analysis.sensitivity import SensitivityAnalyzer
5
+ from mcprojsim.analysis.critical_path import CriticalPathAnalyzer
6
+
7
+ __all__ = ["StatisticalAnalyzer", "SensitivityAnalyzer", "CriticalPathAnalyzer"]
@@ -0,0 +1,52 @@
1
+ """Critical path analysis."""
2
+
3
+ from typing import Dict, List
4
+
5
+ from mcprojsim.models.simulation import CriticalPathRecord, SimulationResults
6
+
7
+
8
class CriticalPathAnalyzer:
    """Analyzer for critical path identification."""

    @staticmethod
    def get_criticality_index(results: SimulationResults) -> Dict[str, float]:
        """Return each task's criticality index.

        Args:
            results: Simulation results

        Returns:
            Mapping of task IDs to criticality values in [0.0, 1.0]
        """
        return results.get_critical_path()

    @staticmethod
    def get_most_critical_tasks(
        results: SimulationResults, threshold: float = 0.5
    ) -> List[str]:
        """Return tasks critical in at least ``threshold`` fraction of iterations.

        Args:
            results: Simulation results
            threshold: Minimum criticality threshold (0.0-1.0)

        Returns:
            IDs of tasks whose criticality meets or exceeds the threshold
        """
        index = CriticalPathAnalyzer.get_criticality_index(results)
        critical_ids: List[str] = []
        for task_id, score in index.items():
            if score >= threshold:
                critical_ids.append(task_id)
        return critical_ids

    @staticmethod
    def get_most_frequent_paths(
        results: SimulationResults, top_n: int | None = None
    ) -> list[CriticalPathRecord]:
        """Return the most frequent full critical path sequences.

        Args:
            results: Simulation results
            top_n: Maximum number of paths to return (None for no limit)

        Returns:
            Aggregated critical path records, delegated to the results object
        """
        return results.get_critical_path_sequences(top_n)
@@ -0,0 +1,57 @@
1
+ """Sensitivity analysis for task impacts."""
2
+
3
+ from typing import Dict
4
+ import numpy as np
5
+ from scipy import stats
6
+ from mcprojsim.models.simulation import SimulationResults
7
+
8
+
9
class SensitivityAnalyzer:
    """Analyzer for sensitivity analysis."""

    @staticmethod
    def calculate_correlations(results: SimulationResults) -> Dict[str, float]:
        """Spearman rank correlation of each task's durations vs. the project's.

        Args:
            results: Simulation results

        Returns:
            Mapping of task IDs to correlation coefficients
        """
        # Project-level durations are loop-invariant; convert once up front.
        project_durations: np.ndarray = np.asarray(results.durations)

        correlations: Dict[str, float] = {}
        for task_id, samples in results.task_durations.items():
            # stats.spearmanr returns (correlation, p-value); the p-value is unused.
            rho, _p_value = stats.spearmanr(np.asarray(samples), project_durations)
            # Normalize scipy's return (possibly a 0-d array) to a plain float.
            correlations[task_id] = float(np.asarray(rho).item())
        return correlations

    @staticmethod
    def get_top_contributors(
        results: SimulationResults, n: int = 10
    ) -> list[tuple[str, float]]:
        """Top N tasks contributing to schedule variance.

        Args:
            results: Simulation results
            n: Number of top tasks to return

        Returns:
            (task_id, correlation) tuples sorted by absolute correlation, descending
        """
        ranked = sorted(
            SensitivityAnalyzer.calculate_correlations(results).items(),
            key=lambda item: abs(item[1]),
            reverse=True,
        )
        return ranked[:n]
@@ -0,0 +1,67 @@
1
+ """Statistical analysis utilities."""
2
+
3
+ from typing import Dict
4
+ import numpy as np
5
+ from scipy import stats
6
+
7
+
8
class StatisticalAnalyzer:
    """Analyzer for simulation statistics."""

    @staticmethod
    def calculate_statistics(durations: np.ndarray) -> Dict[str, float]:
        """Calculate summary statistics for an array of durations.

        Args:
            durations: Array of duration values

        Returns:
            Dictionary of statistical measures
        """
        # Compute each aggregate once and reuse for derived measures.
        mean = float(np.mean(durations))
        std_dev = float(np.std(durations))
        minimum = float(np.min(durations))
        maximum = float(np.max(durations))
        return {
            "mean": mean,
            "median": float(np.median(durations)),
            "std_dev": std_dev,
            "variance": float(np.var(durations)),
            "min": minimum,
            "max": maximum,
            "range": maximum - minimum,
            # Guard against division by zero for all-zero durations.
            "coefficient_of_variation": std_dev / mean if mean > 0 else 0.0,
        }

    @staticmethod
    def calculate_percentiles(
        durations: np.ndarray, percentiles: list[int]
    ) -> Dict[int, float]:
        """Calculate percentile values.

        Args:
            durations: Array of duration values
            percentiles: List of percentile values (0-100)

        Returns:
            Dictionary mapping percentiles to values
        """
        values: Dict[int, float] = {}
        for percentile in percentiles:
            values[percentile] = float(np.percentile(durations, percentile))
        return values

    @staticmethod
    def confidence_interval(
        durations: np.ndarray, confidence: float = 0.95
    ) -> tuple[float, float]:
        """Calculate a Student-t confidence interval for the mean.

        Args:
            durations: Array of duration values
            confidence: Confidence level (default 0.95)

        Returns:
            Tuple of (lower_bound, upper_bound)
        """
        sample_mean = np.mean(durations)
        std_error = stats.sem(durations)
        lower, upper = stats.t.interval(
            confidence, len(durations) - 1, loc=sample_mean, scale=std_error
        )
        return float(lower), float(upper)
mcprojsim/cli.py ADDED
@@ -0,0 +1,242 @@
1
+ """Command-line interface for Monte Carlo Project Simulator."""
2
+
3
+ from pathlib import Path
4
+ from typing import Optional, Union
5
+
6
+ import click
7
+
8
+ from mcprojsim import __version__
9
+ from mcprojsim.config import Config, DEFAULT_SIMULATION_ITERATIONS
10
+ from mcprojsim.exporters import CSVExporter, HTMLExporter, JSONExporter
11
+ from mcprojsim.parsers import TOMLParser, YAMLParser
12
+ from mcprojsim.simulation import SimulationEngine
13
+ from mcprojsim.utils import Validator, setup_logging
14
+
15
+
16
@click.group()
@click.version_option(version=__version__, prog_name="mcprojsim")
def cli() -> None:
    """Monte Carlo Project Simulator - Probabilistic project estimation."""
    # Container group only; the subcommands (simulate, validate, config) do the work.
21
+
22
+
23
@cli.command()
@click.argument("project_file", type=click.Path(exists=True))
@click.option(
    "--iterations",
    "-n",
    type=int,
    default=DEFAULT_SIMULATION_ITERATIONS,
    help="Number of simulation iterations",
)
@click.option("--config", "-c", type=click.Path(exists=True), help="Configuration file")
@click.option("--seed", "-s", type=int, help="Random seed for reproducibility")
@click.option(
    "--output", "-o", type=click.Path(), help="Output file path (without extension)"
)
@click.option(
    "--output-format",
    "-f",
    default="",
    help="Output formats (comma-separated: json,csv,html). If not specified, only CLI output is shown.",
)
@click.option(
    "--critical-paths",
    type=int,
    help="Number of full critical path sequences to include in CLI output and exports.",
)
@click.option("--quiet", "-q", is_flag=True, help="Suppress progress output")
def simulate(
    project_file: str,
    iterations: int,
    config: Optional[str],
    seed: Optional[int],
    output: Optional[str],
    output_format: str,
    critical_paths: Optional[int],
    quiet: bool,
) -> None:
    """Run Monte Carlo simulation for a project.

    Loads the project definition (YAML or TOML), runs the simulation engine,
    prints a summary to the console, and optionally exports results to
    json/csv/html files when ``--output-format`` is supplied.
    """
    logger = setup_logging()

    try:
        # Load configuration: an explicit --config file wins over built-in defaults.
        if config:
            cfg = Config.load_from_file(config)
            logger.info(f"Loaded configuration from {config}")
        else:
            cfg = Config.get_default()
            logger.info("Using default configuration")

        # Parse project file; parser is chosen purely by file extension.
        project_path = Path(project_file)
        parser: Union[YAMLParser, TOMLParser]
        if project_path.suffix in [".yaml", ".yml"]:
            parser = YAMLParser()
        elif project_path.suffix == ".toml":
            parser = TOMLParser()
        else:
            # Unsupported extension: report and bail out without aborting the CLI.
            click.echo(f"Error: Unsupported file format {project_path.suffix}")
            return

        if not quiet:
            click.echo(f"Loading project from {project_file}...")
        project = parser.parse_file(project_file)
        logger.info(f"Loaded project: {project.project.name}")

        # Run simulation
        if not quiet:
            click.echo(f"Running simulation with {iterations} iterations...")
        engine = SimulationEngine(
            iterations=iterations,
            random_seed=seed,
            config=cfg,
            show_progress=not quiet,
        )
        results = engine.run(project)
        # CLI flag overrides the configured report limit; `or` also treats an
        # explicit 0 as "use the configured limit".
        critical_path_limit = critical_paths or cfg.output.critical_path_report_limit

        if not quiet:
            click.echo("\n=== Simulation Results ===")
            click.echo(f"Project: {results.project_name}")
            click.echo(f"Mean: {results.mean:.2f} days")
            click.echo(f"Median (P50): {results.median:.2f} days")
            click.echo(f"Std Dev: {results.std_dev:.2f} days")
            click.echo("\nConfidence Intervals:")
            for p in sorted(results.percentiles.keys()):
                click.echo(f"  P{p}: {results.percentiles[p]:.2f} days")

            critical_path_records = results.get_critical_path_sequences(
                critical_path_limit
            )
            if critical_path_records:
                click.echo("\nMost Frequent Critical Paths:")
                for index, record in enumerate(critical_path_records, start=1):
                    click.echo(
                        "  "
                        f"{index}. {record.format_path()} "
                        f"({record.count}/{results.iterations}, {record.frequency * 100:.1f}%)"
                    )

        # Export results (only if formats are explicitly specified)
        if output_format.strip():
            formats = [f.strip().lower() for f in output_format.split(",") if f.strip()]
            base_output = (
                Path(output) if output else Path(f"{project.project.name}_results")
            )

            for fmt in formats:
                if fmt == "json":
                    output_file = base_output.with_suffix(".json")
                    JSONExporter.export(
                        results,
                        output_file,
                        config=cfg,
                        critical_path_limit=critical_path_limit,
                    )
                    if not quiet:
                        click.echo(f"\nResults exported to {output_file}")
                elif fmt == "csv":
                    output_file = base_output.with_suffix(".csv")
                    CSVExporter.export(
                        results,
                        output_file,
                        config=cfg,
                        critical_path_limit=critical_path_limit,
                    )
                    if not quiet:
                        click.echo(f"Results exported to {output_file}")
                elif fmt == "html":
                    output_file = base_output.with_suffix(".html")
                    # The HTML exporter additionally receives the parsed project,
                    # unlike json/csv — presumably for richer report content.
                    HTMLExporter.export(
                        results,
                        output_file,
                        project=project,
                        config=cfg,
                        critical_path_limit=critical_path_limit,
                    )
                    if not quiet:
                        click.echo(f"Results exported to {output_file}")
                else:
                    # Unknown formats are skipped rather than treated as errors.
                    if not quiet:
                        click.echo(f"Warning: Unknown format '{fmt}' ignored")
        else:
            if not quiet:
                click.echo(
                    "\nNo export formats specified. Use -f to export results to files."
                )

    except Exception as e:
        # Top-level CLI boundary: log, surface the message, exit non-zero.
        logger.error(f"Error during simulation: {e}")
        click.echo(f"Error: {e}", err=True)
        raise click.Abort()
173
+
174
+
175
@cli.command()
@click.argument("project_file", type=click.Path(exists=True))
def validate(project_file: str) -> None:
    """Validate a project definition file."""
    logger = setup_logging()

    click.echo(f"Validating {project_file}...")

    is_valid, error_message = Validator.validate_file(project_file)

    # Guard clause: handle the failure path first and abort with a non-zero exit.
    if not is_valid:
        click.echo("✗ Validation failed:", err=True)
        click.echo(f"  {error_message}", err=True)
        logger.error(f"Validation failed for {project_file}: {error_message}")
        raise click.Abort()

    click.echo("✓ Project file is valid!")
    logger.info(f"Project file {project_file} is valid")
193
+
194
+
195
@cli.group()
def config() -> None:
    """Configuration management commands."""
    # Container group only; see the `show` subcommand.
199
+
200
+
201
@config.command(name="show")
@click.option("--config-file", "-c", type=click.Path(exists=True), help="Config file")
def show_config(config_file: Optional[str]) -> None:
    """Show current configuration."""
    # Fall back to built-in defaults when no config file is supplied.
    if config_file:
        cfg = Config.load_from_file(config_file)
        click.echo(f"Configuration from {config_file}:")
    else:
        cfg = Config.get_default()
        click.echo("Default configuration:")

    click.echo("\nUncertainty Factors:")
    for factor_name, levels in cfg.uncertainty_factors.items():
        click.echo(f"  {factor_name}:")
        for level, value in levels.items():
            click.echo(f"    {level}: {value}")

    click.echo("\nT-Shirt Sizes (effort estimates in days):")
    # Loop variable renamed so it no longer shadows the `config` command group.
    for size, size_config in cfg.t_shirt_sizes.items():
        click.echo(f"  {size}:")
        click.echo(
            f"    min: {size_config.min}, most_likely: {size_config.most_likely}, max: {size_config.max}"
        )

    click.echo("\nSimulation:")
    click.echo(f"  Default iterations: {cfg.simulation.default_iterations}")
    click.echo(f"  Random seed: {cfg.simulation.random_seed}")
    click.echo(
        "  Max stored critical paths: " f"{cfg.simulation.max_stored_critical_paths}"
    )

    click.echo("\nOutput:")
    click.echo(f"  Formats: {', '.join(cfg.output.formats)}")
    click.echo(f"  Include histogram: {cfg.output.include_histogram}")
    click.echo(f"  Histogram bins: {cfg.output.histogram_bins}")
    click.echo(
        "  Critical path report limit: " f"{cfg.output.critical_path_report_limit}"
    )
239
+
240
+
241
# Allow invoking the CLI directly with `python cli.py` during development;
# installed entry points call `cli()` through the console script instead.
if __name__ == "__main__":
    cli()
mcprojsim/config.py ADDED
@@ -0,0 +1,213 @@
1
+ """Configuration management for uncertainty factors and simulation settings."""
2
+
3
+ from copy import deepcopy
4
+ from pathlib import Path
5
+ from typing import Dict, Optional
6
+
7
+ import yaml
8
+ from pydantic import BaseModel, Field
9
+
10
+ # This file defines the configuration schema and default values for the Monte Carlo Project Simulator.
11
+ # This is where we centralize all configurable parameters, including uncertainty factors, T-shirt size mappings,
12
+ # and simulation settings. The SimulationEngine will use this configuration to adjust task durations and apply
13
+ # risk impacts during simulation.
14
+ # It is the single source of truth for all configuration-related logic, making it easier to maintain and extend in the future.
15
+
16
# Core simulation and output defaults.
DEFAULT_SIMULATION_ITERATIONS = 10000
DEFAULT_OUTPUT_FORMATS = ["json", "csv", "html"]
DEFAULT_HISTOGRAM_BINS = 50
DEFAULT_MAX_STORED_CRITICAL_PATHS = 20
DEFAULT_CRITICAL_PATH_REPORT_LIMIT = 2
DEFAULT_CONFIDENCE_LEVELS = [25, 50, 75, 80, 85, 90, 95, 99]
# Probability thresholds — presumably used for red/green status coloring in
# reports elsewhere in the package; confirm against the exporters.
DEFAULT_PROBABILITY_RED_THRESHOLD = 0.50
DEFAULT_PROBABILITY_GREEN_THRESHOLD = 0.90
# Default level selected for each uncertainty factor when a project omits one.
DEFAULT_UNCERTAINTY_FACTOR_LEVELS = {
    "team_experience": "medium",
    "requirements_maturity": "medium",
    "technical_complexity": "medium",
    "team_distribution": "colocated",
    "integration_complexity": "medium",
}
# Duration multipliers per factor level; values above 1.0 inflate estimates.
DEFAULT_UNCERTAINTY_FACTORS = {
    "team_experience": {"high": 0.90, "medium": 1.0, "low": 1.30},
    "requirements_maturity": {"high": 1.0, "medium": 1.15, "low": 1.40},
    "technical_complexity": {"low": 1.0, "medium": 1.20, "high": 1.50},
    "team_distribution": {"colocated": 1.0, "distributed": 1.25},
    "integration_complexity": {"low": 1.0, "medium": 1.15, "high": 1.35},
}
# Three-point (min / most-likely / max) effort estimates in days per T-shirt size.
DEFAULT_T_SHIRT_SIZE_VALUES = {
    "XS": {"min": 0.5, "most_likely": 1, "max": 2},
    "S": {"min": 1, "most_likely": 2, "max": 4},
    "M": {"min": 3, "most_likely": 5, "max": 8},
    "L": {"min": 5, "most_likely": 8, "max": 13},
    "XL": {"min": 8, "most_likely": 13, "max": 21},
    "XXL": {"min": 13, "most_likely": 21, "max": 34},
}
# Three-point effort estimates in days per Fibonacci story-point value.
DEFAULT_STORY_POINT_VALUES = {
    1: {"min": 0.5, "most_likely": 1, "max": 3},
    2: {"min": 1, "most_likely": 2, "max": 4},
    3: {"min": 1.5, "most_likely": 3, "max": 5},
    5: {"min": 3, "most_likely": 5, "max": 8},
    8: {"min": 5, "most_likely": 8, "max": 15},
    13: {"min": 8, "most_likely": 13, "max": 21},
    21: {"min": 13, "most_likely": 21, "max": 34},
}
55
+
56
+
57
def _build_default_config_data() -> dict:
    """Build the default configuration payload as a plain dict."""
    simulation_defaults = {
        "default_iterations": DEFAULT_SIMULATION_ITERATIONS,
        "random_seed": None,
        "max_stored_critical_paths": DEFAULT_MAX_STORED_CRITICAL_PATHS,
    }
    output_defaults = {
        "formats": list(DEFAULT_OUTPUT_FORMATS),
        "include_histogram": True,
        "histogram_bins": DEFAULT_HISTOGRAM_BINS,
        "critical_path_report_limit": DEFAULT_CRITICAL_PATH_REPORT_LIMIT,
    }
    # Deep-copy every mutable default so callers can mutate the payload freely
    # without corrupting the module-level constants.
    return {
        "uncertainty_factors": deepcopy(DEFAULT_UNCERTAINTY_FACTORS),
        "t_shirt_sizes": {
            size: deepcopy(values)
            for size, values in DEFAULT_T_SHIRT_SIZE_VALUES.items()
        },
        "story_points": {
            points: deepcopy(values)
            for points, values in DEFAULT_STORY_POINT_VALUES.items()
        },
        "simulation": simulation_defaults,
        "output": output_defaults,
    }
81
+
82
+
83
+ def _merge_nested_dicts(base: dict, overrides: dict) -> dict:
84
+ """Recursively merge dictionaries, preserving defaults."""
85
+ merged = deepcopy(base)
86
+
87
+ for key, value in overrides.items():
88
+ if key in merged and isinstance(merged[key], dict) and isinstance(value, dict):
89
+ merged[key] = _merge_nested_dicts(merged[key], value)
90
+ else:
91
+ merged[key] = value
92
+
93
+ return merged
94
+
95
+
96
class UncertaintyFactorConfig(BaseModel):
    """Configuration for a single uncertainty factor.

    NOTE(review): ``Config.uncertainty_factors`` stores raw
    ``Dict[str, Dict[str, float]]`` values rather than this model — confirm
    whether this class is still used anywhere.
    """

    # Multiplier applied when the factor is at the "high" level (1.0 = neutral).
    high: float = Field(default=1.0)
    # Multiplier applied when the factor is at the "medium" level.
    medium: float = Field(default=1.0)
    # Multiplier applied when the factor is at the "low" level.
    low: float = Field(default=1.0)
102
+
103
+
104
class SimulationConfig(BaseModel):
    """Simulation settings."""

    # Number of Monte Carlo iterations when not overridden on the CLI.
    default_iterations: int = Field(default=DEFAULT_SIMULATION_ITERATIONS, gt=0)
    # Seed for the random generator; None means non-reproducible runs.
    random_seed: Optional[int] = None
    # Upper bound on distinct critical-path sequences retained during a run.
    max_stored_critical_paths: int = Field(
        default=DEFAULT_MAX_STORED_CRITICAL_PATHS,
        gt=0,
    )
113
+
114
+
115
class OutputConfig(BaseModel):
    """Output settings."""

    # Export formats to produce by default (json/csv/html).
    formats: list[str] = Field(default_factory=lambda: list(DEFAULT_OUTPUT_FORMATS))
    # Whether exported reports include a duration histogram.
    include_histogram: bool = True
    # Number of bins used when rendering the histogram.
    histogram_bins: int = Field(default=DEFAULT_HISTOGRAM_BINS, gt=0)
    # How many critical-path sequences to include in reports by default.
    critical_path_report_limit: int = Field(
        default=DEFAULT_CRITICAL_PATH_REPORT_LIMIT,
        gt=0,
    )
125
+
126
+
127
class EstimateRangeConfig(BaseModel):
    """Range configuration for a symbolic estimate.

    Three-point estimate; all values must be positive. NOTE(review): the
    ``min <= most_likely <= max`` ordering is not validated here — confirm it
    is enforced downstream.
    """

    # Optimistic lower bound of the estimate (days); must be > 0.
    min: float = Field(gt=0)
    # Most likely (modal) value of the estimate (days); must be > 0.
    most_likely: float = Field(gt=0)
    # Pessimistic upper bound of the estimate (days); must be > 0.
    max: float = Field(gt=0)
133
+
134
+
135
class TShirtSizeConfig(EstimateRangeConfig):
    """T-shirt size estimate configuration (three-point range, no extra fields)."""
137
+
138
+
139
class StoryPointConfig(EstimateRangeConfig):
    """Story Point estimate configuration (three-point range, no extra fields)."""
141
+
142
+
143
class Config(BaseModel):
    """Complete application configuration."""

    # Raw factor-level multipliers keyed by factor name, then level name.
    uncertainty_factors: Dict[str, Dict[str, float]] = Field(default_factory=dict)
    t_shirt_sizes: Dict[str, TShirtSizeConfig] = Field(default_factory=dict)
    story_points: Dict[int, StoryPointConfig] = Field(default_factory=dict)
    simulation: SimulationConfig = Field(default_factory=SimulationConfig)
    output: OutputConfig = Field(default_factory=OutputConfig)

    @classmethod
    def load_from_file(cls, config_path: Path | str) -> "Config":
        """Load configuration from a YAML file, layered over the defaults.

        Args:
            config_path: Path to configuration file

        Returns:
            Config object

        Raises:
            FileNotFoundError: If the file does not exist.
        """
        config_path = Path(config_path)
        if not config_path.exists():
            raise FileNotFoundError(f"Config file not found: {config_path}")

        with open(config_path, "r") as config_file:
            overrides = yaml.safe_load(config_file) or {}

        # User-provided values win key-by-key, so partial config files work.
        return cls.model_validate(
            _merge_nested_dicts(_build_default_config_data(), overrides)
        )

    @classmethod
    def get_default(cls) -> "Config":
        """Get default configuration with standard uncertainty factors."""
        return cls.model_validate(_build_default_config_data())

    def get_uncertainty_multiplier(self, factor_name: str, level: str) -> float:
        """Get the uncertainty multiplier for a given factor and level.

        Args:
            factor_name: Name of uncertainty factor
            level: Level of the factor (e.g., 'high', 'medium', 'low')

        Returns:
            Multiplier value; unknown factors or levels fall back to neutral 1.0
        """
        levels = self.uncertainty_factors.get(factor_name)
        if levels is None:
            return 1.0
        return levels.get(level, 1.0)

    def get_t_shirt_size(self, size: str) -> Optional[TShirtSizeConfig]:
        """Get the T-shirt size configuration.

        Args:
            size: T-shirt size (e.g., 'XS', 'S', 'M', 'L', 'XL', 'XXL')

        Returns:
            TShirtSizeConfig object or None if not found
        """
        return self.t_shirt_sizes.get(size)

    def get_story_point(self, points: int) -> Optional[StoryPointConfig]:
        """Get the Story Point configuration.

        Args:
            points: Story Point value (for example 1, 2, 3, 5, 8, 13, 21)

        Returns:
            StoryPointConfig object or None if not found
        """
        return self.story_points.get(points)
@@ -0,0 +1,7 @@
1
+ """Exporters for simulation results."""
2
+
3
+ from mcprojsim.exporters.json_exporter import JSONExporter
4
+ from mcprojsim.exporters.csv_exporter import CSVExporter
5
+ from mcprojsim.exporters.html_exporter import HTMLExporter
6
+
7
+ __all__ = ["JSONExporter", "CSVExporter", "HTMLExporter"]