multidimensional-evaluation-engine 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- multidimensional_evaluation_engine/__init__.py +0 -0
- multidimensional_evaluation_engine/_version.py +24 -0
- multidimensional_evaluation_engine/domain/__init__.py +1 -0
- multidimensional_evaluation_engine/domain/candidates.py +30 -0
- multidimensional_evaluation_engine/domain/results.py +37 -0
- multidimensional_evaluation_engine/evaluation/__init__.py +1 -0
- multidimensional_evaluation_engine/evaluation/evaluator.py +58 -0
- multidimensional_evaluation_engine/io/__init__.py +1 -0
- multidimensional_evaluation_engine/io/load_candidates.py +69 -0
- multidimensional_evaluation_engine/io/load_policy.py +44 -0
- multidimensional_evaluation_engine/policy/__init__.py +1 -0
- multidimensional_evaluation_engine/policy/policy.py +62 -0
- multidimensional_evaluation_engine/py.typed +22 -0
- multidimensional_evaluation_engine/reporting/__init__.py +0 -0
- multidimensional_evaluation_engine/reporting/tables.py +35 -0
- multidimensional_evaluation_engine/utils/__init__.py +1 -0
- multidimensional_evaluation_engine/utils/logging_utils.py +62 -0
- multidimensional_evaluation_engine-0.1.0.dist-info/METADATA +175 -0
- multidimensional_evaluation_engine-0.1.0.dist-info/RECORD +22 -0
- multidimensional_evaluation_engine-0.1.0.dist-info/WHEEL +5 -0
- multidimensional_evaluation_engine-0.1.0.dist-info/licenses/LICENSE +21 -0
- multidimensional_evaluation_engine-0.1.0.dist-info/top_level.txt +1 -0
|
File without changes
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
# file generated by vcs-versioning
# don't change, don't track in version control
from __future__ import annotations

__all__ = [
    "__version__",
    "__version_tuple__",
    "version",
    "version_tuple",
    "__commit_id__",
    "commit_id",
]

# Declarations first (typing only); the concrete values follow below.
version: str
__version__: str
__version_tuple__: tuple[int | str, ...]
version_tuple: tuple[int | str, ...]
commit_id: str | None
__commit_id__: str | None

# Dunder and plain aliases always hold identical values.
version = __version__ = "0.1.0"
version_tuple = __version_tuple__ = (0, 1, 0)

commit_id = __commit_id__ = None
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Domain models."""
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
"""domain/candidates.py: Domain models for evaluation inputs."""
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def _default_metadata() -> dict[str, str]:
|
|
7
|
+
"""Return default empty metadata mapping."""
|
|
8
|
+
return {}
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
@dataclass(frozen=True)
class Candidate:
    """A generic candidate for multidimensional evaluation.

    A candidate represents an entity being evaluated under a policy.
    All domain-specific attributes are expressed as dimension values.

    Attributes:
        candidate_id: Stable identifier for the candidate.
        candidate_name: Human-readable name.
        dimensions: Mapping of dimension name to categorical value.
        notes: Optional free-text annotation.
        metadata: Optional auxiliary key-value data not used in scoring.
    """

    candidate_id: str
    candidate_name: str
    dimensions: dict[str, str]
    notes: str = ""
    # The builtin `dict` is itself a zero-argument factory yielding a fresh
    # empty mapping per instance; no custom helper function is needed.
    metadata: dict[str, str] = field(default_factory=dict)
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
"""domain/results.py: Result models for candidate evaluation."""
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
|
|
5
|
+
from .candidates import Candidate
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def _default_levels() -> dict[str, str]:
|
|
9
|
+
"""Return default empty levels mapping."""
|
|
10
|
+
return {}
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def _default_visual_levels() -> dict[str, str]:
|
|
14
|
+
"""Return default empty visual levels mapping."""
|
|
15
|
+
return {}
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
@dataclass(frozen=True)
class CandidateResult:
    """Result of evaluating a candidate under a policy.

    Contains computed scores and optional derived representations
    for interpretation or visualization.

    Attributes:
        candidate: The evaluated candidate.
        scores: Mapping of label to numeric score.
        levels: Optional mapping of label to qualitative level
            (e.g., "low", "medium", "high").
        visual_levels: Optional mapping of label to presentation-oriented
            categories (e.g., for UI color or grouping).
    """

    candidate: Candidate
    scores: dict[str, float]
    # The builtin `dict` is itself a zero-argument factory yielding a fresh
    # empty mapping per instance; no custom helper functions are needed.
    levels: dict[str, str] = field(default_factory=dict)
    visual_levels: dict[str, str] = field(default_factory=dict)
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Evaluation logic for exploration outputs."""
|
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
"""evaluation/evaluator.py: Evaluate candidates against policy."""
|
|
2
|
+
|
|
3
|
+
from ..domain.candidates import Candidate
|
|
4
|
+
from ..domain.results import CandidateResult
|
|
5
|
+
from ..policy.policy import Policy
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def evaluate_candidate(
    candidate: Candidate,
    policy: Policy,
) -> CandidateResult:
    """Evaluate a candidate under a policy.

    Computes weighted scores for each label defined in the policy.

    Args:
        candidate: Candidate with dimension values.
        policy: Policy defining scales and weights.

    Returns:
        CandidateResult containing computed scores.

    Raises:
        KeyError: If a required dimension is missing from the candidate.
        KeyError: If a dimension is not defined in the policy scales.
        KeyError: If a candidate value is not defined in the corresponding scale.
    """
    # Hoist attribute lookups used on every iteration.
    dims = candidate.dimensions
    scales = policy.scales

    scores: dict[str, float] = {}

    for label, weights in policy.weights.items():
        total = 0.0

        for dimension, weight in weights.items():
            # Guard clauses: validate in the same order the errors are defined.
            if dimension not in dims:
                raise KeyError(
                    f"Candidate {candidate.candidate_id!r} is missing dimension {dimension!r}."
                )

            value = dims[dimension]

            if dimension not in scales:
                raise KeyError(f"Policy is missing scale for dimension {dimension!r}.")

            scale = scales[dimension]
            if value not in scale:
                raise KeyError(
                    f"Unknown value {value!r} for dimension {dimension!r} "
                    f"in candidate {candidate.candidate_id!r}."
                )

            total += float(scale[value]) * float(weight)

        # Round to a stable precision for deterministic reporting.
        scores[label] = round(total, 3)

    return CandidateResult(candidate=candidate, scores=scores)
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Input and output helpers."""
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
"""io/load_candidates.py: Load candidates from CSV."""
|
|
2
|
+
|
|
3
|
+
import csv
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
|
|
6
|
+
from ..domain.candidates import Candidate
|
|
7
|
+
from ..utils.logging_utils import get_logger
|
|
8
|
+
|
|
9
|
+
logger = get_logger(__name__)

# CSV contract (local to loader):
_REQUIRED_COLUMNS = {"candidate_id", "candidate_name"}
_RESERVED_COLUMNS = {"candidate_id", "candidate_name", "notes"}


def load_candidates(csv_path: Path) -> list[Candidate]:
    """Load candidates from a CSV file.

    Every non-reserved column is treated as an evaluation dimension; its
    cell value becomes the candidate's categorical value for that dimension.

    Args:
        csv_path: Path to the CSV file.

    Returns:
        List of Candidate objects.

    Raises:
        ValueError: If the header row is absent or required columns are missing.
    """
    # Lazy %-style args: the message is only formatted if INFO is enabled.
    logger.info("Loading candidates from: %s", csv_path)
    candidates: list[Candidate] = []

    with csv_path.open("r", encoding="utf-8", newline="") as file:
        reader = csv.DictReader(file)

        if reader.fieldnames is None:
            raise ValueError("CSV file must include a header row.")

        headers = [header.strip() for header in reader.fieldnames]

        missing = _REQUIRED_COLUMNS - set(headers)
        if missing:
            missing_str = ", ".join(sorted(missing))
            raise ValueError(f"CSV file is missing required columns: {missing_str}")

        dimension_headers = [
            header for header in headers if header not in _RESERVED_COLUMNS
        ]

        for row in reader:
            # Strip whitespace from keys and values. DictReader yields None
            # for cells missing from short rows; normalize those to "".
            cleaned_row = {
                key.strip(): (value.strip() if value is not None else "")
                for key, value in row.items()
                if key is not None
            }

            dimensions = {
                header: cleaned_row.get(header, "") for header in dimension_headers
            }

            candidates.append(
                Candidate(
                    candidate_id=cleaned_row["candidate_id"],
                    candidate_name=cleaned_row["candidate_name"],
                    dimensions=dimensions,
                    notes=cleaned_row.get("notes", ""),
                )
            )

    logger.info("Loaded %d candidates.", len(candidates))
    return candidates
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
"""io/load_policy.py: Load policy from TOML."""
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
import tomllib
|
|
5
|
+
|
|
6
|
+
from ..policy.policy import Policy
|
|
7
|
+
from ..utils.logging_utils import get_logger
|
|
8
|
+
|
|
9
|
+
logger = get_logger(__name__)


def load_policy(path: Path) -> Policy:
    """Load a policy from a TOML file.

    Args:
        path: Path to TOML policy file.

    Returns:
        Policy instance.

    Raises:
        FileNotFoundError: If the file does not exist.
        tomllib.TOMLDecodeError: If TOML parsing fails.
        KeyError: If required policy fields are missing.
    """
    # Lazy %-style args: the message is only formatted if INFO is enabled.
    logger.info("Loading policy from: %s", path)

    if not path.exists():
        raise FileNotFoundError(f"Policy file not found: {path}")

    with path.open("rb") as f:
        data = tomllib.load(f)

    # Minimal structural validation (core-level only)
    required = {"scales", "weights", "interpretation"}
    missing = required - set(data)
    if missing:
        missing_str = ", ".join(sorted(missing))
        raise KeyError(f"Policy is missing required sections: {missing_str}")

    policy = Policy.from_dict(data)

    logger.info("Policy loaded successfully.")
    return policy
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Policy definitions and loaders."""
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
"""policy/policy.py: Policy models for multidimensional evaluation."""
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def _default_patterns() -> list[dict[str, Any]]:
|
|
8
|
+
"""Return default empty pattern list."""
|
|
9
|
+
return []
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def _default_rules() -> list[dict[str, Any]]:
|
|
13
|
+
"""Return default empty rule list."""
|
|
14
|
+
return []
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def _default_metadata() -> dict[str, str]:
|
|
18
|
+
"""Return default empty metadata map."""
|
|
19
|
+
return {}
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@dataclass(frozen=True)
|
|
23
|
+
class Policy:
|
|
24
|
+
"""Configuration for multidimensional evaluation.
|
|
25
|
+
|
|
26
|
+
A policy defines how candidate dimension values are mapped to numeric
|
|
27
|
+
scores and how those scores are aggregated.
|
|
28
|
+
|
|
29
|
+
Attributes:
|
|
30
|
+
scales: Mapping of dimension -> categorical value -> numeric score.
|
|
31
|
+
weights: Mapping of label -> dimension -> weight used in aggregation.
|
|
32
|
+
interpretation: Thresholds for mapping numeric scores to qualitative levels.
|
|
33
|
+
patterns: Optional descriptive patterns associated with configurations.
|
|
34
|
+
rules: Optional rule definitions for annotations or warnings.
|
|
35
|
+
metadata: Optional auxiliary information not used in scoring.
|
|
36
|
+
"""
|
|
37
|
+
|
|
38
|
+
scales: dict[str, dict[str, int | float]]
|
|
39
|
+
weights: dict[str, dict[str, float]]
|
|
40
|
+
interpretation: dict[str, float]
|
|
41
|
+
patterns: list[dict[str, Any]] = field(default_factory=_default_patterns)
|
|
42
|
+
rules: list[dict[str, Any]] = field(default_factory=_default_rules)
|
|
43
|
+
metadata: dict[str, str] = field(default_factory=_default_metadata)
|
|
44
|
+
|
|
45
|
+
@classmethod
|
|
46
|
+
def from_dict(cls, data: dict[str, Any]) -> "Policy":
|
|
47
|
+
"""Create a Policy from a dictionary.
|
|
48
|
+
|
|
49
|
+
Args:
|
|
50
|
+
data: Parsed policy configuration (e.g., from TOML).
|
|
51
|
+
|
|
52
|
+
Returns:
|
|
53
|
+
Policy instance.
|
|
54
|
+
"""
|
|
55
|
+
return cls(
|
|
56
|
+
scales=data["scales"],
|
|
57
|
+
weights=data["weights"],
|
|
58
|
+
interpretation=data["interpretation"],
|
|
59
|
+
patterns=data.get("patterns", []),
|
|
60
|
+
rules=data.get("rules", []),
|
|
61
|
+
metadata=data.get("metadata", {}),
|
|
62
|
+
)
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
# FILE: src/multidimensional_evaluation_engine/py.typed
|
|
2
|
+
|
|
3
|
+
# REQ.TYPING:
|
|
4
|
+
# Mark this package as providing inline type hints (PEP 561).
|
|
5
|
+
|
|
6
|
+
# WHY.TYPING:
|
|
7
|
+
# Type checkers (e.g., Pyright, Mypy) only trust type hints in installed
|
|
8
|
+
# packages if this marker file is present.
|
|
9
|
+
|
|
10
|
+
# OBS.TYPING:
|
|
11
|
+
# - This file is required by PEP 561.
|
|
12
|
+
# - The filename MUST be exactly "py.typed".
|
|
13
|
+
# - This is a marker file, not Python code.
|
|
14
|
+
# - The file may be empty; comments are allowed.
|
|
15
|
+
|
|
16
|
+
# REQ.LOCATION:
|
|
17
|
+
# Place this file in the top-level package directory
|
|
18
|
+
# (e.g., src/multidimensional_evaluation_engine/).
|
|
19
|
+
|
|
20
|
+
# WHY.LOCATION:
|
|
21
|
+
# This tells type checkers that ALL modules in this package
|
|
22
|
+
# should be treated as typed.
|
|
File without changes
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
"""reporting/tables.py: Simple text-table reporting for scored results."""
|
|
2
|
+
|
|
3
|
+
from ..domain.results import CandidateResult
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def format_results(results: list[CandidateResult]) -> str:
    """Format evaluation results as a plain-text report.

    Produces a deterministic, human-readable summary of scores and
    optional qualitative levels for each candidate.

    Args:
        results: List of candidate evaluation results.

    Returns:
        Formatted string suitable for console output or logging.
    """
    lines: list[str] = []

    for result in results:
        candidate = result.candidate
        lines.append(f"{candidate.candidate_id} | {candidate.candidate_name}")

        # Sort labels so output is stable regardless of dict insertion order.
        for label in sorted(result.scores):
            score = result.scores[label]
            level = result.levels.get(label)
            suffix = f" ({level})" if level else ""
            lines.append(f"  - {label}: {score:.2f}{suffix}")

        lines.append("")

    # Drop the trailing blank line left by the last candidate section.
    return "\n".join(lines).rstrip()
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Utility helpers for the project."""
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
"""utils/logging_utils.py: Logging helpers for the project."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
|
|
6
|
+
PROJECT_LOGGER_NAME = "multidimensional_evaluation_engine"
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def get_logger(name: str | None = None, level: str = "INFO") -> logging.Logger:
|
|
10
|
+
"""Return a configured project logger.
|
|
11
|
+
|
|
12
|
+
Args:
|
|
13
|
+
name: Optional child logger name.
|
|
14
|
+
level: Logging level for the root project logger.
|
|
15
|
+
|
|
16
|
+
Returns:
|
|
17
|
+
Logger instance scoped to the project.
|
|
18
|
+
"""
|
|
19
|
+
logger = logging.getLogger(PROJECT_LOGGER_NAME)
|
|
20
|
+
|
|
21
|
+
if not logger.handlers:
|
|
22
|
+
handler = logging.StreamHandler()
|
|
23
|
+
formatter = logging.Formatter(
|
|
24
|
+
"%(asctime)s | %(levelname)s | %(name)s | %(message)s"
|
|
25
|
+
)
|
|
26
|
+
handler.setFormatter(formatter)
|
|
27
|
+
logger.addHandler(handler)
|
|
28
|
+
|
|
29
|
+
logger.setLevel(level)
|
|
30
|
+
|
|
31
|
+
if name:
|
|
32
|
+
return logger.getChild(name)
|
|
33
|
+
return logger
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def log_header(title: str, logger: logging.Logger | None = None) -> None:
|
|
37
|
+
"""Log a section header.
|
|
38
|
+
|
|
39
|
+
Args:
|
|
40
|
+
title: Header text.
|
|
41
|
+
logger: Logger to use (defaults to project logger).
|
|
42
|
+
"""
|
|
43
|
+
active_logger = logger or get_logger()
|
|
44
|
+
active_logger.info("=" * 60)
|
|
45
|
+
active_logger.info(title)
|
|
46
|
+
active_logger.info("=" * 60)
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def log_path(
|
|
50
|
+
name: str,
|
|
51
|
+
path: Path | str,
|
|
52
|
+
logger: logging.Logger | None = None,
|
|
53
|
+
) -> None:
|
|
54
|
+
"""Log a filesystem path.
|
|
55
|
+
|
|
56
|
+
Args:
|
|
57
|
+
name: Label for the path.
|
|
58
|
+
path: Filesystem path.
|
|
59
|
+
logger: Logger to use (defaults to project logger).
|
|
60
|
+
"""
|
|
61
|
+
active_logger = logger or get_logger()
|
|
62
|
+
active_logger.info(f"{name}: {Path(path)}")
|
|
@@ -0,0 +1,175 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: multidimensional-evaluation-engine
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Core engine for multidimensional evaluation under explicit policy assumptions.
|
|
5
|
+
Author: Denise Case
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Documentation, https://civic-interconnect.github.io/multidimensional-evaluation-engine/
|
|
8
|
+
Project-URL: Issues, https://github.com/civic-interconnect/multidimensional-evaluation-engine/issues
|
|
9
|
+
Project-URL: Repository, https://github.com/civic-interconnect/multidimensional-evaluation-engine
|
|
10
|
+
Keywords: multidimensional evaluation,policy modeling,tradeoff analysis,decision support,structured evaluation
|
|
11
|
+
Classifier: Development Status :: 4 - Beta
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: Intended Audience :: Science/Research
|
|
14
|
+
Classifier: Natural Language :: English
|
|
15
|
+
Classifier: Operating System :: OS Independent
|
|
16
|
+
Classifier: Programming Language :: Python :: 3
|
|
17
|
+
Classifier: Programming Language :: Python :: 3 :: Only
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.14
|
|
19
|
+
Classifier: Topic :: Scientific/Engineering :: Information Analysis
|
|
20
|
+
Classifier: Typing :: Typed
|
|
21
|
+
Requires-Python: >=3.14
|
|
22
|
+
Description-Content-Type: text/markdown
|
|
23
|
+
License-File: LICENSE
|
|
24
|
+
Provides-Extra: dev
|
|
25
|
+
Requires-Dist: bandit>=1.9.4; extra == "dev"
|
|
26
|
+
Requires-Dist: deptry>=0.24.0; extra == "dev"
|
|
27
|
+
Requires-Dist: packaging>=26.0; extra == "dev"
|
|
28
|
+
Requires-Dist: pre-commit>=4.5.1; extra == "dev"
|
|
29
|
+
Requires-Dist: pytest>=9.0.2; extra == "dev"
|
|
30
|
+
Requires-Dist: pytest-cov>=7.0.0; extra == "dev"
|
|
31
|
+
Requires-Dist: pyright>=1.1.408; extra == "dev"
|
|
32
|
+
Requires-Dist: ruff>=0.14.13; extra == "dev"
|
|
33
|
+
Requires-Dist: validate-pyproject>=0.24.1; extra == "dev"
|
|
34
|
+
Provides-Extra: docs
|
|
35
|
+
Requires-Dist: mkdocstrings[python]; extra == "docs"
|
|
36
|
+
Requires-Dist: zensical; extra == "docs"
|
|
37
|
+
Dynamic: license-file
|
|
38
|
+
|
|
39
|
+
# multidimensional-evaluation-engine
|
|
40
|
+
|
|
41
|
+
[](https://pypi.org/project/multidimensional-evaluation-engine/)
|
|
42
|
+
[](https://github.com/civic-interconnect/multidimensional-evaluation-engine/releases)
|
|
43
|
+
[](https://civic-interconnect.github.io/multidimensional-evaluation-engine/)
|
|
44
|
+
[](https://github.com/civic-interconnect/multidimensional-evaluation-engine/actions/workflows/ci-python-zensical.yml)
|
|
45
|
+
[](https://github.com/civic-interconnect/multidimensional-evaluation-engine/actions/workflows/deploy-zensical.yml)
|
|
46
|
+
[](https://github.com/civic-interconnect/multidimensional-evaluation-engine/actions/workflows/links.yml)
|
|
47
|
+
[](https://github.com/civic-interconnect/multidimensional-evaluation-engine/security)
|
|
48
|
+
[](#)
|
|
49
|
+
[](./LICENSE)
|
|
50
|
+
|
|
51
|
+
> A domain-neutral engine for multidimensional evaluation under explicit policy assumptions.
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
## What the Engine Does
|
|
55
|
+
|
|
56
|
+
The engine:
|
|
57
|
+
|
|
58
|
+
- applies a configurable policy
|
|
59
|
+
- evaluates candidate configurations
|
|
60
|
+
- computes weighted scores across dimensions
|
|
61
|
+
- interprets scores into qualitative levels
|
|
62
|
+
- supports structured comparison across alternative designs
|
|
63
|
+
|
|
64
|
+
The system is designed to make assumptions explicit and results inspectable.
|
|
65
|
+
|
|
66
|
+
## Important Note
|
|
67
|
+
|
|
68
|
+
This project does not advocate for a specific solution.
|
|
69
|
+
|
|
70
|
+
It provides a structured way to examine:
|
|
71
|
+
|
|
72
|
+
- how design choices affect outcomes
|
|
73
|
+
- where tradeoffs become significant
|
|
74
|
+
- how governance assumptions shape results
|
|
75
|
+
|
|
76
|
+
## This Project
|
|
77
|
+
|
|
78
|
+
This project provides a reusable framework for multidimensional evaluation under explicit assumptions and constraints.
|
|
79
|
+
It:
|
|
80
|
+
|
|
81
|
+
- supports policy-driven evaluation across multiple domains
|
|
82
|
+
- makes tradeoffs visible through configurable dimensions, scales, and weights
|
|
83
|
+
- separates evaluation logic from domain-specific explorer interfaces
|
|
84
|
+
|
|
85
|
+
The goal is to provide a stable core that can support multiple exploratory systems built on a shared evaluation model.
|
|
86
|
+
|
|
87
|
+
## Contribution
|
|
88
|
+
|
|
89
|
+
The contribution is the engine for structured multidimensional evaluation, not the specific values used in any given scenario.
|
|
90
|
+
|
|
91
|
+
- Dimensions, scales, weights, and thresholds are configurable
|
|
92
|
+
- Assumptions are explicit and inspectable
|
|
93
|
+
- Results are comparative and scenario-dependent
|
|
94
|
+
- The core logic is domain-neutral
|
|
95
|
+
|
|
96
|
+
This project does not determine outcomes or recommend decisions.
|
|
97
|
+
It provides a way to examine how different assumptions and constraints shape results.
|
|
98
|
+
|
|
99
|
+
## Working Files
|
|
100
|
+
|
|
101
|
+
Working files are found in these areas:
|
|
102
|
+
|
|
103
|
+
- **docs/** - documentation and examples
|
|
104
|
+
- **src/** - implementation
|
|
105
|
+
|
|
106
|
+
## Capabilities
|
|
107
|
+
|
|
108
|
+
- Loads policy configuration structures
|
|
109
|
+
- Evaluates candidate configurations across weighted dimensions
|
|
110
|
+
- Computes score profiles and qualitative levels
|
|
111
|
+
- Supports reusable integration into domain-specific explorer systems
|
|
112
|
+
|
|
113
|
+
## Command Reference
|
|
114
|
+
|
|
115
|
+
<details>
|
|
116
|
+
<summary>Show command reference</summary>
|
|
117
|
+
|
|
118
|
+
### In a machine terminal (open in your `Repos` folder)
|
|
119
|
+
|
|
120
|
+
After you get a copy of this repo in your own GitHub account,
|
|
121
|
+
open a machine terminal in your `Repos` folder:
|
|
122
|
+
|
|
123
|
+
```shell
|
|
124
|
+
# Replace username with YOUR GitHub username.
|
|
125
|
+
git clone https://github.com/username/multidimensional-evaluation-engine
|
|
126
|
+
|
|
127
|
+
cd multidimensional-evaluation-engine
|
|
128
|
+
code .
|
|
129
|
+
```
|
|
130
|
+
|
|
131
|
+
### In a VS Code terminal
|
|
132
|
+
|
|
133
|
+
```shell
|
|
134
|
+
uv self update
|
|
135
|
+
uv python pin 3.14
|
|
136
|
+
uv sync --extra dev --extra docs --upgrade
|
|
137
|
+
|
|
138
|
+
uvx pre-commit install
|
|
139
|
+
git add -A
|
|
140
|
+
uvx pre-commit run --all-files
|
|
141
|
+
# repeat if changes were made
|
|
142
|
+
git add -A
|
|
143
|
+
uvx pre-commit run --all-files
|
|
144
|
+
|
|
145
|
+
uv run ruff format --check .
|
|
146
|
+
uv run ruff check .
|
|
147
|
+
uv run pytest --cov=src --cov-report=term-missing
|
|
148
|
+
|
|
149
|
+
uv run deptry .
|
|
150
|
+
uv run bandit -c pyproject.toml -r src
|
|
151
|
+
uv run validate-pyproject pyproject.toml
|
|
152
|
+
uv run zensical build
|
|
153
|
+
|
|
154
|
+
git add -A
|
|
155
|
+
git commit -m "update"
|
|
156
|
+
git push -u origin main
|
|
157
|
+
```
|
|
158
|
+
|
|
159
|
+
</details>
|
|
160
|
+
|
|
161
|
+
## Annotations
|
|
162
|
+
|
|
163
|
+
[ANNOTATIONS.md](./ANNOTATIONS.md)
|
|
164
|
+
|
|
165
|
+
## Citation
|
|
166
|
+
|
|
167
|
+
[CITATION.cff](./CITATION.cff)
|
|
168
|
+
|
|
169
|
+
## License
|
|
170
|
+
|
|
171
|
+
[MIT](./LICENSE)
|
|
172
|
+
|
|
173
|
+
## SE Manifest
|
|
174
|
+
|
|
175
|
+
[SE_MANIFEST.toml](./SE_MANIFEST.toml)
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
multidimensional_evaluation_engine/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
2
|
+
multidimensional_evaluation_engine/_version.py,sha256=n_5vdJsPNu7wZ57LGuRL585uvll-hiuvZUBWzdG0RQU,520
|
|
3
|
+
multidimensional_evaluation_engine/py.typed,sha256=PVMQRODHuXaoq_Jd0G4SMYyajSR3oIKYfuRykCUOuCw,667
|
|
4
|
+
multidimensional_evaluation_engine/domain/__init__.py,sha256=4M__LeQqeDBbNjkDOnoLOYoyrHVrs4pJesV2Prs6jpw,21
|
|
5
|
+
multidimensional_evaluation_engine/domain/candidates.py,sha256=cp6AuYmnecpjSvnclF44TilGm9IMXV3W_pPczCKGz9k,944
|
|
6
|
+
multidimensional_evaluation_engine/domain/results.py,sha256=lJEn00VLDZYc1j6F-rl9w-Q9JsK8BP6ov8GtYMqf8no,1141
|
|
7
|
+
multidimensional_evaluation_engine/evaluation/__init__.py,sha256=rvwbL6675_7A5j0BxoHmEqfwc0Z8oLcWC_HQqVxeylo,48
|
|
8
|
+
multidimensional_evaluation_engine/evaluation/evaluator.py,sha256=8wWRSiylYql7x4i8woyEeDfDQbsr7YAU1WPM6STWzx4,1848
|
|
9
|
+
multidimensional_evaluation_engine/io/__init__.py,sha256=DY52HsekcLvdkvbVcm9vCccZQ0PmlYM6Zcuk-TEe6w0,32
|
|
10
|
+
multidimensional_evaluation_engine/io/load_candidates.py,sha256=hMLcfaAOoj-OxZfDXQxCRXYfBZxcAmQM2EuAZhcnUL0,2087
|
|
11
|
+
multidimensional_evaluation_engine/io/load_policy.py,sha256=X7blvYGf-mP3nwlxCxQxoCRCWyy1X96FK0qXwdc0RTA,1155
|
|
12
|
+
multidimensional_evaluation_engine/policy/__init__.py,sha256=x7dxybXvdgS_DAPMqO6VszvSwb7U6IhjJwocZgl3ZcQ,38
|
|
13
|
+
multidimensional_evaluation_engine/policy/policy.py,sha256=9_eqzidNJNnzN6WqMKlOkAyyZ0ZuHe6i-2L2d_cMGcw,2051
|
|
14
|
+
multidimensional_evaluation_engine/reporting/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
15
|
+
multidimensional_evaluation_engine/reporting/tables.py,sha256=AKmWSCoEtsRnYC-nrA01HaePGuahwNQU2lPuEt7JndY,1042
|
|
16
|
+
multidimensional_evaluation_engine/utils/__init__.py,sha256=_-_DIeAuch4rSVvtrH6hPJaReen-hiM1kVEV46GL8iU,39
|
|
17
|
+
multidimensional_evaluation_engine/utils/logging_utils.py,sha256=sM0VEk9xUwkFcDqZxZPI0gP6eoXLY5Jld8waO0VJYIQ,1602
|
|
18
|
+
multidimensional_evaluation_engine-0.1.0.dist-info/licenses/LICENSE,sha256=fRFn0da07ZexkL7FGPthxrSvl1ykev5s38AFH49Ppbw,1071
|
|
19
|
+
multidimensional_evaluation_engine-0.1.0.dist-info/METADATA,sha256=dQho1ANbH5pQvxF_5mDY6svbNZ-EKqxWQwSM8tGDdEs,6517
|
|
20
|
+
multidimensional_evaluation_engine-0.1.0.dist-info/WHEEL,sha256=aeYiig01lYGDzBgS8HxWXOg3uV61G9ijOsup-k9o1sk,91
|
|
21
|
+
multidimensional_evaluation_engine-0.1.0.dist-info/top_level.txt,sha256=g5MvanuQCLLXPIV6hRupMBmxQz8mQt3P18uWMR4Za5M,35
|
|
22
|
+
multidimensional_evaluation_engine-0.1.0.dist-info/RECORD,,
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Denise M. Case
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
multidimensional_evaluation_engine
|