rule-lab 0.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rule_lab-0.1.1/PKG-INFO +28 -0
- rule_lab-0.1.1/README.md +16 -0
- rule_lab-0.1.1/pyproject.toml +34 -0
- rule_lab-0.1.1/setup.cfg +4 -0
- rule_lab-0.1.1/src/rule_lab/__init__.py +24 -0
- rule_lab-0.1.1/src/rule_lab/cli.py +120 -0
- rule_lab-0.1.1/src/rule_lab/core/__init__.py +2 -0
- rule_lab-0.1.1/src/rule_lab/core/conditions.py +58 -0
- rule_lab-0.1.1/src/rule_lab/core/conflicts.py +35 -0
- rule_lab-0.1.1/src/rule_lab/core/contracts.py +78 -0
- rule_lab-0.1.1/src/rule_lab/core/errors.py +17 -0
- rule_lab-0.1.1/src/rule_lab/core/evaluator.py +46 -0
- rule_lab-0.1.1/src/rule_lab/core/loader.py +61 -0
- rule_lab-0.1.1/src/rule_lab/core/models.py +20 -0
- rule_lab-0.1.1/src/rule_lab/core/reporting.py +64 -0
- rule_lab-0.1.1/src/rule_lab.egg-info/PKG-INFO +28 -0
- rule_lab-0.1.1/src/rule_lab.egg-info/SOURCES.txt +26 -0
- rule_lab-0.1.1/src/rule_lab.egg-info/dependency_links.txt +1 -0
- rule_lab-0.1.1/src/rule_lab.egg-info/entry_points.txt +2 -0
- rule_lab-0.1.1/src/rule_lab.egg-info/requires.txt +3 -0
- rule_lab-0.1.1/src/rule_lab.egg-info/top_level.txt +1 -0
- rule_lab-0.1.1/tests/test_batch.py +56 -0
- rule_lab-0.1.1/tests/test_cli.py +127 -0
- rule_lab-0.1.1/tests/test_conditions.py +78 -0
- rule_lab-0.1.1/tests/test_diff.py +67 -0
- rule_lab-0.1.1/tests/test_metadata_consistency.py +130 -0
- rule_lab-0.1.1/tests/test_simulate.py +74 -0
- rule_lab-0.1.1/tests/test_validate.py +78 -0
rule_lab-0.1.1/PKG-INFO
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: rule-lab
|
|
3
|
+
Version: 0.1.1
|
|
4
|
+
Summary: WinstonRedGuard local-first deterministic rule evaluation engine
|
|
5
|
+
Author-email: Yakuphan <yakuphan.yucel11@gmail.com>
|
|
6
|
+
License-Expression: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/yakuphanycl/WinstonRedGuard
|
|
8
|
+
Requires-Python: >=3.10
|
|
9
|
+
Description-Content-Type: text/markdown
|
|
10
|
+
Provides-Extra: dev
|
|
11
|
+
Requires-Dist: pytest>=8; extra == "dev"
|
|
12
|
+
|
|
13
|
+
# rule_lab
|
|
14
|
+
|
|
15
|
+
Deterministik kural değerlendirme motoru. JSON kural dosyasını context listesine karşı simüle eder, çakışmaları tespit eder.
|
|
16
|
+
|
|
17
|
+
## Kurulum
|
|
18
|
+
```
|
|
19
|
+
cd apps/rule_lab
|
|
20
|
+
pip install -e ".[dev]"
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
## Kullanım
|
|
24
|
+
```
|
|
25
|
+
rule-lab validate --rules rules.json
|
|
26
|
+
rule-lab simulate --rules rules.json --contexts contexts.json
|
|
27
|
+
rule-lab diff --rules rules.json
|
|
28
|
+
```
|
rule_lab-0.1.1/README.md
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
# rule_lab
|
|
2
|
+
|
|
3
|
+
Deterministik kural değerlendirme motoru. JSON kural dosyasını context listesine karşı simüle eder, çakışmaları tespit eder.
|
|
4
|
+
|
|
5
|
+
## Kurulum
|
|
6
|
+
```
|
|
7
|
+
cd apps/rule_lab
|
|
8
|
+
pip install -e ".[dev]"
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Kullanım
|
|
12
|
+
```
|
|
13
|
+
rule-lab validate --rules rules.json
|
|
14
|
+
rule-lab simulate --rules rules.json --contexts contexts.json
|
|
15
|
+
rule-lab diff --rules rules.json
|
|
16
|
+
```
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=68"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "rule-lab"
|
|
7
|
+
version = "0.1.1"
|
|
8
|
+
description = "WinstonRedGuard local-first deterministic rule evaluation engine"
|
|
9
|
+
requires-python = ">=3.10"
|
|
10
|
+
license = "MIT"
|
|
11
|
+
readme = "README.md"
|
|
12
|
+
authors = [
|
|
13
|
+
{ name = "Yakuphan", email = "yakuphan.yucel11@gmail.com" }
|
|
14
|
+
]
|
|
15
|
+
|
|
16
|
+
[project.urls]
|
|
17
|
+
Homepage = "https://github.com/yakuphanycl/WinstonRedGuard"
|
|
18
|
+
|
|
19
|
+
[project.optional-dependencies]
|
|
20
|
+
dev = [
|
|
21
|
+
"pytest>=8",
|
|
22
|
+
]
|
|
23
|
+
|
|
24
|
+
[project.scripts]
|
|
25
|
+
rule-lab = "rule_lab.cli:main"
|
|
26
|
+
|
|
27
|
+
[tool.setuptools]
|
|
28
|
+
package-dir = {"" = "src"}
|
|
29
|
+
|
|
30
|
+
[tool.setuptools.packages.find]
|
|
31
|
+
where = ["src"]
|
|
32
|
+
|
|
33
|
+
[tool.pytest.ini_options]
|
|
34
|
+
testpaths = ["tests"]
|
rule_lab-0.1.1/src/rule_lab/__init__.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
"""rule_lab — deterministic, local-first rule evaluation engine.

Re-exports the public API (contracts, evaluator, loader) at package level.
"""

from __future__ import annotations

# Package identity constants; keep APP_VERSION in sync with pyproject.toml.
APP_NAME = "rule_lab"
APP_VERSION = "0.1.1"

from rule_lab.core.contracts import Rule, EvaluationResult, BatchResult
from rule_lab.core.models import RuleSet
from rule_lab.core.evaluator import evaluate_rule, evaluate_rules, simulate
from rule_lab.core.loader import load_rules_from_file, load_rules_from_dict, load_rules_from_list

# Explicit public API surface of the package.
__all__ = [
    "APP_NAME",
    "APP_VERSION",
    "Rule",
    "EvaluationResult",
    "BatchResult",
    "RuleSet",
    "evaluate_rule",
    "evaluate_rules",
    "simulate",
    "load_rules_from_file",
    "load_rules_from_dict",
    "load_rules_from_list",
]
|
|
@@ -0,0 +1,120 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import argparse
|
|
4
|
+
import json
|
|
5
|
+
import sys
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Any, Sequence
|
|
8
|
+
|
|
9
|
+
from rule_lab.core.conflicts import detect_conflicts
|
|
10
|
+
from rule_lab.core.errors import RuleLabError
|
|
11
|
+
from rule_lab.core.evaluator import evaluate_rules, simulate
|
|
12
|
+
from rule_lab.core.loader import load_rules_from_file
|
|
13
|
+
from rule_lab.core.reporting import build_batch_report, build_run_report, format_text_report, write_report
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class CliError(Exception):
    """User-facing CLI failure; caught in main() and reported as exit code 1."""
    pass
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def build_parser() -> argparse.ArgumentParser:
    """Construct the rule-lab argument parser.

    Three subcommands: ``validate`` (schema check), ``simulate``
    (rules vs. contexts), and ``diff`` (conflict detection).
    """
    parser = argparse.ArgumentParser(prog="rule-lab", description="Deterministic rule evaluation engine")
    subparsers = parser.add_subparsers(dest="command", required=True)

    p_validate = subparsers.add_parser("validate", help="Validate a rule file")
    p_validate.add_argument("--rules", required=True, help="Path to rules JSON file")

    p_simulate = subparsers.add_parser("simulate", help="Simulate rules against contexts")
    p_simulate.add_argument("--rules", required=True, help="Path to rules JSON file")
    p_simulate.add_argument("--contexts", required=True, help="Path to contexts JSON file (list of dicts)")
    p_simulate.add_argument("--out", help="Write JSON report to this path")
    p_simulate.add_argument("--json", action="store_true", help="Emit JSON to stdout")

    p_diff = subparsers.add_parser("diff", help="Detect conflicts between rules")
    p_diff.add_argument("--rules", required=True, help="Path to rules JSON file")
    p_diff.add_argument("--json", action="store_true", help="Emit JSON to stdout")

    return parser
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def main(argv: Sequence[str] | None = None) -> int:
    """CLI entry point; returns a process exit code (0 success, 1 failure).

    Accepts *argv* for testing; None falls back to sys.argv[1:].
    All expected failure modes are converted to an ERROR line on stderr.
    """
    parser = build_parser()
    args = parser.parse_args(argv)

    try:
        if args.command == "validate":
            return _cmd_validate(args)
        if args.command == "simulate":
            return _cmd_simulate(args)
        if args.command == "diff":
            return _cmd_diff(args)
        # Unreachable with required=True subparsers, but kept as a guard.
        raise CliError(f"unsupported command: {args.command}")
    except CliError as exc:
        print(f"ERROR: {exc}", file=sys.stderr)
        return 1
    except RuleLabError as exc:
        print(f"ERROR: {exc}", file=sys.stderr)
        return 1
    except FileNotFoundError as exc:
        print(f"ERROR: file not found: {exc.filename or exc}", file=sys.stderr)
        return 1
    except json.JSONDecodeError as exc:
        print(f"ERROR: invalid JSON: {exc}", file=sys.stderr)
        return 1
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def _cmd_validate(args: argparse.Namespace) -> int:
    """Handle 'validate': load the rules file; loader errors propagate to main()."""
    rules = load_rules_from_file(args.rules)
    print(f"validate OK — {len(rules)} rule(s) loaded from {args.rules}")
    return 0
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def _cmd_simulate(args: argparse.Namespace) -> int:
    """Handle 'simulate': run every rule over every context and report.

    Emits the full batch report as JSON with --json, otherwise a flattened
    text summary; --out additionally writes the batch report to a file.
    """
    rules = load_rules_from_file(args.rules)
    contexts_path = Path(args.contexts)
    if not contexts_path.exists():
        raise CliError(f"contexts file not found: {contexts_path}")
    contexts: list[dict[str, Any]] = json.loads(contexts_path.read_text(encoding="utf-8"))
    if not isinstance(contexts, list):
        raise CliError("contexts file must contain a JSON array")

    conflicts = detect_conflicts(rules)
    batch_results = simulate(rules, contexts)
    report = build_batch_report(batch_results, conflicts, contexts)

    if args.json:
        print(json.dumps(report, indent=2, ensure_ascii=False))
    else:
        # format_text_report expects a single-run shape, so flatten the
        # per-context runs into one combined summary before rendering.
        print(format_text_report({
            "total_rules": report["total_rules"],
            "matched_count": sum(r["matched_count"] for r in report["runs"]),
            "conflict_count": report["conflict_count"],
            "results": [res for run in report["runs"] for res in run["results"]],
            "conflicts": report["conflicts"],
        }))

    if args.out:
        write_report(report, args.out)
        print(f"report written to {args.out}")

    return 0
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def _cmd_diff(args: argparse.Namespace) -> int:
    """Handle 'diff': report rule conflicts; exit code 1 when any exist."""
    rules = load_rules_from_file(args.rules)
    conflicts = detect_conflicts(rules)

    if args.json:
        payload = [c.to_dict() for c in conflicts]
        print(json.dumps(payload, indent=2, ensure_ascii=False))
    elif not conflicts:
        print("diff: no conflicts detected")
    else:
        for c in conflicts:
            print(f"CONFLICT {c.rule_a} vs {c.rule_b}: {c.message}")

    # Non-zero exit makes the command usable as a CI gate.
    if conflicts:
        return 1
    return 0
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
if __name__ == "__main__":
|
|
120
|
+
raise SystemExit(main())
|
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any
|
|
4
|
+
|
|
5
|
+
from .errors import ConditionError
|
|
6
|
+
|
|
7
|
+
SUPPORTED_OPERATORS = {"eq", "neq", "gt", "gte", "lt", "lte", "contains", "not_contains", "exists", "not_exists"}


def evaluate_condition(condition: dict[str, Any], context: dict[str, Any]) -> bool:
    """Evaluate a single condition against a context dict.

    Condition shape:
        {"field": "score", "op": "gte", "value": 7.0}

    Returns True when the condition holds for *context*.

    Raises:
        ConditionError: if the condition is malformed, uses an unsupported
            operator, or a numeric comparison receives a non-numeric value.
    """
    field = condition.get("field")
    op = condition.get("op")
    expected = condition.get("value")

    if not isinstance(field, str) or not field.strip():
        raise ConditionError(f"condition missing 'field': {condition}")
    if op not in SUPPORTED_OPERATORS:
        raise ConditionError(f"unsupported operator '{op}' in condition: {condition}")

    actual = context.get(field)

    # Existence checks look at key presence, not value (a key set to None exists).
    if op == "exists":
        return field in context
    if op == "not_exists":
        return field not in context

    # Missing (or explicit None) values never match a value-based operator.
    if actual is None:
        return False

    if op == "eq":
        return actual == expected
    if op == "neq":
        return actual != expected
    if op in ("gt", "gte", "lt", "lte"):
        # Coerce both sides to float so "7" and 7 compare consistently.
        # Fix: wrap conversion failures in ConditionError so callers only need
        # the package's own error hierarchy (previously a bare
        # ValueError/TypeError escaped from float()).
        try:
            lhs = float(actual)
            rhs = float(expected)
        except (TypeError, ValueError) as exc:
            raise ConditionError(
                f"non-numeric value for '{op}' in condition: {condition}"
            ) from exc
        if op == "gt":
            return lhs > rhs
        if op == "gte":
            return lhs >= rhs
        if op == "lt":
            return lhs < rhs
        return lhs <= rhs
    if op == "contains":
        return str(expected) in str(actual)
    if op == "not_contains":
        return str(expected) not in str(actual)

    raise ConditionError(f"unhandled operator '{op}'")
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def evaluate_all(conditions: list[dict[str, Any]], context: dict[str, Any]) -> tuple[int, int]:
    """Evaluate every condition in *conditions* against *context*.

    Returns a ``(total_checked, total_passed)`` pair.  All conditions are
    evaluated (no short-circuit), so malformed ones always raise.
    """
    hits = 0
    for cond in conditions:
        if evaluate_condition(cond, context):
            hits += 1
    return len(conditions), hits
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from .contracts import ConflictReport, Rule
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def detect_conflicts(rules: list[Rule]) -> list[ConflictReport]:
    """Detect pairs of rules whose conditions overlap but whose actions differ.

    Two rules overlap when they share at least one identical condition dict
    (see ``_conditions_overlap``).  Priority is NOT considered — rules of
    different priority are still reported.  Each unordered pair is reported
    at most once; ``rule_a`` is always the later rule in input order.
    """
    conflicts: list[ConflictReport] = []
    seen: list[Rule] = []  # rules already visited; pairwise comparison is O(n^2)

    for rule in rules:
        for other in seen:
            if _conditions_overlap(rule, other) and rule.action != other.action:
                conflicts.append(
                    ConflictReport(
                        rule_a=rule.rule_id,
                        rule_b=other.rule_id,
                        conflict_type="action_conflict",
                        message=(
                            f"Rules '{rule.rule_id}' and '{other.rule_id}' have overlapping "
                            f"conditions but different actions: '{rule.action}' vs '{other.action}'"
                        ),
                    )
                )
        seen.append(rule)

    return conflicts
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def _conditions_overlap(a: Rule, b: Rule) -> bool:
    """Return True when *a* and *b* share at least one identical condition dict."""
    if not a.conditions or not b.conditions:
        return False
    others = b.conditions
    for cond in a.conditions:
        if cond in others:
            return True
    return False
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
@dataclass(slots=True)
|
|
8
|
+
class Rule:
|
|
9
|
+
rule_id: str
|
|
10
|
+
name: str
|
|
11
|
+
conditions: list[dict[str, Any]]
|
|
12
|
+
action: str
|
|
13
|
+
priority: int = 0
|
|
14
|
+
tags: list[str] = field(default_factory=list)
|
|
15
|
+
metadata: dict[str, Any] = field(default_factory=dict)
|
|
16
|
+
|
|
17
|
+
def to_dict(self) -> dict[str, Any]:
|
|
18
|
+
return {
|
|
19
|
+
"rule_id": self.rule_id,
|
|
20
|
+
"name": self.name,
|
|
21
|
+
"conditions": self.conditions,
|
|
22
|
+
"action": self.action,
|
|
23
|
+
"priority": self.priority,
|
|
24
|
+
"tags": self.tags,
|
|
25
|
+
"metadata": self.metadata,
|
|
26
|
+
}
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@dataclass(slots=True)
|
|
30
|
+
class EvaluationResult:
|
|
31
|
+
rule_id: str
|
|
32
|
+
rule_name: str
|
|
33
|
+
matched: bool
|
|
34
|
+
action: str | None
|
|
35
|
+
reason: str
|
|
36
|
+
conditions_checked: int
|
|
37
|
+
conditions_passed: int
|
|
38
|
+
|
|
39
|
+
def to_dict(self) -> dict[str, Any]:
|
|
40
|
+
return {
|
|
41
|
+
"rule_id": self.rule_id,
|
|
42
|
+
"rule_name": self.rule_name,
|
|
43
|
+
"matched": self.matched,
|
|
44
|
+
"action": self.action if self.matched else None,
|
|
45
|
+
"reason": self.reason,
|
|
46
|
+
"conditions_checked": self.conditions_checked,
|
|
47
|
+
"conditions_passed": self.conditions_passed,
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
@dataclass(slots=True)
|
|
52
|
+
class BatchResult:
|
|
53
|
+
total_rules: int
|
|
54
|
+
matched_count: int
|
|
55
|
+
results: list[EvaluationResult]
|
|
56
|
+
|
|
57
|
+
def to_dict(self) -> dict[str, Any]:
|
|
58
|
+
return {
|
|
59
|
+
"total_rules": self.total_rules,
|
|
60
|
+
"matched_count": self.matched_count,
|
|
61
|
+
"results": [r.to_dict() for r in self.results],
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
@dataclass(slots=True)
|
|
66
|
+
class ConflictReport:
|
|
67
|
+
rule_a: str
|
|
68
|
+
rule_b: str
|
|
69
|
+
conflict_type: str
|
|
70
|
+
message: str
|
|
71
|
+
|
|
72
|
+
def to_dict(self) -> dict[str, Any]:
|
|
73
|
+
return {
|
|
74
|
+
"rule_a": self.rule_a,
|
|
75
|
+
"rule_b": self.rule_b,
|
|
76
|
+
"conflict_type": self.conflict_type,
|
|
77
|
+
"message": self.message,
|
|
78
|
+
}
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class RuleLabError(Exception):
    """Base error for rule_lab; all package-specific errors derive from this."""


class RuleLoadError(RuleLabError):
    """Raised when a rule file cannot be loaded or parsed."""


class RuleValidationError(RuleLabError):
    """Raised when a rule definition fails schema validation."""


class ConditionError(RuleLabError):
    """Raised when a condition cannot be evaluated."""
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any
|
|
4
|
+
|
|
5
|
+
from .conditions import evaluate_all
|
|
6
|
+
from .contracts import BatchResult, EvaluationResult, Rule
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def evaluate_rule(rule: Rule, context: dict[str, Any]) -> EvaluationResult:
    """Evaluate one rule against *context*.

    A rule with no conditions matches unconditionally; otherwise it matches
    only when every condition passes.  ``action`` is populated only on a match.
    """
    conditions = rule.conditions
    if not conditions:
        # Unconditional rule: always fires.
        return EvaluationResult(
            rule_id=rule.rule_id,
            rule_name=rule.name,
            matched=True,
            action=rule.action,
            reason="no conditions — unconditional match",
            conditions_checked=0,
            conditions_passed=0,
        )

    checked, passed = evaluate_all(conditions, context)
    is_match = passed == checked
    return EvaluationResult(
        rule_id=rule.rule_id,
        rule_name=rule.name,
        matched=is_match,
        action=rule.action if is_match else None,
        reason=f"{passed}/{checked} conditions passed",
        conditions_checked=checked,
        conditions_passed=passed,
    )
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def evaluate_rules(rules: list[Rule], context: dict[str, Any]) -> BatchResult:
    """Evaluate every rule against a single context and aggregate the outcome."""
    outcomes = [evaluate_rule(r, context) for r in rules]
    hit_total = sum(1 for o in outcomes if o.matched)
    return BatchResult(
        total_rules=len(rules),
        matched_count=hit_total,
        results=outcomes,
    )
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def simulate(rules: list[Rule], contexts: list[dict[str, Any]]) -> list[BatchResult]:
    """Run the full rule set over each context; one BatchResult per context."""
    runs: list[BatchResult] = []
    for ctx in contexts:
        runs.append(evaluate_rules(rules, ctx))
    return runs
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from .contracts import Rule
|
|
8
|
+
from .errors import RuleLoadError, RuleValidationError
|
|
9
|
+
|
|
10
|
+
REQUIRED_RULE_FIELDS = {"rule_id", "name", "conditions", "action"}


def _validate_rule_dict(raw: dict[str, Any]) -> None:
    """Validate one raw rule mapping; raises RuleValidationError on any problem."""
    absent = REQUIRED_RULE_FIELDS.difference(raw)
    if absent:
        raise RuleValidationError(f"rule missing required fields: {sorted(absent)}")
    # 'rule_id' is guaranteed present past this point, so quoting it is safe.
    if not isinstance(raw["conditions"], list):
        raise RuleValidationError(f"rule '{raw['rule_id']}': conditions must be a list")
    if not isinstance(raw["action"], str) or not raw["action"].strip():
        raise RuleValidationError(f"rule '{raw['rule_id']}': action must be a non-empty string")
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def load_rules_from_dict(data: dict[str, Any]) -> list[Rule]:
    """Build Rule objects from a ruleset mapping ``{"rules": [...]}``.

    Rules come back sorted by descending priority then rule_id, so callers
    always see a deterministic evaluation order.

    Raises RuleLoadError for structural problems and RuleValidationError
    (via ``_validate_rule_dict``) for per-rule schema problems.
    """
    raw_rules = data.get("rules", [])
    if not isinstance(raw_rules, list):
        raise RuleLoadError("ruleset 'rules' field must be a list")

    loaded: list[Rule] = []
    for entry in raw_rules:
        if not isinstance(entry, dict):
            raise RuleLoadError(f"rule entry must be a dict, got: {type(entry)}")
        _validate_rule_dict(entry)
        # Coerce every field defensively so downstream code sees stable types.
        loaded.append(
            Rule(
                rule_id=str(entry["rule_id"]),
                name=str(entry["name"]),
                conditions=list(entry["conditions"]),
                action=str(entry["action"]),
                priority=int(entry.get("priority", 0)),
                tags=list(entry.get("tags", [])),
                metadata=dict(entry.get("metadata", {})),
            )
        )
    loaded.sort(key=lambda r: (-r.priority, r.rule_id))
    return loaded
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def load_rules_from_file(path: str | Path) -> list[Rule]:
    """Load and validate a rule file, returning sorted Rule objects.

    Raises RuleLoadError for a missing file, malformed JSON, or a top-level
    value that is not a JSON object.
    """
    source = Path(path)
    if not source.exists():
        raise RuleLoadError(f"rule file not found: {source}")
    try:
        payload = json.loads(source.read_text(encoding="utf-8"))
    except json.JSONDecodeError as exc:
        raise RuleLoadError(f"invalid JSON in rule file {source}: {exc}") from exc
    if not isinstance(payload, dict):
        raise RuleLoadError(f"rule file must contain a JSON object: {source}")
    return load_rules_from_dict(payload)
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def load_rules_from_list(raw_rules: list[dict[str, Any]]) -> list[Rule]:
    """Convenience wrapper: load from a bare list of rule dicts."""
    wrapped = {"rules": raw_rules}
    return load_rules_from_dict(wrapped)
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
@dataclass(slots=True)
|
|
8
|
+
class RuleSet:
|
|
9
|
+
name: str
|
|
10
|
+
version: str
|
|
11
|
+
rules: list[dict[str, Any]] = field(default_factory=list)
|
|
12
|
+
metadata: dict[str, Any] = field(default_factory=dict)
|
|
13
|
+
|
|
14
|
+
def to_dict(self) -> dict[str, Any]:
|
|
15
|
+
return {
|
|
16
|
+
"name": self.name,
|
|
17
|
+
"version": self.version,
|
|
18
|
+
"rules": self.rules,
|
|
19
|
+
"metadata": self.metadata,
|
|
20
|
+
}
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from .contracts import BatchResult, ConflictReport
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def build_run_report(
    batch: BatchResult,
    conflicts: list[ConflictReport],
    context: dict[str, Any],
) -> dict[str, Any]:
    """Assemble the JSON-ready report for a single-context run."""
    report: dict[str, Any] = {"context": context}
    report["total_rules"] = batch.total_rules
    report["matched_count"] = batch.matched_count
    report["conflict_count"] = len(conflicts)
    report["results"] = [entry.to_dict() for entry in batch.results]
    report["conflicts"] = [entry.to_dict() for entry in conflicts]
    return report
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def build_batch_report(
    batch_results: list[BatchResult],
    conflicts: list[ConflictReport],
    contexts: list[dict[str, Any]],
) -> dict[str, Any]:
    """Assemble the JSON-ready report for a multi-context simulation."""
    runs = []
    for index, batch in enumerate(batch_results):
        runs.append(
            {
                "context_index": index,
                "matched_count": batch.matched_count,
                "results": [entry.to_dict() for entry in batch.results],
            }
        )
    # total_rules is constant across runs, so the first batch is representative.
    rule_total = batch_results[0].total_rules if batch_results else 0
    return {
        "total_contexts": len(contexts),
        "total_rules": rule_total,
        "conflict_count": len(conflicts),
        "conflicts": [entry.to_dict() for entry in conflicts],
        "runs": runs,
    }
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def format_text_report(report: dict[str, Any]) -> str:
    """Render a report dict as a human-readable multi-line text block.

    Summary keys are read with ``.get`` so a partial report still formats;
    missing counters render as '?' (conflict_count defaults to 0).
    """
    # Plain literal: the original used an f-string with no placeholder (ruff F541).
    lines = ["rule_lab report:"]
    lines.append(f" total_rules : {report.get('total_rules', '?')}")
    lines.append(f" matched_count : {report.get('matched_count', '?')}")
    lines.append(f" conflict_count : {report.get('conflict_count', 0)}")
    for result in report.get("results", []):
        status = "MATCH" if result["matched"] else "miss"
        lines.append(f" [{status}] {result['rule_id']} — {result['reason']}")
    if report.get("conflicts"):
        lines.append(" conflicts:")
        for c in report["conflicts"]:
            lines.append(f" {c['rule_a']} vs {c['rule_b']}: {c['message']}")
    return "\n".join(lines)
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def write_report(report: dict[str, Any], path: str | Path) -> None:
    """Serialize *report* as pretty JSON to *path*, creating parent dirs."""
    target = Path(path)
    target.parent.mkdir(parents=True, exist_ok=True)
    rendered = json.dumps(report, indent=2, ensure_ascii=False)
    target.write_text(rendered + "\n", encoding="utf-8")
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: rule-lab
|
|
3
|
+
Version: 0.1.1
|
|
4
|
+
Summary: WinstonRedGuard local-first deterministic rule evaluation engine
|
|
5
|
+
Author-email: Yakuphan <yakuphan.yucel11@gmail.com>
|
|
6
|
+
License-Expression: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/yakuphanycl/WinstonRedGuard
|
|
8
|
+
Requires-Python: >=3.10
|
|
9
|
+
Description-Content-Type: text/markdown
|
|
10
|
+
Provides-Extra: dev
|
|
11
|
+
Requires-Dist: pytest>=8; extra == "dev"
|
|
12
|
+
|
|
13
|
+
# rule_lab
|
|
14
|
+
|
|
15
|
+
Deterministik kural değerlendirme motoru. JSON kural dosyasını context listesine karşı simüle eder, çakışmaları tespit eder.
|
|
16
|
+
|
|
17
|
+
## Kurulum
|
|
18
|
+
```
|
|
19
|
+
cd apps/rule_lab
|
|
20
|
+
pip install -e ".[dev]"
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
## Kullanım
|
|
24
|
+
```
|
|
25
|
+
rule-lab validate --rules rules.json
|
|
26
|
+
rule-lab simulate --rules rules.json --contexts contexts.json
|
|
27
|
+
rule-lab diff --rules rules.json
|
|
28
|
+
```
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
README.md
|
|
2
|
+
pyproject.toml
|
|
3
|
+
src/rule_lab/__init__.py
|
|
4
|
+
src/rule_lab/cli.py
|
|
5
|
+
src/rule_lab.egg-info/PKG-INFO
|
|
6
|
+
src/rule_lab.egg-info/SOURCES.txt
|
|
7
|
+
src/rule_lab.egg-info/dependency_links.txt
|
|
8
|
+
src/rule_lab.egg-info/entry_points.txt
|
|
9
|
+
src/rule_lab.egg-info/requires.txt
|
|
10
|
+
src/rule_lab.egg-info/top_level.txt
|
|
11
|
+
src/rule_lab/core/__init__.py
|
|
12
|
+
src/rule_lab/core/conditions.py
|
|
13
|
+
src/rule_lab/core/conflicts.py
|
|
14
|
+
src/rule_lab/core/contracts.py
|
|
15
|
+
src/rule_lab/core/errors.py
|
|
16
|
+
src/rule_lab/core/evaluator.py
|
|
17
|
+
src/rule_lab/core/loader.py
|
|
18
|
+
src/rule_lab/core/models.py
|
|
19
|
+
src/rule_lab/core/reporting.py
|
|
20
|
+
tests/test_batch.py
|
|
21
|
+
tests/test_cli.py
|
|
22
|
+
tests/test_conditions.py
|
|
23
|
+
tests/test_diff.py
|
|
24
|
+
tests/test_metadata_consistency.py
|
|
25
|
+
tests/test_simulate.py
|
|
26
|
+
tests/test_validate.py
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
rule_lab
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
"""Tests for reporting: run/batch report shape, text rendering, file output."""
from __future__ import annotations
import json
from pathlib import Path
from rule_lab.core.contracts import Rule
from rule_lab.core.evaluator import evaluate_rules, simulate
from rule_lab.core.conflicts import detect_conflicts
from rule_lab.core.reporting import build_run_report, build_batch_report, format_text_report, write_report


def _rule(rule_id: str, conditions: list, action: str) -> Rule:
    # Minimal Rule factory; the name mirrors the id for readable output.
    return Rule(rule_id=rule_id, name=rule_id, conditions=conditions, action=action)


# Shared fixtures: one rule keeps high scores, one discards low ones.
RULE_KEEP = _rule("r1", [{"field": "score", "op": "gte", "value": 7.0}], "keep")
RULE_DISCARD = _rule("r2", [{"field": "score", "op": "lt", "value": 4.0}], "discard")


def test_build_run_report_shape():
    # A single-context report must expose all six top-level keys.
    ctx = {"score": 8.0}
    batch = evaluate_rules([RULE_KEEP, RULE_DISCARD], ctx)
    conflicts = detect_conflicts([RULE_KEEP, RULE_DISCARD])
    report = build_run_report(batch, conflicts, ctx)

    for key in ("context", "total_rules", "matched_count", "conflict_count", "results", "conflicts"):
        assert key in report


def test_build_batch_report_shape():
    # One run entry per context; rule count comes from the first run.
    contexts = [{"score": 8.0}, {"score": 2.0}]
    results = simulate([RULE_KEEP, RULE_DISCARD], contexts)
    conflicts = detect_conflicts([RULE_KEEP, RULE_DISCARD])
    report = build_batch_report(results, conflicts, contexts)

    assert report["total_contexts"] == 2
    assert report["total_rules"] == 2
    assert len(report["runs"]) == 2


def test_format_text_report_is_string():
    ctx = {"score": 8.0}
    batch = evaluate_rules([RULE_KEEP], ctx)
    report = build_run_report(batch, [], ctx)
    text = format_text_report(report)
    assert isinstance(text, str)
    assert "rule_lab report" in text


def test_write_report_creates_file(tmp_path: Path):
    # write_report must create missing parent directories and emit valid JSON.
    ctx = {"score": 8.0}
    batch = evaluate_rules([RULE_KEEP], ctx)
    report = build_run_report(batch, [], ctx)
    out = tmp_path / "out" / "report.json"
    write_report(report, out)
    assert out.exists()
    parsed = json.loads(out.read_text(encoding="utf-8"))
    assert "total_rules" in parsed
|
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
"""End-to-end CLI tests driving rule_lab.cli.main with real temp files."""
from __future__ import annotations

import json
from pathlib import Path

import pytest

from rule_lab.cli import main


# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------

# Baseline rule: keep anything scoring >= 7.0.
VALID_RULE = {
    "rule_id": "r1",
    "name": "Score threshold",
    "conditions": [{"field": "score", "op": "gte", "value": 7.0}],
    "action": "keep",
    "priority": 1,
}

# Same condition as VALID_RULE but a different action — triggers a conflict.
CONFLICTING_RULE = {
    "rule_id": "r2",
    "name": "Score threshold conflict",
    "conditions": [{"field": "score", "op": "gte", "value": 7.0}],
    "action": "discard",
    "priority": 1,
}


def _write_rules(path: Path, rules: list) -> None:
    # Serialize a ruleset in the {"rules": [...]} wrapper the loader expects.
    path.write_text(json.dumps({"rules": rules}), encoding="utf-8")


def _write_contexts(path: Path, contexts: list) -> None:
    # Contexts files are a bare JSON array of dicts.
    path.write_text(json.dumps(contexts), encoding="utf-8")


# ---------------------------------------------------------------------------
# validate
# ---------------------------------------------------------------------------

def test_validate_ok(tmp_path: Path) -> None:
    rules_file = tmp_path / "rules.json"
    _write_rules(rules_file, [VALID_RULE])
    rc = main(["validate", "--rules", str(rules_file)])
    assert rc == 0


def test_validate_missing_file_returns_nonzero() -> None:
    rc = main(["validate", "--rules", "/nonexistent/rules.json"])
    assert rc != 0


def test_validate_invalid_json(tmp_path: Path) -> None:
    rules_file = tmp_path / "bad.json"
    rules_file.write_text("NOT JSON", encoding="utf-8")
    rc = main(["validate", "--rules", str(rules_file)])
    assert rc != 0


# ---------------------------------------------------------------------------
# simulate
# ---------------------------------------------------------------------------

def test_simulate_ok(tmp_path: Path) -> None:
    rules_file = tmp_path / "rules.json"
    contexts_file = tmp_path / "contexts.json"
    _write_rules(rules_file, [VALID_RULE])
    _write_contexts(contexts_file, [{"score": 8.0}, {"score": 3.0}])
    rc = main(["simulate", "--rules", str(rules_file), "--contexts", str(contexts_file)])
    assert rc == 0


def test_simulate_json_flag(tmp_path: Path) -> None:
    rules_file = tmp_path / "rules.json"
    contexts_file = tmp_path / "contexts.json"
    _write_rules(rules_file, [VALID_RULE])
    _write_contexts(contexts_file, [{"score": 8.0}])
    rc = main(["simulate", "--rules", str(rules_file), "--contexts", str(contexts_file), "--json"])
    assert rc == 0


def test_simulate_writes_out_file(tmp_path: Path) -> None:
    rules_file = tmp_path / "rules.json"
    contexts_file = tmp_path / "contexts.json"
    out_file = tmp_path / "report.json"
    _write_rules(rules_file, [VALID_RULE])
    _write_contexts(contexts_file, [{"score": 8.0}])
    rc = main(["simulate", "--rules", str(rules_file), "--contexts", str(contexts_file), "--out", str(out_file)])
    assert rc == 0
    assert out_file.exists()
    parsed = json.loads(out_file.read_text(encoding="utf-8"))
    assert "runs" in parsed


def test_simulate_missing_contexts_returns_nonzero(tmp_path: Path) -> None:
    rules_file = tmp_path / "rules.json"
    _write_rules(rules_file, [VALID_RULE])
    rc = main(["simulate", "--rules", str(rules_file), "--contexts", str(tmp_path / "missing.json")])
    assert rc != 0
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
# ---------------------------------------------------------------------------
|
|
106
|
+
# diff
|
|
107
|
+
# ---------------------------------------------------------------------------
|
|
108
|
+
|
|
109
|
+
def test_diff_no_conflicts_returns_zero(tmp_path: Path) -> None:
|
|
110
|
+
rules_file = tmp_path / "rules.json"
|
|
111
|
+
_write_rules(rules_file, [VALID_RULE])
|
|
112
|
+
rc = main(["diff", "--rules", str(rules_file)])
|
|
113
|
+
assert rc == 0
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
def test_diff_with_conflicts_returns_nonzero(tmp_path: Path) -> None:
|
|
117
|
+
rules_file = tmp_path / "rules.json"
|
|
118
|
+
_write_rules(rules_file, [VALID_RULE, CONFLICTING_RULE])
|
|
119
|
+
rc = main(["diff", "--rules", str(rules_file)])
|
|
120
|
+
assert rc != 0
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
def test_diff_json_flag(tmp_path: Path) -> None:
|
|
124
|
+
rules_file = tmp_path / "rules.json"
|
|
125
|
+
_write_rules(rules_file, [VALID_RULE, CONFLICTING_RULE])
|
|
126
|
+
rc = main(["diff", "--rules", str(rules_file), "--json"])
|
|
127
|
+
assert rc != 0
|
|
from __future__ import annotations

import pytest

from rule_lab.core.conditions import evaluate_all, evaluate_condition
from rule_lab.core.errors import ConditionError


def test_eq_match():
    cond = {"field": "status", "op": "eq", "value": "active"}
    assert evaluate_condition(cond, {"status": "active"})


def test_eq_no_match():
    cond = {"field": "status", "op": "eq", "value": "active"}
    assert not evaluate_condition(cond, {"status": "inactive"})


def test_gte_match():
    cond = {"field": "score", "op": "gte", "value": 7.0}
    assert evaluate_condition(cond, {"score": 8.5})


def test_gte_no_match():
    cond = {"field": "score", "op": "gte", "value": 7.0}
    assert not evaluate_condition(cond, {"score": 5.0})


def test_lt_match():
    cond = {"field": "age", "op": "lt", "value": 30}
    assert evaluate_condition(cond, {"age": 25})


def test_contains_match():
    cond = {"field": "tag", "op": "contains", "value": "lab"}
    assert evaluate_condition(cond, {"tag": "rule_lab"})


def test_not_contains_match():
    cond = {"field": "tag", "op": "not_contains", "value": "prod"}
    assert evaluate_condition(cond, {"tag": "dev"})


def test_exists_true():
    # A present field counts as existing even when its value is falsy (0).
    assert evaluate_condition({"field": "score", "op": "exists"}, {"score": 0})


def test_exists_false():
    assert not evaluate_condition({"field": "score", "op": "exists"}, {})


def test_not_exists_true():
    assert evaluate_condition({"field": "missing", "op": "not_exists"}, {})


def test_missing_field_returns_false():
    # Comparison operators on an absent field evaluate to False, not an error.
    assert not evaluate_condition({"field": "score", "op": "eq", "value": 5}, {})


def test_unsupported_operator_raises():
    with pytest.raises(ConditionError):
        evaluate_condition({"field": "x", "op": "regex", "value": ".*"}, {"x": "hello"})


def test_missing_field_key_raises():
    with pytest.raises(ConditionError):
        evaluate_condition({"op": "eq", "value": 1}, {"x": 1})


def test_evaluate_all_counts():
    conditions = [
        {"field": "score", "op": "gte", "value": 5.0},
        {"field": "status", "op": "eq", "value": "ok"},
    ]
    checked, passed = evaluate_all(conditions, {"score": 8.0, "status": "ok"})
    assert checked == 2
    assert passed == 2


def test_evaluate_all_partial():
    conditions = [
        {"field": "score", "op": "gte", "value": 5.0},
        {"field": "status", "op": "eq", "value": "ok"},
    ]
    checked, passed = evaluate_all(conditions, {"score": 8.0, "status": "fail"})
    assert checked == 2
    assert passed == 1
from __future__ import annotations

from rule_lab.core.conflicts import detect_conflicts
from rule_lab.core.contracts import Rule


def _rule(rule_id: str, conditions: list, action: str) -> Rule:
    """Build a minimal Rule whose name mirrors its id."""
    return Rule(rule_id=rule_id, name=rule_id, conditions=conditions, action=action)


COND_A = {"field": "score", "op": "gte", "value": 7.0}
COND_B = {"field": "status", "op": "eq", "value": "ok"}


def test_no_conflicts_when_same_action():
    rules = [_rule("r1", [COND_A], "keep"), _rule("r2", [COND_A], "keep")]
    assert detect_conflicts(rules) == []


def test_conflict_detected_when_different_actions():
    conflicts = detect_conflicts([
        _rule("r1", [COND_A], "keep"),
        _rule("r2", [COND_A], "discard"),
    ])
    assert len(conflicts) == 1
    report = conflicts[0]
    assert report.conflict_type == "action_conflict"
    # One side of the reported pair must reference r1.
    assert "r1" in report.rule_a or "r1" in report.rule_b


def test_no_conflict_when_conditions_differ():
    rules = [_rule("r1", [COND_A], "keep"), _rule("r2", [COND_B], "discard")]
    assert detect_conflicts(rules) == []


def test_no_conflict_when_no_conditions():
    rules = [_rule("r1", [], "keep"), _rule("r2", [], "discard")]
    assert detect_conflicts(rules) == []


def test_conflict_report_to_dict():
    conflicts = detect_conflicts([
        _rule("r1", [COND_A], "keep"),
        _rule("r2", [COND_A], "discard"),
    ])
    payload = conflicts[0].to_dict()
    for key in ("rule_a", "rule_b", "conflict_type", "message"):
        assert key in payload


def test_multiple_conflicts():
    # Three rules with identical conditions but three distinct actions
    # must produce at least one conflict per clashing pair.
    conflicts = detect_conflicts([
        _rule("r1", [COND_A], "keep"),
        _rule("r2", [COND_A], "discard"),
        _rule("r3", [COND_A], "review"),
    ])
    assert len(conflicts) >= 2
from __future__ import annotations

from rule_lab.core.contracts import Rule
from rule_lab.core.evaluator import evaluate_rule, evaluate_rules
from rule_lab.core.loader import load_rules_from_list
from rule_lab.core.models import RuleSet


# ---------------------------------------------------------------------------
# Rule metadata round-trip
# ---------------------------------------------------------------------------

def test_rule_metadata_survives_loader() -> None:
    payload = [{
        "rule_id": "r1",
        "name": "Meta rule",
        "conditions": [{"field": "score", "op": "gte", "value": 5.0}],
        "action": "keep",
        "tags": ["important", "v2"],
        "metadata": {"author": "yakuphan", "version": "0.2"},
    }]
    loaded = load_rules_from_list(payload)
    assert loaded[0].metadata == {"author": "yakuphan", "version": "0.2"}
    assert loaded[0].tags == ["important", "v2"]


def test_rule_metadata_in_to_dict() -> None:
    rule = Rule(
        rule_id="r1",
        name="test",
        conditions=[],
        action="keep",
        tags=["x"],
        metadata={"source": "unit_test"},
    )
    serialized = rule.to_dict()
    assert serialized["metadata"] == {"source": "unit_test"}
    assert serialized["tags"] == ["x"]


def test_rule_metadata_empty_by_default() -> None:
    payload = [{
        "rule_id": "r2",
        "name": "No meta",
        "conditions": [],
        "action": "pass",
    }]
    loaded = load_rules_from_list(payload)
    assert loaded[0].metadata == {}
    assert loaded[0].tags == []


# ---------------------------------------------------------------------------
# RuleSet model
# ---------------------------------------------------------------------------

def test_ruleset_to_dict_shape() -> None:
    ruleset = RuleSet(
        name="test_set",
        version="1.0",
        rules=[{"rule_id": "r1", "name": "x", "conditions": [], "action": "keep"}],
        metadata={"owner": "team_a"},
    )
    serialized = ruleset.to_dict()
    assert serialized["name"] == "test_set"
    assert serialized["version"] == "1.0"
    assert serialized["metadata"] == {"owner": "team_a"}
    assert len(serialized["rules"]) == 1


def test_ruleset_empty_metadata_by_default() -> None:
    ruleset = RuleSet(name="empty", version="0.1")
    assert ruleset.metadata == {}
    assert ruleset.rules == []


# ---------------------------------------------------------------------------
# Metadata does not affect evaluation
# ---------------------------------------------------------------------------

def test_metadata_does_not_affect_match_result() -> None:
    shared_conditions = [{"field": "score", "op": "gte", "value": 7.0}]
    annotated = Rule(
        rule_id="r1",
        name="with meta",
        conditions=shared_conditions,
        action="keep",
        metadata={"note": "some annotation"},
    )
    plain = Rule(
        rule_id="r2",
        name="without meta",
        conditions=shared_conditions,
        action="keep",
    )
    ctx = {"score": 8.0}
    annotated_result = evaluate_rule(annotated, ctx)
    plain_result = evaluate_rule(plain, ctx)

    # Metadata is an annotation only — it must never change the outcome.
    assert annotated_result.matched == plain_result.matched
    assert annotated_result.action == plain_result.action


# ---------------------------------------------------------------------------
# Priority metadata
# ---------------------------------------------------------------------------

def test_priority_preserved_in_to_dict() -> None:
    payload = [
        {"rule_id": "low", "name": "low", "conditions": [], "action": "pass", "priority": 1},
        {"rule_id": "high", "name": "high", "conditions": [], "action": "pass", "priority": 10},
    ]
    priorities = [rule.to_dict()["priority"] for rule in load_rules_from_list(payload)]
    assert 10 in priorities
    assert 1 in priorities


def test_evaluation_result_does_not_leak_metadata() -> None:
    rule = Rule(
        rule_id="r1",
        name="meta check",
        conditions=[{"field": "x", "op": "eq", "value": 1}],
        action="keep",
        metadata={"secret": "internal"},
    )
    serialized = evaluate_rule(rule, {"x": 1}).to_dict()
    assert "metadata" not in serialized
    assert "secret" not in serialized
from __future__ import annotations

from rule_lab.core.contracts import Rule
from rule_lab.core.evaluator import evaluate_rule, evaluate_rules, simulate


def _rule(rule_id: str, conditions: list, action: str, priority: int = 0) -> Rule:
    """Shortcut for building a Rule whose name equals its id."""
    return Rule(rule_id=rule_id, name=rule_id, conditions=conditions, action=action, priority=priority)


SCORE_RULE = _rule("r1", [{"field": "score", "op": "gte", "value": 7.0}], "keep")
STATUS_RULE = _rule("r2", [{"field": "status", "op": "eq", "value": "ok"}], "promote")
NO_COND_RULE = _rule("r3", [], "default_action")


def test_evaluate_rule_match():
    outcome = evaluate_rule(SCORE_RULE, {"score": 8.0})
    assert outcome.matched is True
    assert outcome.action == "keep"


def test_evaluate_rule_no_match():
    outcome = evaluate_rule(SCORE_RULE, {"score": 5.0})
    assert outcome.matched is False
    assert outcome.action is None


def test_evaluate_rule_no_conditions_always_matches():
    # An empty condition list is vacuously true.
    outcome = evaluate_rule(NO_COND_RULE, {})
    assert outcome.matched is True
    assert outcome.action == "default_action"


def test_evaluate_rule_result_fields():
    outcome = evaluate_rule(SCORE_RULE, {"score": 9.0})
    assert outcome.rule_id == "r1"
    assert outcome.conditions_checked == 1
    assert outcome.conditions_passed == 1
    assert isinstance(outcome.reason, str)


def test_evaluate_rules_batch():
    batch = evaluate_rules([SCORE_RULE, STATUS_RULE], {"score": 8.0, "status": "ok"})
    assert batch.total_rules == 2
    assert batch.matched_count == 2


def test_evaluate_rules_partial_match():
    batch = evaluate_rules([SCORE_RULE, STATUS_RULE], {"score": 3.0, "status": "ok"})
    assert batch.matched_count == 1


def test_evaluate_rules_no_match():
    batch = evaluate_rules([SCORE_RULE], {"score": 1.0})
    assert batch.matched_count == 0


def test_simulate_multiple_contexts():
    contexts = [{"score": 8.0}, {"score": 2.0}, {"score": 7.0}]
    runs = simulate([SCORE_RULE], contexts)
    assert len(runs) == 3
    assert runs[0].matched_count == 1
    assert runs[1].matched_count == 0
    assert runs[2].matched_count == 1


def test_to_dict_shape():
    serialized = evaluate_rule(SCORE_RULE, {"score": 8.0}).to_dict()
    for key in ("rule_id", "rule_name", "matched", "action", "reason", "conditions_checked", "conditions_passed"):
        assert key in serialized
@@ -0,0 +1,78 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
import json
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
import pytest
|
|
5
|
+
from rule_lab.core.loader import load_rules_from_dict, load_rules_from_file, load_rules_from_list
|
|
6
|
+
from rule_lab.core.errors import RuleLoadError, RuleValidationError
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
VALID_RULE = {
|
|
10
|
+
"rule_id": "r1",
|
|
11
|
+
"name": "Score threshold",
|
|
12
|
+
"conditions": [{"field": "score", "op": "gte", "value": 7.0}],
|
|
13
|
+
"action": "keep",
|
|
14
|
+
"priority": 5,
|
|
15
|
+
}
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def test_load_from_dict():
|
|
19
|
+
rules = load_rules_from_dict({"rules": [VALID_RULE]})
|
|
20
|
+
assert len(rules) == 1
|
|
21
|
+
assert rules[0].rule_id == "r1"
|
|
22
|
+
assert rules[0].action == "keep"
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def test_load_from_list():
|
|
26
|
+
rules = load_rules_from_list([VALID_RULE])
|
|
27
|
+
assert len(rules) == 1
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def test_load_empty_ruleset():
|
|
31
|
+
rules = load_rules_from_dict({"rules": []})
|
|
32
|
+
assert rules == []
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def test_missing_required_field_raises():
|
|
36
|
+
bad = {k: v for k, v in VALID_RULE.items() if k != "action"}
|
|
37
|
+
with pytest.raises(RuleValidationError, match="action"):
|
|
38
|
+
load_rules_from_list([bad])
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def test_conditions_not_list_raises():
|
|
42
|
+
bad = {**VALID_RULE, "conditions": "not_a_list"}
|
|
43
|
+
with pytest.raises(RuleValidationError):
|
|
44
|
+
load_rules_from_list([bad])
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def test_rules_field_not_list_raises():
|
|
48
|
+
with pytest.raises(RuleLoadError):
|
|
49
|
+
load_rules_from_dict({"rules": "bad"})
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def test_load_from_file(tmp_path: Path):
|
|
53
|
+
f = tmp_path / "rules.json"
|
|
54
|
+
f.write_text(json.dumps({"rules": [VALID_RULE]}), encoding="utf-8")
|
|
55
|
+
rules = load_rules_from_file(f)
|
|
56
|
+
assert len(rules) == 1
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def test_load_from_missing_file_raises():
|
|
60
|
+
with pytest.raises(RuleLoadError, match="not found"):
|
|
61
|
+
load_rules_from_file("/nonexistent/path/rules.json")
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def test_load_from_invalid_json_raises(tmp_path: Path):
|
|
65
|
+
f = tmp_path / "bad.json"
|
|
66
|
+
f.write_text("NOT JSON", encoding="utf-8")
|
|
67
|
+
with pytest.raises(RuleLoadError, match="invalid JSON"):
|
|
68
|
+
load_rules_from_file(f)
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def test_priority_ordering():
|
|
72
|
+
rules_data = [
|
|
73
|
+
{**VALID_RULE, "rule_id": "low", "priority": 1},
|
|
74
|
+
{**VALID_RULE, "rule_id": "high", "priority": 10},
|
|
75
|
+
]
|
|
76
|
+
rules = load_rules_from_list(rules_data)
|
|
77
|
+
assert rules[0].rule_id == "high"
|
|
78
|
+
assert rules[1].rule_id == "low"
|