pddl-pyvalidator 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 SPL@BGU
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,13 @@
1
+ Metadata-Version: 2.4
2
+ Name: pddl-pyvalidator
3
+ Version: 0.1.0
4
+ Summary: Pure Python PDDL plan validator
5
+ License-Expression: MIT
6
+ Requires-Python: >=3.10
7
+ License-File: LICENSE
8
+ Requires-Dist: unified-planning>=1.1.0
9
+ Provides-Extra: dev
10
+ Requires-Dist: pytest>=7.0; extra == "dev"
11
+ Provides-Extra: copilot
12
+ Requires-Dist: pddl-plus-parser; extra == "copilot"
13
+ Dynamic: license-file
@@ -0,0 +1,13 @@
1
+ Metadata-Version: 2.4
2
+ Name: pddl-pyvalidator
3
+ Version: 0.1.0
4
+ Summary: Pure Python PDDL plan validator
5
+ License-Expression: MIT
6
+ Requires-Python: >=3.10
7
+ License-File: LICENSE
8
+ Requires-Dist: unified-planning>=1.1.0
9
+ Provides-Extra: dev
10
+ Requires-Dist: pytest>=7.0; extra == "dev"
11
+ Provides-Extra: copilot
12
+ Requires-Dist: pddl-plus-parser; extra == "copilot"
13
+ Dynamic: license-file
@@ -0,0 +1,23 @@
1
+ LICENSE
2
+ pyproject.toml
3
+ pddl_pyvalidator.egg-info/PKG-INFO
4
+ pddl_pyvalidator.egg-info/SOURCES.txt
5
+ pddl_pyvalidator.egg-info/dependency_links.txt
6
+ pddl_pyvalidator.egg-info/entry_points.txt
7
+ pddl_pyvalidator.egg-info/requires.txt
8
+ pddl_pyvalidator.egg-info/top_level.txt
9
+ pyval/__init__.py
10
+ pyval/cli.py
11
+ pyval/diagnostics.py
12
+ pyval/models.py
13
+ pyval/numeric_tracker.py
14
+ pyval/plan_simulator.py
15
+ pyval/report_formatter.py
16
+ pyval/syntax_checker.py
17
+ pyval/validator.py
18
+ tests/test_cli.py
19
+ tests/test_integration.py
20
+ tests/test_plan_simulator.py
21
+ tests/test_report_formatter.py
22
+ tests/test_syntax_checker.py
23
+ tests/test_validator.py
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ pyval = pyval.cli:main
@@ -0,0 +1,7 @@
1
+ unified-planning>=1.1.0
2
+
3
+ [copilot]
4
+ pddl-plus-parser
5
+
6
+ [dev]
7
+ pytest>=7.0
@@ -0,0 +1,20 @@
1
+ [build-system]
2
+ requires = ["setuptools>=61.0", "wheel"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "pddl-pyvalidator"
7
+ version = "0.1.0"
8
+ description = "Pure Python PDDL plan validator"
9
+ license = "MIT"
10
+ requires-python = ">=3.10"
11
+ dependencies = [
12
+ "unified-planning>=1.1.0",
13
+ ]
14
+
15
+ [project.optional-dependencies]
16
+ dev = ["pytest>=7.0"]
17
+ copilot = ["pddl-plus-parser"]
18
+
19
+ [project.scripts]
20
+ pyval = "pyval.cli:main"
@@ -0,0 +1,21 @@
1
+ """PyVAL — Pure Python PDDL plan validator."""
2
+
3
+ from pyval.models import (
4
+ GoalResult,
5
+ NumericChange,
6
+ PreconditionFailure,
7
+ StateSnapshot,
8
+ StepResult,
9
+ ValidationResult,
10
+ )
11
+ from pyval.validator import PDDLValidator
12
+
13
+ __all__ = [
14
+ "PDDLValidator",
15
+ "ValidationResult",
16
+ "StepResult",
17
+ "NumericChange",
18
+ "PreconditionFailure",
19
+ "GoalResult",
20
+ "StateSnapshot",
21
+ ]
@@ -0,0 +1,78 @@
1
+ """CLI entry point — mirrors VAL's Validate interface."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import argparse
6
+ import sys
7
+
8
+ from pyval.report_formatter import format_json, format_plain_text, format_trajectory
9
+ from pyval.validator import PDDLValidator
10
+
11
+
12
def main() -> None:
    """Console entry point — mirrors VAL's Validate interface.

    Parses the command line, dispatches to syntax-only or full plan
    validation depending on how many files were given, prints the report
    in the requested format, and exits 0 on a valid result, 1 otherwise.
    """
    import json

    parser = argparse.ArgumentParser(
        prog="pyval",
        description="PyVAL — Pure Python PDDL plan validator",
    )
    parser.add_argument(
        "files",
        nargs="+",
        metavar="FILE",
        help="PDDL files: domain [problem [plan]]",
    )
    parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")
    parser.add_argument("--json", action="store_true", help="Output as structured JSON")
    parser.add_argument(
        "--trajectory", action="store_true", help="Output numeric fluent trajectory"
    )
    parser.add_argument(
        "--track",
        action="append",
        metavar="FLUENT",
        help="Track specific numeric fluent (repeatable)",
    )
    parser.add_argument("--version", action="version", version="pyval 0.1.0")

    args = parser.parse_args()
    files = args.files
    if len(files) > 3:
        parser.error("Expected 1-3 files: domain [problem [plan]]")

    validator = PDDLValidator()
    file_count = len(files)
    if file_count == 1:
        # Domain only: syntax check.
        result = validator.validate_syntax(domain_path=files[0])
    elif file_count == 2:
        # Domain + problem: syntax/structure check.
        result = validator.validate_syntax(
            domain_path=files[0], problem_path=files[1]
        )
    else:
        # Domain + problem + plan: full plan validation.
        result = validator.validate(
            domain_path=files[0],
            problem_path=files[1],
            plan_path=files[2],
            tracked_fluents=args.track,
        )

    # Report in the requested format (--json takes precedence over --trajectory).
    if args.json:
        print(json.dumps(format_json(result), indent=2))
    elif args.trajectory:
        print(format_trajectory(result, tracked=args.track))
    else:
        print(format_plain_text(result, verbose=args.verbose))

    sys.exit(0 if result.is_valid else 1)
75
+
76
+
77
+ if __name__ == "__main__":
78
+ main()
@@ -0,0 +1,253 @@
1
+ """Diagnostic message generation — precondition decomposition and goal checking."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from unified_planning.model import FNode, OperatorKind
6
+
7
+ from pyval.models import GoalResult, PreconditionFailure
8
+
9
+
10
def decompose_preconditions(
    action, parameters: tuple, state, problem
) -> list[PreconditionFailure]:
    """Evaluate every precondition of *action* against *state*.

    ``parameters`` are the grounded objects substituted into the action
    schema.  Each precondition that does not hold contributes one or more
    entries to the returned list; an empty list means all preconditions
    are satisfied.
    """
    binding = dict(zip(action.parameters, parameters))
    collected: list[PreconditionFailure] = []
    for condition in action.preconditions:
        _evaluate_expr(condition, state, binding, collected)
    return collected
24
+
25
+
26
def check_goals(problem, state) -> list[GoalResult]:
    """Return one GoalResult per (decomposed) goal of *problem* in *state*."""
    outcome: list[GoalResult] = []
    for goal_expr in problem.goals:
        _evaluate_goal(goal_expr, state, outcome)
    return outcome
32
+
33
+
34
def _evaluate_expr(
    expr: FNode,
    state,
    param_map: dict,
    failures: list[PreconditionFailure],
) -> None:
    """Recursively evaluate a precondition expression, collecting failures.

    AND nodes are decomposed so each failing conjunct is reported
    individually.  OR nodes produce a single failure only when no disjunct
    holds.  Leaf boolean fluents and numeric comparisons are checked
    directly against *state*; anything unevaluable on a leaf is reported
    as a failure (NOT leaves are best-effort and skip unevaluable inners).
    """
    node_type = expr.node_type

    if node_type == OperatorKind.AND:
        for arg in expr.args:
            _evaluate_expr(arg, state, param_map, failures)
        return

    if node_type == OperatorKind.OR:
        # A disjunction holds when at least one disjunct produces no
        # failures.  Evaluate each disjunct into its own list: comparing
        # the aggregated failure count to the number of disjuncts (as the
        # previous version did) miscounts when a single failing disjunct
        # (e.g. a nested AND) contributes several failures.
        any_satisfied = False
        for arg in expr.args:
            arg_failures: list[PreconditionFailure] = []
            _evaluate_expr(arg, state, param_map, arg_failures)
            if not arg_failures:
                any_satisfied = True
                break
        if not any_satisfied:
            failures.append(PreconditionFailure(
                expression=_expr_to_pddl(expr, param_map),
                type="boolean",
                current_values={},
                explanation="None of the disjuncts are satisfied",
            ))
        return

    if node_type == OperatorKind.NOT:
        inner = expr.args[0]
        grounded = _substitute(inner, param_map)
        try:
            val = state.get_value(grounded)
            if val.is_true():
                failures.append(PreconditionFailure(
                    expression=_expr_to_pddl(expr, param_map),
                    type="boolean",
                    current_values={_expr_to_pddl(inner, param_map): True},
                    explanation=f"{_expr_to_pddl(inner, param_map)} is true but should be false",
                ))
        except Exception:
            # Best-effort: an unevaluable negated literal is not reported.
            pass
        return

    # Comparison operators: LE, LT, EQUALS (UPF normalizes >= to <= with swapped args)
    if node_type in (OperatorKind.LE, OperatorKind.LT, OperatorKind.EQUALS):
        _evaluate_comparison(expr, state, param_map, failures)
        return

    # Leaf boolean fluent expression.
    if expr.is_fluent_exp():
        grounded = _substitute(expr, param_map)
        try:
            val = state.get_value(grounded)
            if val.is_false():
                failures.append(PreconditionFailure(
                    expression=_expr_to_pddl(expr, param_map),
                    type="boolean",
                    current_values={_expr_to_pddl(expr, param_map): False},
                    explanation=f"{_expr_to_pddl(expr, param_map)} is not true in the current state",
                ))
        except Exception:
            failures.append(PreconditionFailure(
                expression=_expr_to_pddl(expr, param_map),
                type="boolean",
                current_values={},
                explanation=f"Could not evaluate {_expr_to_pddl(expr, param_map)}",
            ))
        return
104
+
105
+
106
def _evaluate_comparison(
    expr: FNode,
    state,
    param_map: dict,
    failures: list[PreconditionFailure],
) -> None:
    """Evaluate a numeric comparison precondition against *state*.

    On failure, appends a numeric PreconditionFailure carrying the current
    fluent values on either side and the deficit (how far the comparison is
    from being satisfied).  If either side cannot be evaluated, a failure
    with empty ``current_values`` is recorded instead.
    """
    left_expr, right_expr = expr.args[0], expr.args[1]
    left_grounded = _substitute(left_expr, param_map)
    right_grounded = _substitute(right_expr, param_map)

    try:
        left_val = state.get_value(left_grounded).constant_value()
        right_val = state.get_value(right_grounded).constant_value()
    except Exception:
        failures.append(PreconditionFailure(
            expression=_expr_to_pddl(expr, param_map),
            type="numeric",
            current_values={},
            # Plain string: the previous f-string had no placeholders.
            explanation="Could not evaluate numeric comparison",
        ))
        return

    op = expr.node_type
    satisfied = _check_comparison(op, left_val, right_val)

    if not satisfied:
        # Only report values for sides that are actual fluents (constants
        # already appear literally in the expression text).
        current_values = {}
        if left_expr.is_fluent_exp():
            current_values[_expr_to_pddl(left_expr, param_map)] = left_val
        if right_expr.is_fluent_exp():
            current_values[_expr_to_pddl(right_expr, param_map)] = right_val

        deficit = _compute_deficit(op, left_val, right_val)
        op_symbol = _op_symbol(op)

        failures.append(PreconditionFailure(
            expression=_expr_to_pddl(expr, param_map),
            type="numeric",
            current_values=current_values,
            explanation=(
                f"Required: {_expr_to_pddl(left_expr, param_map)} "
                f"{op_symbol} {_expr_to_pddl(right_expr, param_map)}"
            ),
            deficit=deficit,
        ))
152
+
153
+
154
def _check_comparison(op: OperatorKind, left: float, right: float) -> bool:
    """Return True when ``left <op> right`` holds.

    LE and EQUALS apply a 1e-6 tolerance; LT is strict.  Unknown operators
    evaluate to False.
    """
    eps = 1e-6
    if op == OperatorKind.EQUALS:
        return abs(left - right) < eps
    if op == OperatorKind.LT:
        return left < right
    if op == OperatorKind.LE:
        return left <= right + eps
    return False
163
+
164
+
165
def _compute_deficit(op: OperatorKind, left: float, right: float) -> float:
    """Amount by which a failed comparison misses being satisfied.

    LE and LT share the same formula (left - right); EQUALS reports the
    absolute gap.  Unknown operators yield 0.0.
    """
    if op in (OperatorKind.LE, OperatorKind.LT):
        return left - right
    if op == OperatorKind.EQUALS:
        return abs(left - right)
    return 0.0
173
+
174
+
175
def _op_symbol(op: OperatorKind) -> str:
    """PDDL surface symbol for a comparison operator (``?`` if unknown)."""
    if op == OperatorKind.LE:
        return "<="
    if op == OperatorKind.LT:
        return "<"
    if op == OperatorKind.EQUALS:
        return "="
    return "?"
181
+
182
+
183
def _evaluate_goal(expr: FNode, state, results: list[GoalResult]) -> None:
    """Evaluate a single goal expression, decomposing top-level ANDs.

    Appends one GoalResult per atomic goal to *results*.  Goals whose value
    cannot be read directly from the state (e.g. comparisons) fall back to
    manual evaluation via _evaluate_goal_expr.
    """
    if expr.node_type == OperatorKind.AND:
        for arg in expr.args:
            _evaluate_goal(arg, state, results)
        return

    try:
        val = state.get_value(expr)
        if val.is_bool_constant():
            satisfied = val.is_true()
            results.append(GoalResult(
                expression=str(expr),
                satisfied=satisfied,
                current_values={str(expr): satisfied},
            ))
        else:
            # Non-boolean value: a bare numeric value is not a satisfied
            # condition.  (The previous version re-tested is_bool_constant()
            # on this branch, where it is always False — dead code removed.)
            results.append(GoalResult(
                expression=str(expr),
                satisfied=False,
                current_values={str(expr): val.constant_value()},
            ))
    except Exception:
        # Complex expressions (comparisons, negations) may not be directly
        # retrievable from the state; evaluate them manually.
        results.append(GoalResult(
            expression=str(expr),
            satisfied=_evaluate_goal_expr(expr, state),
            current_values={},
        ))
215
+
216
+
217
def _evaluate_goal_expr(expr: FNode, state) -> bool:
    """Best-effort truth test for a goal that may be a comparison or negation.

    Any evaluation error is treated as "not satisfied".
    """
    kind = expr.node_type

    if kind in (OperatorKind.LE, OperatorKind.LT, OperatorKind.EQUALS):
        try:
            lhs = state.get_value(expr.args[0]).constant_value()
            rhs = state.get_value(expr.args[1]).constant_value()
        except Exception:
            return False
        return _check_comparison(kind, lhs, rhs)

    if kind == OperatorKind.NOT:
        try:
            return state.get_value(expr.args[0]).is_false()
        except Exception:
            return False

    try:
        return state.get_value(expr).is_true()
    except Exception:
        return False
240
+
241
+
242
+ def _substitute(expr: FNode, param_map: dict) -> FNode:
243
+ """Substitute action parameters with grounded objects in an expression."""
244
+ if not param_map:
245
+ return expr
246
+ return expr.substitute(param_map)
247
+
248
+
249
+ def _expr_to_pddl(expr: FNode, param_map: dict | None = None) -> str:
250
+ """Convert an FNode expression to a PDDL-like string."""
251
+ if param_map:
252
+ expr = _substitute(expr, param_map)
253
+ return str(expr)
@@ -0,0 +1,67 @@
1
+ """Data models for PyVAL validation results."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass, field
6
+ from typing import Any, Literal
7
+
8
+
9
@dataclass
class NumericChange:
    """Before/after values of one numeric fluent across a single step."""
    # Fluent value before the step's effects were applied.
    before: float
    # Fluent value after the step's effects were applied.
    after: float
13
+
14
+
15
@dataclass
class PreconditionFailure:
    """A single precondition that did not hold when an action was applied."""
    # PDDL-like rendering of the failed (grounded) precondition expression.
    expression: str
    # Whether the failure concerns a boolean fluent or a numeric comparison.
    # NOTE: field name shadows the builtin `type` inside this class body.
    type: Literal["boolean", "numeric"]
    # Expression text -> value observed in the state at the time of failure.
    current_values: dict[str, Any]
    # Human-readable description of why the precondition did not hold.
    explanation: str
    # For numeric failures: how far the comparison is from being satisfied.
    deficit: float | None = None
22
+
23
+
24
@dataclass
class GoalResult:
    """Outcome of checking one goal expression against a state."""
    # String rendering of the goal expression.
    expression: str
    # True when the goal holds in the evaluated state.
    satisfied: bool
    # Expression text -> value observed when the goal was checked.
    current_values: dict[str, Any] = field(default_factory=dict)
29
+
30
+
31
@dataclass
class StateSnapshot:
    """Fluent values captured at one point of plan execution."""
    # Plan step index this snapshot was recorded at.
    step: int
    # Action applied at this step; None when no action is associated
    # (presumably the initial state — confirm against the simulator).
    action: str | None
    # Boolean fluent text -> truth value in this state.
    boolean_fluents: dict[str, bool] = field(default_factory=dict)
    # Numeric fluent text -> value in this state.
    numeric_fluents: dict[str, float] = field(default_factory=dict)
37
+
38
+
39
@dataclass
class StepResult:
    """Result of applying one plan step during validation."""
    # Position of this step within the plan.
    index: int
    # Grounded action string for this step.
    action: str
    # "OK" when the action applied cleanly, "FAILED" otherwise.
    status: Literal["OK", "FAILED"]
    # Boolean fluent changes recorded for this step (fluent text -> value).
    boolean_changes: dict[str, bool] = field(default_factory=dict)
    # Numeric fluent changes recorded for this step (fluent text -> before/after).
    numeric_changes: dict[str, NumericChange] = field(default_factory=dict)
    # Precondition failures explaining a FAILED status (empty on success).
    unsatisfied: list[PreconditionFailure] = field(default_factory=list)
47
+
48
+
49
@dataclass
class ValidationResult:
    """Aggregate outcome of a validation run (syntax-only or full plan)."""
    # Overall outcome category of the run.
    status: Literal["VALID", "INVALID", "SYNTAX_ERROR", "STRUCTURE_ERROR"]
    # True when validation succeeded; drives the CLI's exit code.
    is_valid: bool
    # Per-phase metadata (schema defined by the validator that fills it in).
    phases: dict = field(default_factory=dict)
    # One StepResult per plan step that was executed.
    steps: list[StepResult] = field(default_factory=list)
    # Full state snapshots across plan execution.
    trajectory: list[StateSnapshot] = field(default_factory=list)
    # Numeric fluent text -> list of values over the plan steps.
    numeric_trajectory: dict[str, list] = field(default_factory=dict)
    # Index of the step that failed, or None when no step failed.
    failed_step: int | None = None
    # Goals that did not hold in the final state.
    unsatisfied_goals: list[GoalResult] = field(default_factory=list)
    # Non-fatal issues encountered during validation.
    warnings: list[str] = field(default_factory=list)

    def report(self, verbose: bool = False) -> str:
        """Render this result as a plain-text report string."""
        # Imported lazily — presumably to avoid a circular import with
        # report_formatter, which consumes these model types.
        from pyval.report_formatter import format_plain_text
        return format_plain_text(self, verbose=verbose)

    def to_json(self) -> dict:
        """Render this result as a JSON-serializable dict."""
        # Lazy import, same reason as in report().
        from pyval.report_formatter import format_json
        return format_json(self)
@@ -0,0 +1,77 @@
1
+ """Numeric fluent value tracking across plan steps."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from unified_planning.model import Problem
6
+
7
+ from pyval.models import StateSnapshot
8
+
9
+
10
class NumericTracker:
    """Tracks numeric (and boolean) fluent values across plan execution steps."""

    def __init__(
        self, problem: Problem, tracked_fluents: list[str] | None = None
    ):
        self._problem = problem
        self._snapshots: list[StateSnapshot] = []

        # Split the problem's initial fluent expressions by value kind:
        # bool-constant initial values are tracked as booleans, everything
        # else as numerics.
        self._boolean_exprs = [
            expr for expr, value in problem.initial_values.items()
            if value.is_bool_constant()
        ]
        self._numeric_exprs = [
            expr for expr, value in problem.initial_values.items()
            if not value.is_bool_constant()
        ]

        # Restrict the numeric set when the caller named specific fluents.
        if tracked_fluents is not None:
            wanted = set(tracked_fluents)
            self._numeric_exprs = [
                expr for expr in self._numeric_exprs
                if _fluent_display_name(expr) in wanted
            ]

    def record(self, step: int, action: str | None, state) -> StateSnapshot:
        """Capture fluent values from *state* into a StateSnapshot and store it."""
        snapshot = StateSnapshot(
            step=step,
            action=action,
            boolean_fluents={
                str(expr): state.get_value(expr).is_true()
                for expr in self._boolean_exprs
            },
            numeric_fluents={
                str(expr): float(state.get_value(expr).constant_value())
                for expr in self._numeric_exprs
            },
        )
        self._snapshots.append(snapshot)
        return snapshot

    def get_numeric_trajectory(self) -> dict[str, list[float]]:
        """Return {fluent_name: [val_step0, val_step1, ...]}.

        The first snapshot's numeric keys define the canonical fluent set;
        values missing from later snapshots default to 0.0.
        """
        if not self._snapshots:
            return {}
        keys = self._snapshots[0].numeric_fluents.keys()
        return {
            key: [snap.numeric_fluents.get(key, 0.0) for snap in self._snapshots]
            for key in keys
        }
69
+
70
+
71
+ def _fluent_display_name(expr) -> str:
72
+ """Extract a display name like 'fuel truck1' from a fluent expression."""
73
+ if expr.is_fluent_exp():
74
+ name = expr.fluent().name
75
+ args = " ".join(str(a) for a in expr.args)
76
+ return f"{name} {args}".strip() if args else name
77
+ return str(expr)