lionherd_core-1.0.0a3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. lionherd_core/__init__.py +84 -0
  2. lionherd_core/base/__init__.py +30 -0
  3. lionherd_core/base/_utils.py +295 -0
  4. lionherd_core/base/broadcaster.py +128 -0
  5. lionherd_core/base/element.py +300 -0
  6. lionherd_core/base/event.py +322 -0
  7. lionherd_core/base/eventbus.py +112 -0
  8. lionherd_core/base/flow.py +236 -0
  9. lionherd_core/base/graph.py +616 -0
  10. lionherd_core/base/node.py +212 -0
  11. lionherd_core/base/pile.py +811 -0
  12. lionherd_core/base/progression.py +261 -0
  13. lionherd_core/errors.py +104 -0
  14. lionherd_core/libs/__init__.py +2 -0
  15. lionherd_core/libs/concurrency/__init__.py +60 -0
  16. lionherd_core/libs/concurrency/_cancel.py +85 -0
  17. lionherd_core/libs/concurrency/_errors.py +80 -0
  18. lionherd_core/libs/concurrency/_patterns.py +238 -0
  19. lionherd_core/libs/concurrency/_primitives.py +253 -0
  20. lionherd_core/libs/concurrency/_priority_queue.py +135 -0
  21. lionherd_core/libs/concurrency/_resource_tracker.py +66 -0
  22. lionherd_core/libs/concurrency/_task.py +58 -0
  23. lionherd_core/libs/concurrency/_utils.py +61 -0
  24. lionherd_core/libs/schema_handlers/__init__.py +35 -0
  25. lionherd_core/libs/schema_handlers/_function_call_parser.py +122 -0
  26. lionherd_core/libs/schema_handlers/_minimal_yaml.py +88 -0
  27. lionherd_core/libs/schema_handlers/_schema_to_model.py +251 -0
  28. lionherd_core/libs/schema_handlers/_typescript.py +153 -0
  29. lionherd_core/libs/string_handlers/__init__.py +15 -0
  30. lionherd_core/libs/string_handlers/_extract_json.py +65 -0
  31. lionherd_core/libs/string_handlers/_fuzzy_json.py +103 -0
  32. lionherd_core/libs/string_handlers/_string_similarity.py +347 -0
  33. lionherd_core/libs/string_handlers/_to_num.py +63 -0
  34. lionherd_core/ln/__init__.py +45 -0
  35. lionherd_core/ln/_async_call.py +314 -0
  36. lionherd_core/ln/_fuzzy_match.py +166 -0
  37. lionherd_core/ln/_fuzzy_validate.py +151 -0
  38. lionherd_core/ln/_hash.py +141 -0
  39. lionherd_core/ln/_json_dump.py +347 -0
  40. lionherd_core/ln/_list_call.py +110 -0
  41. lionherd_core/ln/_to_dict.py +373 -0
  42. lionherd_core/ln/_to_list.py +190 -0
  43. lionherd_core/ln/_utils.py +156 -0
  44. lionherd_core/lndl/__init__.py +62 -0
  45. lionherd_core/lndl/errors.py +30 -0
  46. lionherd_core/lndl/fuzzy.py +321 -0
  47. lionherd_core/lndl/parser.py +427 -0
  48. lionherd_core/lndl/prompt.py +137 -0
  49. lionherd_core/lndl/resolver.py +323 -0
  50. lionherd_core/lndl/types.py +287 -0
  51. lionherd_core/protocols.py +181 -0
  52. lionherd_core/py.typed +0 -0
  53. lionherd_core/types/__init__.py +46 -0
  54. lionherd_core/types/_sentinel.py +131 -0
  55. lionherd_core/types/base.py +341 -0
  56. lionherd_core/types/operable.py +133 -0
  57. lionherd_core/types/spec.py +313 -0
  58. lionherd_core/types/spec_adapters/__init__.py +10 -0
  59. lionherd_core/types/spec_adapters/_protocol.py +125 -0
  60. lionherd_core/types/spec_adapters/pydantic_field.py +177 -0
  61. lionherd_core-1.0.0a3.dist-info/METADATA +502 -0
  62. lionherd_core-1.0.0a3.dist-info/RECORD +64 -0
  63. lionherd_core-1.0.0a3.dist-info/WHEEL +4 -0
  64. lionherd_core-1.0.0a3.dist-info/licenses/LICENSE +201 -0
lionherd_core/ln/_utils.py
@@ -0,0 +1,156 @@
+# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+# SPDX-License-Identifier: Apache-2.0
+
+import importlib.util
+import uuid
+from datetime import UTC, datetime
+from pathlib import Path as StdPath
+from typing import Any
+
+from anyio import Path as AsyncPath
+
+__all__ = (
+    "acreate_path",
+    "get_bins",
+    "import_module",
+    "is_import_installed",
+    "now_utc",
+)
+
+
+def now_utc() -> datetime:
+    """Get current UTC datetime."""
+    return datetime.now(UTC)
+
+
+async def acreate_path(
+    directory: StdPath | AsyncPath | str,
+    filename: str,
+    extension: str | None = None,
+    timestamp: bool = False,
+    dir_exist_ok: bool = True,
+    file_exist_ok: bool = False,
+    time_prefix: bool = False,
+    timestamp_format: str | None = None,
+    random_hash_digits: int = 0,
+    timeout: float | None = None,
+) -> AsyncPath:
+    """Generate file path asynchronously with optional timeout.
+
+    Args:
+        directory: Base directory path
+        filename: Target filename (may contain subdirectory with /)
+        extension: File extension (if filename doesn't have one)
+        timestamp: Add timestamp to filename
+        dir_exist_ok: Allow existing directories
+        file_exist_ok: Allow existing files
+        time_prefix: Put timestamp before filename instead of after
+        timestamp_format: Custom strftime format for timestamp
+        random_hash_digits: Add random hash suffix (0 = disabled)
+        timeout: Maximum time in seconds for async I/O operations (None = no timeout)
+
+    Returns:
+        AsyncPath to the created/validated file path
+
+    Raises:
+        ValueError: If filename contains backslash
+        FileExistsError: If file exists and file_exist_ok is False
+        TimeoutError: If timeout is exceeded
+    """
+    from lionherd_core.libs.concurrency import move_on_after
+
+    async def _impl() -> AsyncPath:
+        # Use AsyncPath for construction and execution
+        nonlocal directory, filename
+
+        if "/" in filename:
+            sub_dir, filename = filename.split("/")[:-1], filename.split("/")[-1]
+            directory = AsyncPath(directory) / "/".join(sub_dir)
+
+        if "\\" in filename:
+            raise ValueError("Filename cannot contain directory separators.")
+
+        # Ensure directory is an AsyncPath
+        directory = AsyncPath(directory)
+        if "." in filename:
+            name, ext = filename.rsplit(".", 1)
+        else:
+            name = filename
+            ext = extension or ""
+        ext = f".{ext.lstrip('.')}" if ext else ""
+
+        if timestamp:
+            # datetime.now() is generally non-blocking
+            ts_str = datetime.now().strftime(timestamp_format or "%Y%m%d%H%M%S")
+            name = f"{ts_str}_{name}" if time_prefix else f"{name}_{ts_str}"
+
+        if random_hash_digits > 0:
+            random_suffix = uuid.uuid4().hex[:random_hash_digits]
+            name = f"{name}-{random_suffix}"
+
+        full_path = directory / f"{name}{ext}"
+
+        # --- CRITICAL: ASYNC I/O Operations ---
+        await full_path.parent.mkdir(parents=True, exist_ok=dir_exist_ok)
+
+        if await full_path.exists() and not file_exist_ok:
+            raise FileExistsError(f"File {full_path} already exists and file_exist_ok is False.")
+
+        return full_path
+
+    if timeout is None:
+        return await _impl()
+
+    with move_on_after(timeout) as cancel_scope:
+        result = await _impl()
+    if cancel_scope.cancelled_caught:
+        raise TimeoutError(f"acreate_path timed out after {timeout}s")
+    return result
+
+
+def get_bins(input_: list[str], upper: int) -> list[list[int]]:
+    """Organize indices into bins by cumulative length."""
+    current = 0
+    bins = []
+    current_bin = []
+    for idx, item in enumerate(input_):
+        if current + len(item) < upper:
+            current_bin.append(idx)
+            current += len(item)
+        else:
+            bins.append(current_bin)
+            current_bin = [idx]
+            current = len(item)
+    if current_bin:
+        bins.append(current_bin)
+    return bins
+
+
+def import_module(
+    package_name: str,
+    module_name: str | None = None,
+    import_name: str | list | None = None,
+) -> Any:
+    """Import module by path."""
+    try:
+        full_import_path = f"{package_name}.{module_name}" if module_name else package_name
+
+        if import_name:
+            import_name = [import_name] if not isinstance(import_name, list) else import_name
+            a = __import__(
+                full_import_path,
+                fromlist=import_name,
+            )
+            if len(import_name) == 1:
+                return getattr(a, import_name[0])
+            return [getattr(a, name) for name in import_name]
+        else:
+            return __import__(full_import_path)
+
+    except ImportError as e:
+        raise ImportError(f"Failed to import module {full_import_path}: {e}") from e
+
+
+def is_import_installed(package_name: str) -> bool:
+    """Check if package is installed."""
+    return importlib.util.find_spec(package_name) is not None
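The helpers in this module are self-contained, so a short usage sketch may help. The snippet below is illustrative only: the file names and the direct `lionherd_core.ln._utils` import path are assumptions, not examples shipped with the package.

# Illustrative sketch only; file names and the direct module import are assumptions.
import anyio

from lionherd_core.ln._utils import acreate_path, get_bins, now_utc


async def main() -> None:
    # Builds logs/run_<timestamp>-<hash>.json, creating the directory and
    # raising FileExistsError if the file already exists (file_exist_ok=False).
    path = await acreate_path(
        "logs",
        "run.json",
        timestamp=True,
        random_hash_digits=6,
        timeout=5.0,
    )
    print(path, now_utc())


# Bin indices so each bin's cumulative string length stays under `upper`.
print(get_bins(["abc", "defg", "hij", "klmnop"], upper=10))  # [[0, 1], [2, 3]]

anyio.run(main)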
lionherd_core/lndl/__init__.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+# SPDX-License-Identifier: Apache-2.0
+
+from .errors import (
+    AmbiguousMatchError,
+    InvalidConstructorError,
+    LNDLError,
+    MissingFieldError,
+    MissingLvarError,
+    MissingOutBlockError,
+    TypeMismatchError,
+)
+from .fuzzy import parse_lndl_fuzzy
+from .parser import (
+    extract_lacts,
+    extract_lacts_prefixed,
+    extract_lvars,
+    extract_lvars_prefixed,
+    extract_out_block,
+    parse_out_block_array,
+)
+from .prompt import LNDL_SYSTEM_PROMPT, get_lndl_system_prompt
+from .resolver import parse_lndl, resolve_references_prefixed
+from .types import (
+    ActionCall,
+    LactMetadata,
+    LNDLOutput,
+    LvarMetadata,
+    ParsedConstructor,
+    ensure_no_action_calls,
+    has_action_calls,
+    revalidate_with_action_results,
+)
+
+__all__ = (
+    "LNDL_SYSTEM_PROMPT",
+    "ActionCall",
+    "AmbiguousMatchError",
+    "InvalidConstructorError",
+    "LNDLError",
+    "LNDLOutput",
+    "LactMetadata",
+    "LvarMetadata",
+    "MissingFieldError",
+    "MissingLvarError",
+    "MissingOutBlockError",
+    "ParsedConstructor",
+    "TypeMismatchError",
+    "ensure_no_action_calls",
+    "extract_lacts",
+    "extract_lacts_prefixed",
+    "extract_lvars",  # backward compatibility
+    "extract_lvars_prefixed",
+    "extract_out_block",
+    "get_lndl_system_prompt",
+    "has_action_calls",
+    "parse_lndl",
+    "parse_lndl_fuzzy",
+    "parse_out_block_array",
+    "resolve_references_prefixed",
+    "revalidate_with_action_results",
+)
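For orientation, the re-exports above define the subpackage's public surface; a minimal import sketch, assuming only the names declared in `__all__`:

# Minimal import sketch; assumes nothing beyond the re-exports in __all__ above.
from lionherd_core.lndl import LNDL_SYSTEM_PROMPT, LNDLError, parse_lndl, parse_lndl_fuzzy

# The strict parser (parse_lndl) and the fuzzy parser (parse_lndl_fuzzy) share
# one error hierarchy rooted at LNDLError, so both can be guarded the same way.
assert issubclass(LNDLError, Exception)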
lionherd_core/lndl/errors.py
@@ -0,0 +1,30 @@
+# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+# SPDX-License-Identifier: Apache-2.0
+
+
+class LNDLError(Exception):
+    """Base exception for LNDL parsing/validation errors."""
+
+
+class MissingLvarError(LNDLError):
+    """Referenced lvar does not exist."""
+
+
+class MissingFieldError(LNDLError):
+    """Required Spec field missing from OUT{} block."""
+
+
+class TypeMismatchError(LNDLError):
+    """Constructor class doesn't match Spec type."""
+
+
+class InvalidConstructorError(LNDLError):
+    """Cannot parse constructor syntax."""
+
+
+class MissingOutBlockError(LNDLError):
+    """No OUT{} block found in response."""
+
+
+class AmbiguousMatchError(LNDLError):
+    """Multiple fields match with similar similarity scores (tie)."""
lionherd_core/lndl/fuzzy.py
@@ -0,0 +1,321 @@
+# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
+# SPDX-License-Identifier: Apache-2.0
+
+import logging
+
+from lionherd_core.libs.string_handlers._string_similarity import (
+    SIMILARITY_ALGO_MAP,
+    string_similarity,
+)
+from lionherd_core.types import Operable
+
+from .errors import AmbiguousMatchError, MissingFieldError
+from .parser import (
+    extract_lacts_prefixed,
+    extract_lvars_prefixed,
+    extract_out_block,
+    parse_out_block_array,
+)
+from .resolver import resolve_references_prefixed
+from .types import LactMetadata, LNDLOutput, LvarMetadata
+
+__all__ = ("parse_lndl_fuzzy",)
+
+logger = logging.getLogger(__name__)
+
+
+def _correct_name(
+    target: str,
+    candidates: list[str],
+    threshold: float,
+    context: str = "name",
+) -> str:
+    """Correct name using fuzzy matching with tie detection.
+
+    Args:
+        target: User-provided name (may have typo)
+        candidates: Valid names to match against
+        threshold: Similarity threshold (0.0-1.0)
+        context: Context for error messages (e.g., "field", "lvar")
+
+    Returns:
+        Corrected name
+
+    Raises:
+        MissingFieldError: No match above threshold
+        AmbiguousMatchError: Multiple matches within 0.05 similarity
+
+    Example:
+        >>> _correct_name("titel", ["title", "content"], 0.85, "field")
+        "title"  # Jaro-Winkler: 0.933
+    """
+    # Exact match - no fuzzy needed
+    if target in candidates:
+        return target
+
+    # Strict mode (threshold=1.0) - exact match only
+    if threshold >= 1.0:
+        raise MissingFieldError(
+            f"{context.capitalize()} '{target}' not found. "
+            f"Available: {candidates} (strict mode: exact match required)"
+        )
+
+    # Fuzzy match with tie detection
+    result = string_similarity(
+        word=target,
+        correct_words=candidates,
+        algorithm="jaro_winkler",
+        threshold=threshold,
+        return_most_similar=False,  # Get ALL matches for tie detection
+    )
+
+    if not result:
+        raise MissingFieldError(
+            f"{context.capitalize()} '{target}' not found above threshold {threshold}. "
+            f"Available: {candidates}"
+        )
+
+    # Calculate scores for tie detection
+    algo_func = SIMILARITY_ALGO_MAP["jaro_winkler"]
+    scores = {candidate: algo_func(target, candidate) for candidate in result}
+
+    # Find max score
+    max_score = max(scores.values())
+
+    # Check for ties (matches within 0.05)
+    ties = [k for k, v in scores.items() if abs(v - max_score) < 0.05]
+
+    if len(ties) > 1:
+        scores_str = ", ".join(f"'{k}': {scores[k]:.3f}" for k in ties)
+        raise AmbiguousMatchError(
+            f"Ambiguous match for {context} '{target}': [{scores_str}]. "
+            f"Multiple candidates scored within 0.05. Be more specific."
+        )
+
+    # Single clear winner
+    match = result[0]
+
+    # Log correction
+    if match != target:
+        logger.debug(f"Fuzzy corrected {context}: '{target}' → '{match}'")
+
+    return match
+
+
+def parse_lndl_fuzzy(
+    response: str,
+    operable: Operable,
+    /,
+    *,
+    threshold: float = 0.85,
+    threshold_field: float | None = None,
+    threshold_lvar: float | None = None,
+    threshold_model: float | None = None,
+    threshold_spec: float | None = None,
+) -> LNDLOutput:
+    """Parse LNDL with fuzzy matching (default) or strict mode (threshold=1.0).
+
+    Args:
+        response: Full LLM response containing lvars and OUT{}
+        operable: Operable containing allowed specs
+        threshold: Global similarity threshold (default: 0.85)
+            - 0.85: Fuzzy matching (production-proven)
+            - 1.0: Strict mode (exact matches only)
+            - 0.7-0.95: Custom tolerance
+        threshold_field: Override threshold for field names (default: use threshold)
+        threshold_lvar: Override threshold for lvar references (default: use threshold)
+        threshold_model: Override threshold for model names (default: use threshold or 0.90)
+        threshold_spec: Override threshold for spec names (default: use threshold)
+
+    Returns:
+        LNDLOutput with validated fields
+
+    Raises:
+        MissingFieldError: No match above threshold
+        AmbiguousMatchError: Multiple matches within 0.05 similarity
+        ValueError: Validation errors from strict resolver
+
+    Example:
+        >>> # Default: Fuzzy matching
+        >>> response = '''
+        ... <lvar Report.titel title>Good Title</lvar>
+        ... OUT{reprot: [titel]}
+        ... '''
+        >>> parse_lndl_fuzzy(response, operable)  # Auto-corrects typos
+
+        >>> # Strict mode
+        >>> parse_lndl_fuzzy(response, operable, threshold=1.0)  # Raises error
+
+    Architecture:
+        1. Parse LNDL (extract lvars and OUT{})
+        2. Pre-correct typos in lvar names, model names, field names, spec names
+        3. Call strict resolver with corrected inputs (zero duplication)
+    """
+    # Set default thresholds
+    threshold_field = threshold_field if threshold_field is not None else threshold
+    threshold_lvar = threshold_lvar if threshold_lvar is not None else threshold
+    threshold_model = (
+        threshold_model if threshold_model is not None else max(threshold, 0.90)
+    )  # Stricter for model names
+    threshold_spec = threshold_spec if threshold_spec is not None else threshold
+
+    # 1. Extract namespace-prefixed lvars, lacts, and OUT{} block
+    lvars_raw = extract_lvars_prefixed(response)
+    lacts_raw = extract_lacts_prefixed(response)
+    out_content = extract_out_block(response)
+    out_fields_raw = parse_out_block_array(out_content)
+
+    # Build spec map for O(1) lookups (used in both strict and fuzzy modes)
+    spec_map = {spec.base_type.__name__: spec for spec in operable.get_specs()}
+    expected_models = set(spec_map.keys())
+
+    # If threshold is 1.0 (strict mode), validate strictly then call resolver
+    if threshold >= 1.0:
+        for lvar in lvars_raw.values():
+            if lvar.model not in expected_models:
+                raise MissingFieldError(
+                    f"Model '{lvar.model}' not found. "
+                    f"Available: {list(expected_models)} (strict mode: exact match required)"
+                )
+
+        # Validate field names exist for each model
+        for lvar in lvars_raw.values():
+            # Get spec for this model (guaranteed to exist if lvar.model in expected_models)
+            spec = spec_map[lvar.model]
+
+            # Check if field exists
+            expected_fields = list(spec.base_type.model_fields.keys())
+            if lvar.field not in expected_fields:
+                raise MissingFieldError(
+                    f"Field '{lvar.field}' not found in model {lvar.model}. "
+                    f"Available: {expected_fields} (strict mode: exact match required)"
+                )
+
+        # Validate namespaced action model/field names (strict mode)
+        for lact in lacts_raw.values():
+            if lact.model:  # Namespaced action
+                if lact.model not in expected_models:
+                    raise MissingFieldError(
+                        f"Action model '{lact.model}' not found. "
+                        f"Available: {list(expected_models)} (strict mode: exact match required)"
+                    )
+
+                # Find spec and validate field
+                spec = spec_map[lact.model]
+                expected_fields = list(spec.base_type.model_fields.keys())
+                if lact.field not in expected_fields:
+                    raise MissingFieldError(
+                        f"Action field '{lact.field}' not found in model {lact.model}. "
+                        f"Available: {expected_fields} (strict mode: exact match required)"
+                    )
+
+        # Validate spec names in OUT{} block
+        expected_spec_names = list(operable.allowed())
+        for spec_name in out_fields_raw:
+            if spec_name not in expected_spec_names:
+                raise MissingFieldError(
+                    f"Spec '{spec_name}' not found. "
+                    f"Available: {expected_spec_names} (strict mode: exact match required)"
+                )
+
+        return resolve_references_prefixed(out_fields_raw, lvars_raw, lacts_raw, operable)
+
+    # 2. Pre-correct lvar metadata (model names and field names)
+    # Collect all unique model names and field names from lvars
+    raw_model_names = {lvar.model for lvar in lvars_raw.values()}
+    raw_field_names_by_model: dict[str, set[str]] = {}
+    for lvar in lvars_raw.values():
+        if lvar.model not in raw_field_names_by_model:
+            raw_field_names_by_model[lvar.model] = set()
+        raw_field_names_by_model[lvar.model].add(lvar.field)
+
+    # Correct model names in lvars
+    model_corrections: dict[str, str] = {}  # raw_model → corrected_model
+    for raw_model in raw_model_names:
+        corrected_model = _correct_name(raw_model, list(expected_models), threshold_model, "model")
+        model_corrections[raw_model] = corrected_model
+
+    # Correct field names in lvars (per model)
+    field_corrections: dict[tuple[str, str], str] = {}  # (model, raw_field) → corrected_field
+    for raw_model, raw_fields in raw_field_names_by_model.items():
+        corrected_model = model_corrections[raw_model]
+
+        # Get expected fields for this model from spec (O(1) lookup)
+        # (spec guaranteed to exist: corrected_model from fuzzy match against expected_models)
+        spec = spec_map[corrected_model]
+        expected_fields = list(spec.base_type.model_fields.keys())
+
+        for raw_field in raw_fields:
+            corrected_field = _correct_name(
+                raw_field, expected_fields, threshold_field, f"field (model {corrected_model})"
+            )
+            field_corrections[(raw_model, raw_field)] = corrected_field
+
+    # Rebuild lvars with corrected model and field names
+    lvars_corrected: dict[str, LvarMetadata] = {}
+    for local_name, lvar in lvars_raw.items():
+        corrected_model = model_corrections.get(lvar.model, lvar.model)
+        corrected_field = field_corrections.get((lvar.model, lvar.field), lvar.field)
+
+        lvars_corrected[local_name] = LvarMetadata(
+            model=corrected_model,
+            field=corrected_field,
+            local_name=lvar.local_name,
+            value=lvar.value,
+        )
+
+    # 2b. Pre-correct lact metadata (model names and field names for namespaced actions)
+    # Namespaced actions share the same model/field correction as lvars
+    lacts_corrected: dict[str, LactMetadata] = {}
+    for local_name, lact in lacts_raw.items():
+        if lact.model:  # Namespaced action
+            # Use existing model_corrections (same as lvars)
+            corrected_model = model_corrections.get(lact.model, lact.model)
+
+            # For field correction, use existing field_corrections
+            corrected_field = field_corrections.get((lact.model, lact.field), lact.field)
+
+            lacts_corrected[local_name] = LactMetadata(
+                model=corrected_model,
+                field=corrected_field,
+                local_name=lact.local_name,
+                call=lact.call,
+            )
+        else:  # Direct action - no correction needed
+            lacts_corrected[local_name] = lact
+
+    # 3. Pre-correct OUT{} spec names (keys in out_fields_raw)
+    expected_spec_names = list(operable.allowed())
+    out_fields_corrected: dict[str, list[str] | str] = {}
+
+    for raw_spec_name, value in out_fields_raw.items():
+        corrected_spec_name = _correct_name(
+            raw_spec_name, expected_spec_names, threshold_spec, "spec"
+        )
+        out_fields_corrected[corrected_spec_name] = value
+
+    # 4. Pre-correct lvar and lact references in OUT{} arrays
+    available_lvar_names = list(lvars_corrected.keys())
+    available_lact_names = list(lacts_corrected.keys())
+    available_var_or_action_names = available_lvar_names + available_lact_names
+    out_fields_final: dict[str, list[str] | str] = {}
+
+    for spec_name, value in out_fields_corrected.items():
+        if isinstance(value, list):
+            # Array of variable/action references - correct each reference
+            corrected_refs = []
+            for raw_ref in value:
+                corrected_ref = _correct_name(
+                    raw_ref,
+                    available_var_or_action_names,
+                    threshold_lvar,
+                    "variable or action reference",
+                )
+                corrected_refs.append(corrected_ref)
+            out_fields_final[spec_name] = corrected_refs
+        else:
+            # Literal value - no correction needed
+            out_fields_final[spec_name] = value
+
+    # 5. Call strict resolver with corrected inputs (REUSE existing logic)
+    return resolve_references_prefixed(out_fields_final, lvars_corrected, lacts_corrected, operable)
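The docstring example above translates to roughly the following call pattern. This is a sketch, not shipped code: the `operable` object and its `Report` model with a `title` field are assumed to have been built elsewhere with the package's Operable/Spec machinery, which this diff does not show.

# Hypothetical call sketch; the Operable construction is assumed, not shown.
from lionherd_core.lndl.fuzzy import parse_lndl_fuzzy

operable = ...  # assumed: an Operable exposing a "report" spec over a Report model

response = """
<lvar Report.titel title>Good Title</lvar>
OUT{reprot: [title]}
"""

# Default fuzzy mode: "titel" → "title" (field) and "reprot" → "report" (spec)
# are auto-corrected, provided each clears the Jaro-Winkler threshold with no tie.
output = parse_lndl_fuzzy(response, operable)

# Strict mode: the same response raises MissingFieldError, since only exact
# model/field/spec names are accepted when threshold >= 1.0.
strict_output = parse_lndl_fuzzy(response, operable, threshold=1.0)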