tasktree 0.0.21-py3-none-any.whl → 0.0.22-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tasktree/cli.py +91 -31
- tasktree/docker.py +24 -17
- tasktree/executor.py +263 -211
- tasktree/graph.py +15 -10
- tasktree/hasher.py +13 -6
- tasktree/parser.py +220 -121
- tasktree/state.py +7 -8
- tasktree/substitution.py +27 -15
- tasktree/types.py +29 -12
- {tasktree-0.0.21.dist-info → tasktree-0.0.22.dist-info}/METADATA +13 -15
- tasktree-0.0.22.dist-info/RECORD +14 -0
- tasktree-0.0.21.dist-info/RECORD +0 -14
- {tasktree-0.0.21.dist-info → tasktree-0.0.22.dist-info}/WHEEL +0 -0
- {tasktree-0.0.21.dist-info → tasktree-0.0.22.dist-info}/entry_points.txt +0 -0
tasktree/parser.py
CHANGED
@@ -23,6 +23,7 @@ class CircularImportError(Exception):
    Raised when a circular import is detected.
    @athena: 935d53bc7d05
    """
+
    pass


@@ -39,7 +40,9 @@ class Environment:

    name: str
    shell: str = ""  # Path to shell (required for shell envs, optional for Docker)
-    args: list[str] | dict[str, str] = field(default_factory=list)  # Shell args (list) or Docker build args (dict)
+    args: list[str] | dict[str, str] = field(
+        default_factory=list
+    )  # Shell args (list) or Docker build args (dict)
    preamble: str = ""
    # Docker-specific fields (presence of dockerfile indicates Docker environment)
    dockerfile: str = ""  # Path to Dockerfile
@@ -48,7 +51,9 @@ class Environment:
    ports: list[str] = field(default_factory=list)  # Port mappings
    env_vars: dict[str, str] = field(default_factory=dict)  # Environment variables
    working_dir: str = ""  # Working directory (container or host)
-    extra_args: List[str] = field(default_factory=list)  # Any extra arguments to pass to docker
+    extra_args: List[str] = field(
+        default_factory=list
+    )  # Any extra arguments to pass to docker
    run_as_root: bool = False  # If True, skip user mapping (run as root in container)

    def __post_init__(self):
@@ -70,26 +75,46 @@ class Task:
    name: str
    cmd: str
    desc: str = ""
-    deps: list[str | dict[str, Any]] = field(default_factory=list)  # Can be strings or dicts with args
-    inputs: list[str | dict[str, str]] = field(default_factory=list)  # Can be strings or dicts with named inputs
-    outputs: list[str | dict[str, str]] = field(default_factory=list)  # Can be strings or dicts with named outputs
+    deps: list[str | dict[str, Any]] = field(
+        default_factory=list
+    )  # Can be strings or dicts with args
+    inputs: list[str | dict[str, str]] = field(
+        default_factory=list
+    )  # Can be strings or dicts with named inputs
+    outputs: list[str | dict[str, str]] = field(
+        default_factory=list
+    )  # Can be strings or dicts with named outputs
    working_dir: str = ""
-    args: list[str | dict[str, Any]] = field(default_factory=list)  # Can be strings or dicts (each dict has single key: arg name)
+    args: list[str | dict[str, Any]] = field(
+        default_factory=list
+    )  # Can be strings or dicts (each dict has single key: arg name)
    source_file: str = ""  # Track which file defined this task
    env: str = ""  # Environment name to use for execution
    private: bool = False  # If True, task is hidden from --list output

    # Internal fields for efficient output lookup (built in __post_init__)
-    _output_map: dict[str, str] = field(init=False, default_factory=dict, repr=False)  # name → path mapping
-    _anonymous_outputs: list[str] = field(init=False, default_factory=list, repr=False)  # unnamed outputs
+    _output_map: dict[str, str] = field(
+        init=False, default_factory=dict, repr=False
+    )  # name → path mapping
+    _anonymous_outputs: list[str] = field(
+        init=False, default_factory=list, repr=False
+    )  # unnamed outputs

    # Internal fields for efficient input lookup (built in __post_init__)
-    _input_map: dict[str, str] = field(init=False, default_factory=dict, repr=False)  # name → path mapping
-    _anonymous_inputs: list[str] = field(init=False, default_factory=list, repr=False)  # unnamed inputs
+    _input_map: dict[str, str] = field(
+        init=False, default_factory=dict, repr=False
+    )  # name → path mapping
+    _anonymous_inputs: list[str] = field(
+        init=False, default_factory=list, repr=False
+    )  # unnamed inputs

    # Internal fields for positional input/output access (built in __post_init__)
-    _indexed_inputs: list[str] = field(init=False, default_factory=list, repr=False)  # all inputs in YAML order
-    _indexed_outputs: list[str] = field(init=False, default_factory=list, repr=False)  # all outputs in YAML order
+    _indexed_inputs: list[str] = field(
+        init=False, default_factory=list, repr=False
+    )  # all inputs in YAML order
+    _indexed_outputs: list[str] = field(
+        init=False, default_factory=list, repr=False
+    )  # all outputs in YAML order

    def __post_init__(self):
        """
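Note: the hunks above are mechanical re-wrapping by a formatter, but the pattern they wrap is worth calling out: mutable defaults on dataclass fields must go through field(default_factory=...), because a bare [] or {} default would be shared across instances (and is rejected by dataclasses). A minimal sketch, independent of tasktree:

    from dataclasses import dataclass, field

    @dataclass
    class Example:
        # Correct: each instance gets its own list
        items: list[str] = field(default_factory=list)
        # Excluded from __init__ and repr, like the _output_map caches above
        _cache: dict[str, str] = field(init=False, default_factory=dict, repr=False)

    a, b = Example(), Example()
    a.items.append("x")
    assert b.items == []  # no shared state between instances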
@@ -138,7 +163,7 @@ class Task:
                    f"Task '{self.name}': Named output '{name}' must have a string path, got {type(path).__name__}: {path}"
                )

-            if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', name):
+            if not re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", name):
                raise ValueError(
                    f"Task '{self.name}': Named output '{name}' must be a valid identifier "
                    f"(letters, numbers, underscores, cannot start with number)"
@@ -180,7 +205,7 @@ class Task:
                    f"Task '{self.name}': Named input '{name}' must have a string path, got {type(path).__name__}: {path}"
                )

-            if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', name):
+            if not re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", name):
                raise ValueError(
                    f"Task '{self.name}': Named input '{name}' must be a valid identifier "
                    f"(letters, numbers, underscores, cannot start with number)"
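Note: judging by the replacement lines, the removed lines in these two hunks differ only in quote style; the identifier regex itself is unchanged. It is what makes named inputs/outputs usable as template names. A quick sanity check of what it accepts (hypothetical names, not from the package):

    import re

    IDENT = re.compile(r"^[a-zA-Z_][a-zA-Z0-9_]*$")

    assert IDENT.match("build_dir")      # letters, digits, underscores are fine
    assert IDENT.match("_private")       # leading underscore is fine
    assert not IDENT.match("2fast")      # cannot start with a digit
    assert not IDENT.match("has-dash")   # dashes are rejected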
@@ -219,6 +244,7 @@ class DependencySpec:
    for numeric types, to preserve template placeholders.
    @athena: 7b2f8a15d312
    """
+
    task_name: str
    arg_templates: dict[str, str] | None = None

@@ -243,6 +269,7 @@ class DependencyInvocation:
        args: Dictionary of argument names to values (None if no args specified)
    @athena: 0c023366160b
    """
+
    task_name: str
    args: dict[str, Any] | None = None

@@ -272,6 +299,7 @@ class ArgSpec:
        choices: List of valid choices for the argument (None if not specified)
    @athena: fcaf20fb1ca2
    """
+
    name: str
    arg_type: str
    default: str | None = None
@@ -294,11 +322,19 @@ class Recipe:
    environments: dict[str, Environment] = field(default_factory=dict)
    default_env: str = ""  # Name of default environment
    global_env_override: str = ""  # Global environment override (set via CLI --env)
-    variables: dict[str, str] = field(default_factory=dict)  # Global variables (resolved at parse time) - DEPRECATED, use evaluated_variables
-    raw_variables: dict[str, Any] = field(default_factory=dict)  # Raw variable specs from YAML (not yet evaluated)
-    evaluated_variables: dict[str, str] = field(default_factory=dict)  # Evaluated variable values (cached after evaluation)
+    variables: dict[str, str] = field(
+        default_factory=dict
+    )  # Global variables (resolved at parse time) - DEPRECATED, use evaluated_variables
+    raw_variables: dict[str, Any] = field(
+        default_factory=dict
+    )  # Raw variable specs from YAML (not yet evaluated)
+    evaluated_variables: dict[str, str] = field(
+        default_factory=dict
+    )  # Evaluated variable values (cached after evaluation)
    _variables_evaluated: bool = False  # Track if variables have been evaluated
-    _original_yaml_data: dict[str, Any] = field(default_factory=dict)  # Store original YAML data for lazy evaluation context
+    _original_yaml_data: dict[str, Any] = field(
+        default_factory=dict
+    )  # Store original YAML data for lazy evaluation context

    def get_task(self, name: str) -> Task | None:
        """
@@ -370,7 +406,9 @@ class Recipe:
        # (CLI will provide its own "Task not found" error)
        try:
            reachable_tasks = collect_reachable_tasks(self.tasks, root_task)
-            variables_to_eval = collect_reachable_variables(self.tasks, self.environments, reachable_tasks)
+            variables_to_eval = collect_reachable_variables(
+                self.tasks, self.environments, reachable_tasks
+            )
        except ValueError:
            # Root task not found - fall back to eager evaluation
            # This allows the recipe to be parsed even with invalid task names
@@ -387,7 +425,7 @@ class Recipe:
            self.raw_variables,
            variables_to_eval,
            self.recipe_path,
-            self._original_yaml_data
+            self._original_yaml_data,
        )

        # Also update the deprecated 'variables' field for backward compatibility
@@ -402,18 +440,24 @@ class Recipe:

            task.cmd = substitute_variables(task.cmd, self.evaluated_variables)
            task.desc = substitute_variables(task.desc, self.evaluated_variables)
-            task.working_dir = substitute_variables(task.working_dir, self.evaluated_variables)
+            task.working_dir = substitute_variables(
+                task.working_dir, self.evaluated_variables
+            )

            # Substitute variables in inputs (handle both string and dict inputs)
            resolved_inputs = []
            for inp in task.inputs:
                if isinstance(inp, str):
-                    resolved_inputs.append(substitute_variables(inp, self.evaluated_variables))
+                    resolved_inputs.append(
+                        substitute_variables(inp, self.evaluated_variables)
+                    )
                elif isinstance(inp, dict):
                    # Named input: substitute the path value
                    resolved_dict = {}
                    for name, path in inp.items():
-                        resolved_dict[name] = substitute_variables(path, self.evaluated_variables)
+                        resolved_dict[name] = substitute_variables(
+                            path, self.evaluated_variables
+                        )
                    resolved_inputs.append(resolved_dict)
                else:
                    resolved_inputs.append(inp)
@@ -423,12 +467,16 @@ class Recipe:
            resolved_outputs = []
            for out in task.outputs:
                if isinstance(out, str):
-                    resolved_outputs.append(substitute_variables(out, self.evaluated_variables))
+                    resolved_outputs.append(
+                        substitute_variables(out, self.evaluated_variables)
+                    )
                elif isinstance(out, dict):
                    # Named output: substitute the path value
                    resolved_dict = {}
                    for name, path in out.items():
-                        resolved_dict[name] = substitute_variables(path, self.evaluated_variables)
+                        resolved_dict[name] = substitute_variables(
+                            path, self.evaluated_variables
+                        )
                    resolved_outputs.append(resolved_dict)
                else:
                    resolved_outputs.append(out)
@@ -441,7 +489,9 @@ class Recipe:
            resolved_args = []
            for arg in task.args:
                if isinstance(arg, str):
-                    resolved_args.append(substitute_variables(arg, self.evaluated_variables))
+                    resolved_args.append(
+                        substitute_variables(arg, self.evaluated_variables)
+                    )
                elif isinstance(arg, dict):
                    # Dict arg: substitute in nested values (like default values)
                    resolved_dict = {}
@@ -451,11 +501,19 @@ class Recipe:
                            resolved_spec = {}
                            for key, value in arg_spec.items():
                                if isinstance(value, str):
-                                    resolved_spec[key] = substitute_variables(value, self.evaluated_variables)
+                                    resolved_spec[key] = substitute_variables(
+                                        value, self.evaluated_variables
+                                    )
                                elif isinstance(value, list):
                                    # Handle lists like 'choices'
                                    resolved_spec[key] = [
-                                        substitute_variables(v, self.evaluated_variables) if isinstance(v, str) else v
+                                        (
+                                            substitute_variables(
+                                                v, self.evaluated_variables
+                                            )
+                                            if isinstance(v, str)
+                                            else v
+                                        )
                                        for v in value
                                    ]
                                else:
@@ -463,7 +521,11 @@ class Recipe:
                            resolved_dict[arg_name] = resolved_spec
                        else:
                            # Simple value
-                            resolved_dict[arg_name] = substitute_variables(arg_spec, self.evaluated_variables) if isinstance(arg_spec, str) else arg_spec
+                            resolved_dict[arg_name] = (
+                                substitute_variables(arg_spec, self.evaluated_variables)
+                                if isinstance(arg_spec, str)
+                                else arg_spec
+                            )
                    resolved_args.append(resolved_dict)
                else:
                    resolved_args.append(arg)
@@ -472,15 +534,23 @@ class Recipe:
        # Substitute evaluated variables into all environments
        for env in self.environments.values():
            if env.preamble:
-                env.preamble = substitute_variables(env.preamble, self.evaluated_variables)
+                env.preamble = substitute_variables(
+                    env.preamble, self.evaluated_variables
+                )

            # Substitute in volumes
            if env.volumes:
-                env.volumes = [substitute_variables(vol, self.evaluated_variables) for vol in env.volumes]
+                env.volumes = [
+                    substitute_variables(vol, self.evaluated_variables)
+                    for vol in env.volumes
+                ]

            # Substitute in ports
            if env.ports:
-                env.ports = [substitute_variables(port, self.evaluated_variables) for port in env.ports]
+                env.ports = [
+                    substitute_variables(port, self.evaluated_variables)
+                    for port in env.ports
+                ]

            # Substitute in env_vars values
            if env.env_vars:
@@ -491,7 +561,9 @@ class Recipe:

            # Substitute in working_dir
            if env.working_dir:
-                env.working_dir = substitute_variables(env.working_dir, self.evaluated_variables)
+                env.working_dir = substitute_variables(
+                    env.working_dir, self.evaluated_variables
+                )

            # Substitute in build args (dict for Docker environments)
            if env.args and isinstance(env.args, dict):
@@ -595,7 +667,7 @@ def _validate_variable_name(name: str) -> None:
        ValueError: If name is not a valid identifier
    @athena: 61f92f7ad278
    """
-    if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', name):
+    if not re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", name):
        raise ValueError(
            f"Variable name '{name}' is invalid. Names must start with "
            f"letter/underscore and contain only alphanumerics and underscores."
@@ -616,12 +688,7 @@ def _infer_variable_type(value: Any) -> str:
        ValueError: If value type is not supported
    @athena: 335ae24e1504
    """
-    type_map = {
-        str: "str",
-        int: "int",
-        float: "float",
-        bool: "bool"
-    }
+    type_map = {str: "str", int: "int", float: "float", bool: "bool"}
    python_type = type(value)
    if python_type not in type_map:
        raise ValueError(
@@ -645,7 +712,9 @@ def _is_env_variable_reference(value: Any) -> bool:
    return isinstance(value, dict) and "env" in value


-def _validate_env_variable_reference(var_name: str, value: dict) -> tuple[str, str | None]:
+def _validate_env_variable_reference(
+    var_name: str, value: dict
+) -> tuple[str, str | None]:
    """
    Validate and extract environment variable name and optional default from reference.

@@ -666,7 +735,7 @@ def _validate_env_variable_reference(var_name: str, value: dict) -> tuple[str, s
    if invalid_keys:
        raise ValueError(
            f"Invalid environment variable reference in variable '{var_name}'.\n"
-            f
+            f'Expected: {{ env: VARIABLE_NAME }} or {{ env: VARIABLE_NAME, default: "value" }}\n'
            f"Found invalid keys: {', '.join(invalid_keys)}"
        )

@@ -675,7 +744,7 @@ def _validate_env_variable_reference(var_name: str, value: dict) -> tuple[str, s
        raise ValueError(
            f"Invalid environment variable reference in variable '{var_name}'.\n"
            f"Missing required 'env' key.\n"
-            f
+            f'Expected: {{ env: VARIABLE_NAME }} or {{ env: VARIABLE_NAME, default: "value" }}'
        )

    env_var_name = value["env"]
@@ -684,12 +753,12 @@ def _validate_env_variable_reference(var_name: str, value: dict) -> tuple[str, s
    if not env_var_name or not isinstance(env_var_name, str):
        raise ValueError(
            f"Invalid environment variable reference in variable '{var_name}'.\n"
-            f
+            f'Expected: {{ env: VARIABLE_NAME }} or {{ env: VARIABLE_NAME, default: "value" }}'
            f"Found: {{ env: {env_var_name!r} }}"
        )

    # Validate env var name format (allow both uppercase and mixed case for flexibility)
-    if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', env_var_name):
+    if not re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", env_var_name):
        raise ValueError(
            f"Invalid environment variable name '{env_var_name}' in variable '{var_name}'.\n"
            f"Environment variable names must start with a letter or underscore,\n"
@@ -705,13 +774,15 @@ def _validate_env_variable_reference(var_name: str, value: dict) -> tuple[str, s
            f"Invalid default value in variable '{var_name}'.\n"
            f"Environment variable defaults must be strings.\n"
            f"Got: {default!r} (type: {type(default).__name__})\n"
-            f
+            f'Use a quoted string: {{ env: {env_var_name}, default: "{default}" }}'
        )

    return env_var_name, default


-def _resolve_env_variable(var_name: str, env_var_name: str, default: str | None = None) -> str:
+def _resolve_env_variable(
+    var_name: str, env_var_name: str, default: str | None = None
+) -> str:
    """
    Resolve environment variable value.

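Note: the { env: NAME, default: "value" } form validated above resolves against the process environment, falling back to the default when the variable is unset. A rough sketch of that behaviour, not the package's implementation:

    import os

    def resolve_env_reference(env_var_name: str, default: str | None = None) -> str:
        # Look the variable up in the current process environment
        value = os.environ.get(env_var_name)
        if value is not None:
            return value
        if default is not None:
            return default
        raise ValueError(f"Environment variable '{env_var_name}' is not set and no default was given")

    os.environ["BUILD_MODE"] = "release"
    assert resolve_env_reference("BUILD_MODE", "debug") == "release"
    assert resolve_env_reference("UNSET_EXAMPLE_VAR", "debug") == "debug"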
@@ -859,7 +930,7 @@ def _resolve_file_variable(var_name: str, filepath: str, resolved_path: Path) ->

    # Read file with UTF-8 error handling
    try:
-        content = resolved_path.read_text(encoding='utf-8')
+        content = resolved_path.read_text(encoding="utf-8")
    except PermissionError:
        raise ValueError(
            f"Failed to read file for variable '{var_name}': {filepath}\n"
@@ -875,7 +946,7 @@ def _resolve_file_variable(var_name: str, filepath: str, resolved_path: Path) ->
        )

    # Strip single trailing newline if present
-    if content.endswith('\n'):
+    if content.endswith("\n"):
        content = content[:-1]

    return content
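Note: this hunk and the eval-variable hunk further down strip exactly one trailing newline rather than calling .rstrip(), so intentional trailing blank lines or whitespace inside a file (or in command output) survive. A small illustration of the difference:

    def strip_single_trailing_newline(text: str) -> str:
        # Drop at most one final "\n"; everything else is preserved
        return text[:-1] if text.endswith("\n") else text

    assert strip_single_trailing_newline("v1.2.3\n") == "v1.2.3"
    assert strip_single_trailing_newline("line\n\n") == "line\n"   # only one newline removed
    assert "line\n\n".rstrip() == "line"                           # rstrip would eat both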
@@ -939,20 +1010,17 @@ def _get_default_shell_and_args() -> tuple[str, list[str]]:

    Returns:
        Tuple of (shell, args) for platform default
-    @athena:
+    @athena: 475863b02b48
    """
    is_windows = platform.system() == "Windows"
    if is_windows:
-        return 'cmd', ['/c']
+        return "cmd", ["/c"]
    else:
-        return 'bash', ['-c']
+        return "bash", ["-c"]


def _resolve_eval_variable(
-    var_name: str,
-    command: str,
-    recipe_file_path: Path,
-    recipe_data: dict
+    var_name: str, command: str, recipe_file_path: Path, recipe_data: dict
) -> str:
    """
    Execute command and capture output for variable value.
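Note: the platform defaults above ("bash -c" on POSIX, "cmd /c" on Windows) are what eval-style variables run their command through before capturing stdout. A hedged sketch of that flow, not the package's exact code:

    import platform
    import subprocess

    def run_eval_command(command: str) -> str:
        # Pick the platform default shell, mirroring the tuple returned above
        shell, args = ("cmd", ["/c"]) if platform.system() == "Windows" else ("bash", ["-c"])
        result = subprocess.run([shell, *args, command], capture_output=True, text=True, check=True)
        output = result.stdout
        return output[:-1] if output.endswith("\n") else output

    print(run_eval_command("echo hello"))  # -> hello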
@@ -1033,7 +1101,7 @@ def _resolve_eval_variable(
    output = result.stdout

    # Strip single trailing newline if present
-    if output.endswith('\n'):
+    if output.endswith("\n"):
        output = output[:-1]

    return output
@@ -1045,7 +1113,7 @@ def _resolve_variable_value(
    resolved: dict[str, str],
    resolution_stack: list[str],
    file_path: Path,
-    recipe_data: dict | None = None
+    recipe_data: dict | None = None,
) -> str:
    """
    Resolve a single variable value with circular reference detection.
@@ -1083,6 +1151,7 @@ def _resolve_variable_value(

        # Still perform variable-in-variable substitution
        from tasktree.substitution import substitute_variables
+
        try:
            resolved_value = substitute_variables(string_value, resolved)
        except ValueError as e:
@@ -1094,7 +1163,9 @@ def _resolve_variable_value(
                undefined_var = match.group(1)
                if undefined_var in resolution_stack:
                    cycle = " -> ".join(resolution_stack + [undefined_var])
-                    raise ValueError(f"Circular reference detected in variables: {cycle}")
+                    raise ValueError(
+                        f"Circular reference detected in variables: {cycle}"
+                    )
            # Re-raise the original error if not circular
            raise

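Note: this circular-reference check (repeated in the next few hunks for the other variable kinds) relies on a resolution stack: each variable being resolved is pushed before its references are resolved, so revisiting a name already on the stack means a cycle. A compact, simplified sketch of the idea, not tasktree's code:

    import re

    VAR_REF = re.compile(r"\{\{\s*var\.(\w+)\s*}}")

    def resolve(name: str, specs: dict[str, str], resolved: dict[str, str], stack: list[str]) -> str:
        if name in stack:  # revisiting a name already being resolved means a cycle
            raise ValueError("Circular reference detected in variables: " + " -> ".join(stack + [name]))
        stack.append(name)
        value = specs[name]
        for ref in VAR_REF.findall(value):
            if ref not in resolved:
                resolved[ref] = resolve(ref, specs, resolved, stack)
            value = value.replace("{{ var." + ref + " }}", resolved[ref])
        stack.pop()
        return value

    specs = {"a": "{{ var.b }}/bin", "b": "{{ var.a }}"}
    try:
        resolve("a", specs, {}, [])
    except ValueError as exc:
        print(exc)  # Circular reference detected in variables: a -> b -> a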
@@ -1113,6 +1184,7 @@ def _resolve_variable_value(

        # Still perform variable-in-variable substitution
        from tasktree.substitution import substitute_variables
+
        try:
            resolved_value = substitute_variables(string_value, resolved)
        except ValueError as e:
@@ -1124,7 +1196,9 @@ def _resolve_variable_value(
                undefined_var = match.group(1)
                if undefined_var in resolution_stack:
                    cycle = " -> ".join(resolution_stack + [undefined_var])
-                    raise ValueError(
+                    raise ValueError(
+                        f"Circular reference detected in variables: {cycle}"
+                    )
            # Re-raise the original error if not circular
            raise

@@ -1140,6 +1214,7 @@ def _resolve_variable_value(

        # Still perform variable-in-variable substitution
        from tasktree.substitution import substitute_variables
+
        try:
            resolved_value = substitute_variables(string_value, resolved)
        except ValueError as e:
@@ -1151,7 +1226,9 @@ def _resolve_variable_value(
                undefined_var = match.group(1)
                if undefined_var in resolution_stack:
                    cycle = " -> ".join(resolution_stack + [undefined_var])
-                    raise ValueError(
+                    raise ValueError(
+                        f"Circular reference detected in variables: {cycle}"
+                    )
            # Re-raise the original error if not circular
            raise

@@ -1160,6 +1237,7 @@ def _resolve_variable_value(
        # Validate and infer type
        type_name = _infer_variable_type(raw_value)
        from tasktree.types import get_click_type
+
        validator = get_click_type(type_name)

        # Validate and stringify the value
@@ -1173,6 +1251,7 @@ def _resolve_variable_value(

        # Substitute any {{ var.name }} references in the string value
        from tasktree.substitution import substitute_variables
+
        try:
            resolved_value = substitute_variables(string_value_str, resolved)
        except ValueError as e:
@@ -1185,7 +1264,9 @@ def _resolve_variable_value(
            undefined_var = match.group(1)
            if undefined_var in resolution_stack:
                cycle = " -> ".join(resolution_stack + [undefined_var])
-                raise ValueError(
+                raise ValueError(
+                    f"Circular reference detected in variables: {cycle}"
+                )
            # Re-raise the original error if not circular
            raise

@@ -1232,8 +1313,7 @@ def _parse_variables_section(data: dict, file_path: Path) -> dict[str, str]:


def _expand_variable_dependencies(
-    variable_names: set[str],
-    raw_variables: dict[str, Any]
+    variable_names: set[str], raw_variables: dict[str, Any]
) -> set[str]:
    """
    Expand variable set to include all transitively referenced variables.
@@ -1256,11 +1336,11 @@ def _expand_variable_dependencies(
    ... }
    >>> _expand_variable_dependencies({"a"}, raw_vars)
    {"a", "b", "c"}
-    @athena:
+    @athena: 98e583b402aa
    """
    expanded = set(variable_names)
    to_process = list(variable_names)
-    pattern = re.compile(r'\{\{\s*var\.(\w+)\s*}}')
+    pattern = re.compile(r"\{\{\s*var\.(\w+)\s*}}")

    while to_process:
        var_name = to_process.pop(0)
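Note: the worklist loop above computes the transitive closure of variable references: every {{ var.name }} found in a value queues that name for scanning in turn. A self-contained sketch of the same idea (sample data, not from the package):

    import re

    PATTERN = re.compile(r"\{\{\s*var\.(\w+)\s*}}")

    def expand(names: set[str], raw: dict[str, str]) -> set[str]:
        expanded, queue = set(names), list(names)
        while queue:
            current = queue.pop(0)
            for ref in PATTERN.findall(str(raw.get(current, ""))):
                if ref not in expanded:   # visit each variable once
                    expanded.add(ref)
                    queue.append(ref)
        return expanded

    raw_vars = {"a": "{{ var.b }}/bin", "b": "{{ var.c }}", "c": "/opt"}
    assert expand({"a"}, raw_vars) == {"a", "b", "c"}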
@@ -1279,8 +1359,8 @@ def _expand_variable_dependencies(
                    expanded.add(referenced_var)
                    to_process.append(referenced_var)
        # Handle { read: filepath } variables - check file contents for variable references
-        elif isinstance(raw_value, dict) and 'read' in raw_value:
-            filepath = raw_value['read']
+        elif isinstance(raw_value, dict) and "read" in raw_value:
+            filepath = raw_value["read"]
            # For dependency expansion, we speculatively read files to find variable references
            # This is acceptable because file reads are relatively cheap compared to eval commands
            try:
@@ -1288,6 +1368,7 @@ def _expand_variable_dependencies(
                # Skip if filepath is None or empty (validation error will be caught during evaluation)
                if filepath and isinstance(filepath, str):
                    from pathlib import Path
+
                    if Path(filepath).exists():
                        file_content = Path(filepath).read_text()
                        # Extract variable references from file content
@@ -1301,8 +1382,12 @@ def _expand_variable_dependencies(
                # The error will be caught during actual evaluation
                pass
        # Handle { env: VAR, default: ... } variables - check default value for variable references
-        elif isinstance(raw_value, dict) and 'env' in raw_value and 'default' in raw_value:
-            default_value = raw_value['default']
+        elif (
+            isinstance(raw_value, dict)
+            and "env" in raw_value
+            and "default" in raw_value
+        ):
+            default_value = raw_value["default"]
            # Check if default value contains variable references
            if isinstance(default_value, str):
                for match in pattern.finditer(default_value):
@@ -1315,10 +1400,7 @@ def _expand_variable_dependencies(


def _evaluate_variable_subset(
-    raw_variables: dict[str, Any],
-    variable_names: set[str],
-    file_path: Path,
-    data: dict
+    raw_variables: dict[str, Any], variable_names: set[str], file_path: Path, data: dict
) -> dict[str, str]:
    """
    Evaluate only specified variables from raw specs (for lazy evaluation).
@@ -1373,7 +1455,9 @@ def _parse_file_with_env(
    namespace: str | None,
    project_root: Path,
    import_stack: list[Path] | None = None,
-) -> tuple[dict[str, Task], dict[str, Environment], str, dict[str, Any], dict[str, Any]]:
+) -> tuple[
+    dict[str, Task], dict[str, Environment], str, dict[str, Any], dict[str, Any]
+]:
    """
    Parse file and extract tasks, environments, and variables.

@@ -1384,7 +1468,7 @@ def _parse_file_with_env(
        import_stack: Stack of files being imported (for circular detection)

    Returns:
-        Tuple of (tasks, environments, default_env_name, raw_variables,
+        Tuple of (tasks, environments, default_env_name, raw_variables, YAML_data)
        Note: Variables are NOT evaluated here - they're stored as raw specs for lazy evaluation
    @athena: b2dced506787
    """
@@ -1502,7 +1586,7 @@ def _parse_file_with_env(
            env_vars=env_vars,
            working_dir=working_dir,
            extra_args=extra_args,
-            run_as_root=run_as_root
+            run_as_root=run_as_root,
        )

    return tasks, environments, default_env, raw_variables, yaml_data
@@ -1571,7 +1655,7 @@ def collect_reachable_tasks(tasks: dict[str, Task], root_task: str) -> set[str]:
def collect_reachable_variables(
    tasks: dict[str, Task],
    environments: dict[str, Environment],
-    reachable_task_names: set[str]
+    reachable_task_names: set[str],
) -> set[str]:
    """
    Extract variable names used by reachable tasks.
@@ -1591,12 +1675,12 @@ def collect_reachable_variables(
    >>> task = Task("build", cmd="echo {{ var.version }}")
    >>> collect_reachable_variables({"build": task}, {"build"})
    {"version"}
-    @athena:
+    @athena: e22e54537f8d
    """
    import re

    # Pattern to match {{ var.name }}
-    var_pattern = re.compile(r'\{\{\s*var\s*\.\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*}}')
+    var_pattern = re.compile(r"\{\{\s*var\s*\.\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*}}")

    variables = set()

@@ -1715,9 +1799,7 @@ def collect_reachable_variables(


def parse_recipe(
-    recipe_path: Path,
-    project_root: Path | None = None,
-    root_task: str | None = None
+    recipe_path: Path, project_root: Path | None = None, root_task: str | None = None
) -> Recipe:
    """
    Parse a recipe file and handle imports recursively.
@@ -1768,7 +1850,7 @@ def parse_recipe(
        raw_variables=raw_variables,
        evaluated_variables={},  # Empty initially
        _variables_evaluated=False,
-        _original_yaml_data=yaml_data
+        _original_yaml_data=yaml_data,
    )

    # Trigger lazy variable evaluation
@@ -1801,7 +1883,7 @@ def _parse_file(
        CircularImportError: If a circular import is detected
        FileNotFoundError: If an imported file doesn't exist
        ValueError: If task structure is invalid
-    @athena:
+    @athena: 8a903d791c2f
    """
    # Initialize import stack if not provided
    if import_stack is None:
@@ -1823,7 +1905,8 @@ def _parse_file(
        data = {}

    tasks: dict[str, Task] = {}
-    file_dir = file_path.parent
+    # TODO: Understand why this is not used.
+    # file_dir = file_path.parent

    # Default working directory is the project root (where tt is invoked)
    # NOT the directory where the tasks file is located
@@ -1843,7 +1926,9 @@ def _parse_file(
            local_import_namespaces.add(child_namespace)

            # Build full namespace chain
-            full_namespace = f"{namespace}.{child_namespace}" if namespace else child_namespace
+            full_namespace = (
+                f"{namespace}.{child_namespace}" if namespace else child_namespace
+            )

            # Resolve import path relative to current file's directory
            child_path = file_path.parent / child_file
@@ -1861,15 +1946,16 @@ def _parse_file(
            tasks.update(nested_tasks)

    # Validate top-level keys (only imports, environments, tasks, and variables are allowed)
-
+    valid_top_level_keys = {"imports", "environments", "tasks", "variables"}

    # Check if tasks key is missing when there appear to be task definitions at root
    # Do this BEFORE checking for unknown keys, to provide better error message
    if "tasks" not in data and data:
        # Check if there are potential task definitions at root level
        potential_tasks = [
-            k
-
+            k
+            for k, v in data.items()
+            if isinstance(v, dict) and k not in valid_top_level_keys
        ]

        if potential_tasks:
@@ -1879,13 +1965,13 @@ def _parse_file(
            f"Found these keys at root level: {', '.join(potential_tasks)}\n\n"
            f"Did you mean:\n\n"
            f"tasks:\n"
-            +
-            "\n cmd: ...\n\n"
-            f"Valid top-level keys are: {', '.join(sorted(
+            + "\n".join(f" {k}:" for k in potential_tasks)
+            + "\n cmd: ...\n\n"
+            f"Valid top-level keys are: {', '.join(sorted(valid_top_level_keys))}"
        )

    # Now check for other invalid top-level keys (non-dict values)
-    invalid_keys = set(data.keys()) -
+    invalid_keys = set(data.keys()) - valid_top_level_keys
    if invalid_keys:
        raise ValueError(
            f"Invalid recipe format in {file_path}\n\n"
@@ -1903,10 +1989,14 @@ def _parse_file(

    # Process local tasks
    for task_name, task_data in tasks_data.items():
-
        if not isinstance(task_data, dict):
            raise ValueError(f"Task '{task_name}' must be a dictionary")

+        if "." in task_name:
+            raise ValueError(
+                f"Task name '{task_name}' cannot contain dots (reserved for namespacing)"
+            )
+
        if "cmd" not in task_data:
            raise ValueError(f"Task '{task_name}' missing required 'cmd' field")

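Note: the new validation above rejects dots in task names because the dot is what separates an import namespace from a task name when imported files are merged, as the following hunk shows. A tiny illustration of the convention (hypothetical names):

    def qualify(namespace: str | None, task_name: str) -> str:
        # Imported tasks are addressed as "<namespace>.<task>"; local tasks keep their bare name
        return f"{namespace}.{task_name}" if namespace else task_name

    assert qualify("docker", "build") == "docker.build"
    assert qualify(None, "build") == "build"
    # A user-defined task literally named "docker.build" would be indistinguishable
    # from an imported one, hence the new ValueError for dots in task names.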
@@ -1944,19 +2034,19 @@ def _parse_file(
                elif isinstance(dep, dict):
                    # Dict dependency with args - rewrite the task name key
                    rewritten_dep = {}
-                    for
-                        if "." not in
+                    for t_name, args in dep.items():
+                        if "." not in t_name:
                            # Simple name - prefix it
-                            rewritten_dep[f"{namespace}.{
+                            rewritten_dep[f"{namespace}.{t_name}"] = args
                        else:
                            # Check if it starts with a local import namespace
-                            dep_root =
+                            dep_root = t_name.split(".", 1)[0]
                            if dep_root in local_import_namespaces:
                                # Local import reference - prefix it
-                                rewritten_dep[f"{namespace}.{
+                                rewritten_dep[f"{namespace}.{t_name}"] = args
                            else:
                                # External reference - keep as-is
-                                rewritten_dep[
+                                rewritten_dep[t_name] = args
                    rewritten_deps.append(rewritten_dep)
                else:
                    # Unknown type - keep as-is
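Note: dict-form dependencies carry per-invocation args, so only the key (the task name) is rewritten while the args value passes through untouched. A sketch of the rewrite rule for one dependency dict (sample namespaces, not from the package):

    def rewrite_dep(dep: dict, namespace: str, local_import_namespaces: set[str]) -> dict:
        rewritten = {}
        for name, args in dep.items():
            root = name.split(".", 1)[0]
            if "." not in name or root in local_import_namespaces:
                rewritten[f"{namespace}.{name}"] = args   # local task or local import: prefix it
            else:
                rewritten[name] = args                    # external reference: keep as-is
        return rewritten

    dep = {"compile": {"target": "release"}}
    assert rewrite_dep(dep, "backend", {"proto"}) == {"backend.compile": {"target": "release"}}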
@@ -2023,7 +2113,7 @@ def _check_case_sensitive_arg_collisions(args: list[str], task_name: str) -> Non
                f"Warning: Task '{task_name}' has exported arguments that differ only in case: "
                f"${other_name} and ${name}. "
                f"This may be confusing on case-sensitive systems.",
-                file=sys.stderr
+                file=sys.stderr,
            )
        else:
            seen_lower[lower_name] = name
@@ -2124,7 +2214,7 @@ def parse_arg_spec(arg_spec: str | dict) -> ArgSpec:
            is_exported=is_exported,
            min_val=None,
            max_val=None,
-            choices=None
+            choices=None,
        )


@@ -2176,22 +2266,18 @@ def _parse_arg_dict(arg_name: str, config: dict, is_exported: bool) -> ArgSpec:
                f"Exported argument '${arg_name}' must have a string default value.\n"
                f"Got: {default!r} (type: {type(default).__name__})\n"
                f"Exported arguments become environment variables, which are always strings.\n"
-                f
+                f'Use a quoted string: ${arg_name}: {{ default: "{default}" }}'
            )

    # Validate choices
    if choices is not None:
        # Validate choices is a list
        if not isinstance(choices, list):
-            raise ValueError(
-                f"Argument '{arg_name}': choices must be a list"
-            )
+            raise ValueError(f"Argument '{arg_name}': choices must be a list")

        # Validate choices is not empty
        if len(choices) == 0:
-            raise ValueError(
-                f"Argument '{arg_name}': choices list cannot be empty"
-            )
+            raise ValueError(f"Argument '{arg_name}': choices list cannot be empty")

        # Check for mutual exclusivity with min/max
        if min_val is not None or max_val is not None:
@@ -2220,7 +2306,9 @@ def _parse_arg_dict(arg_name: str, config: dict, is_exported: bool) -> ArgSpec:
            for value_name, value_type in inferred_types[1:]:
                if value_type != first_type:
                    # Build error message showing the conflicting types
-                    type_info = ", ".join([f"{name}={vtype}" for name, vtype in inferred_types])
+                    type_info = ", ".join(
+                        [f"{name}={vtype}" for name, vtype in inferred_types]
+                    )
                    raise ValueError(
                        f"Argument '{arg_name}': inconsistent types inferred from min, max, and default.\n"
                        f"All values must have the same type.\n"
@@ -2244,7 +2332,10 @@ def _parse_arg_dict(arg_name: str, config: dict, is_exported: bool) -> ArgSpec:
        )

    # Validate min/max are only used with numeric types
-    if (min_val is not None or max_val is not None) and arg_type not in ("int", "float"):
+    if (min_val is not None or max_val is not None) and arg_type not in (
+        "int",
+        "float",
+    ):
        raise ValueError(
            f"Argument '{arg_name}': min/max constraints are only supported for 'int' and 'float' types, "
            f"not '{arg_type}'"
@@ -2373,11 +2464,13 @@ def _parse_arg_dict(arg_name: str, config: dict, is_exported: bool) -> ArgSpec:
        is_exported=is_exported,
        min_val=min_val,
        max_val=max_val,
-        choices=choices
+        choices=choices,
    )


-def parse_dependency_spec(dep_spec: str | dict[str, Any], recipe: Recipe) -> DependencyInvocation:
+def parse_dependency_spec(
+    dep_spec: str | dict[str, Any], recipe: Recipe
+) -> DependencyInvocation:
    """
    Parse a dependency specification into a DependencyInvocation.

@@ -2505,7 +2598,9 @@ def _parse_positional_dependency_args(
            spec = parsed_specs[i]
            if isinstance(value, str):
                # Convert string values using type validator
-                click_type = get_click_type(spec.arg_type, min_val=spec.min_val, max_val=spec.max_val)
+                click_type = get_click_type(
+                    spec.arg_type, min_val=spec.min_val, max_val=spec.max_val
+                )
                args_dict[spec.name] = click_type.convert(value, None, None)
            else:
                # Value is already typed (e.g., bool, int from YAML)
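Note: get_click_type (imported from tasktree.types earlier in this diff) hands back a Click parameter type, which is why string values are coerced with .convert(value, None, None). Click's built-in types behave the same way outside a CLI context; a small standalone illustration using Click directly rather than the package's helper:

    import click

    # Plain int conversion, and a ranged type that enforces min/max like the arg specs above
    assert click.INT.convert("42", None, None) == 42
    ranged = click.IntRange(min=1, max=10)
    assert ranged.convert("7", None, None) == 7

    try:
        ranged.convert("99", None, None)
    except click.UsageError as exc:
        print(exc)  # click reports that 99 is outside the allowed range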
@@ -2516,7 +2611,9 @@ def _parse_positional_dependency_args(
            spec = parsed_specs[i]
            if spec.default is not None:
                # Defaults in task specs are always strings, convert them
-                click_type = get_click_type(spec.arg_type, min_val=spec.min_val, max_val=spec.max_val)
+                click_type = get_click_type(
+                    spec.arg_type, min_val=spec.min_val, max_val=spec.max_val
+                )
                args_dict[spec.name] = click_type.convert(spec.default, None, None)
            else:
                raise ValueError(
@@ -2561,9 +2658,7 @@ def _parse_named_dependency_args(
    # Validate all provided arg names exist
    for arg_name in args_dict:
        if arg_name not in spec_map:
-            raise ValueError(
-                f"Task '{task_name}' has no argument named '{arg_name}'"
-            )
+            raise ValueError(f"Task '{task_name}' has no argument named '{arg_name}'")

    # Build normalized args dict with defaults
    normalized_args = {}
@@ -2572,14 +2667,18 @@ def _parse_named_dependency_args(
            # Use provided value with type conversion (only convert strings)
            value = args_dict[spec.name]
            if isinstance(value, str):
-                click_type = get_click_type(spec.arg_type, min_val=spec.min_val, max_val=spec.max_val)
+                click_type = get_click_type(
+                    spec.arg_type, min_val=spec.min_val, max_val=spec.max_val
+                )
                normalized_args[spec.name] = click_type.convert(value, None, None)
            else:
                # Value is already typed (e.g., bool, int from YAML)
                normalized_args[spec.name] = value
        elif spec.default is not None:
            # Use default value (defaults are always strings in task specs)
-            click_type = get_click_type(spec.arg_type, min_val=spec.min_val, max_val=spec.max_val)
+            click_type = get_click_type(
+                spec.arg_type, min_val=spec.min_val, max_val=spec.max_val
+            )
            normalized_args[spec.name] = click_type.convert(spec.default, None, None)
        else:
            # Required arg not provided