tasktree 0.0.18-py3-none-any.whl → 0.0.19-py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
tasktree/__init__.py CHANGED
@@ -13,6 +13,7 @@ from tasktree.graph import (
     TaskNotFoundError,
     build_dependency_tree,
     get_implicit_inputs,
+    resolve_dependency_output_references,
     resolve_execution_order,
 )
 from tasktree.hasher import hash_args, hash_task, make_cache_key
@@ -28,6 +29,7 @@ __all__ = [
     "TaskNotFoundError",
     "build_dependency_tree",
     "get_implicit_inputs",
+    "resolve_dependency_output_references",
     "resolve_execution_order",
     "hash_args",
     "hash_task",
tasktree/cli.py CHANGED
@@ -14,7 +14,7 @@ from rich.tree import Tree
 
 from tasktree import __version__
 from tasktree.executor import Executor
-from tasktree.graph import build_dependency_tree, resolve_execution_order
+from tasktree.graph import build_dependency_tree, resolve_execution_order, resolve_dependency_output_references
 from tasktree.hasher import hash_task, hash_args
 from tasktree.parser import Recipe, find_recipe_file, parse_arg_spec, parse_recipe
 from tasktree.state import StateManager
@@ -110,8 +110,13 @@ def _list_tasks(tasks_file: Optional[str] = None):
         console.print("[red]No recipe file found (tasktree.yaml, tasktree.yml, tt.yaml, or *.tasks)[/red]")
         raise typer.Exit(1)
 
-    # Calculate maximum task name length for fixed-width column
-    max_task_name_len = max(len(name) for name in recipe.task_names()) if recipe.task_names() else 0
+    # Calculate maximum task name length for fixed-width column (only visible tasks)
+    visible_task_names = []
+    for name in recipe.task_names():
+        task = recipe.get_task(name)
+        if task and not task.private:
+            visible_task_names.append(name)
+    max_task_name_len = max(len(name) for name in visible_task_names) if visible_task_names else 0
 
     # Create borderless table with three columns
     table = Table(show_edge=False, show_header=False, box=None, padding=(0, 2))
@@ -127,6 +132,9 @@ def _list_tasks(tasks_file: Optional[str] = None):
 
     for task_name in sorted(recipe.task_names()):
         task = recipe.get_task(task_name)
+        # Skip private tasks in list output
+        if task and task.private:
+            continue
         desc = task.desc if task else ""
         args_formatted = _format_task_arguments(task.args) if task else ""
 
@@ -344,7 +352,9 @@ def main(
 
         console.print("[bold]Available tasks:[/bold]")
         for task_name in sorted(recipe.task_names()):
-            console.print(f" - {task_name}")
+            task = recipe.get_task(task_name)
+            if task and not task.private:
+                console.print(f" - {task_name}")
         console.print("\nUse [cyan]tt --list[/cyan] for detailed information")
         console.print("Use [cyan]tt <task-name>[/cyan] to run a task")
 
@@ -437,7 +447,9 @@ def _execute_dynamic_task(args: list[str], force: bool = False, only: bool = Fal
         console.print(f"[red]Task not found: {task_name}[/red]")
         console.print("\nAvailable tasks:")
         for name in sorted(recipe.task_names()):
-            console.print(f" - {name}")
+            task = recipe.get_task(name)
+            if task and not task.private:
+                console.print(f" - {name}")
         raise typer.Exit(1)
 
     # Parse task arguments
@@ -452,6 +464,10 @@ def _execute_dynamic_task(args: list[str], force: bool = False, only: bool = Fal
     # This is important for correct state pruning after template substitution
     execution_order = resolve_execution_order(recipe, task_name, args_dict)
 
+    # Resolve dependency output references in topological order
+    # This substitutes {{ dep.*.outputs.* }} templates before execution
+    resolve_dependency_output_references(recipe, execution_order)
+
    # Prune state based on tasks that will actually execute (with their specific arguments)
    # This ensures template-substituted dependencies are handled correctly
    valid_hashes = set()
tasktree/executor.py CHANGED
@@ -14,7 +14,7 @@ from pathlib import Path
 from typing import Any
 
 from tasktree import docker as docker_module
-from tasktree.graph import get_implicit_inputs, resolve_execution_order
+from tasktree.graph import get_implicit_inputs, resolve_execution_order, resolve_dependency_output_references
 from tasktree.hasher import hash_args, hash_task, make_cache_key
 from tasktree.parser import Recipe, Task, Environment
 from tasktree.state import StateManager, TaskState
@@ -433,6 +433,10 @@ class Executor:
         # Execute task and all dependencies
         execution_order = resolve_execution_order(self.recipe, task_name, args_dict)
 
+        # Resolve dependency output references in topological order
+        # This substitutes {{ dep.*.outputs.* }} templates before execution
+        resolve_dependency_output_references(self.recipe, execution_order)
+
         # Single phase: Check and execute incrementally
         statuses: dict[str, TaskStatus] = {}
         for name, task_args in execution_order:
@@ -1042,6 +1046,25 @@ class Executor:
 
         return changed_files
 
+    def _expand_output_paths(self, task: Task) -> list[str]:
+        """Extract all output paths from task outputs (both named and anonymous).
+
+        Args:
+            task: Task with outputs to extract
+
+        Returns:
+            List of output path patterns (glob patterns as strings)
+        """
+        paths = []
+        for output in task.outputs:
+            if isinstance(output, str):
+                # Anonymous output: just the path string
+                paths.append(output)
+            elif isinstance(output, dict):
+                # Named output: extract the path value
+                paths.extend(output.values())
+        return paths
+
     def _check_outputs_missing(self, task: Task) -> list[str]:
         """Check if any declared outputs are missing.
 
@@ -1057,7 +1080,10 @@ class Executor:
         missing_patterns = []
         base_path = self.recipe.project_root / task.working_dir
 
-        for pattern in task.outputs:
+        # Expand outputs to paths (handles both named and anonymous)
+        output_paths = self._expand_output_paths(task)
+
+        for pattern in output_paths:
             # Check if pattern has any matches
             matches = list(base_path.glob(pattern))
             if not matches:
tasktree/graph.py CHANGED
@@ -306,6 +306,114 @@ def resolve_execution_order(
         raise CycleError(f"Dependency cycle detected: {e}")
 
 
+def resolve_dependency_output_references(
+    recipe: Recipe,
+    ordered_tasks: list[tuple[str, dict[str, Any]]],
+) -> None:
+    """Resolve {{ dep.<task>.outputs.<name> }} references in topological order.
+
+    This function walks through tasks in dependency order (dependencies first) and
+    resolves any references to dependency outputs in task fields. Templates are
+    resolved in place, modifying the Task objects in the recipe.
+
+    Args:
+        recipe: Recipe containing task definitions
+        ordered_tasks: List of (task_name, args) tuples in topological order
+
+    Raises:
+        ValueError: If template references cannot be resolved (missing task,
+            missing output, task not in dependencies, etc.)
+
+    Example:
+        Given tasks in topological order: [('build', {}), ('deploy', {})]
+        If deploy.cmd contains "{{ dep.build.outputs.bundle }}", it will be
+        resolved to the actual output path from the build task.
+    """
+    from tasktree.substitution import substitute_dependency_outputs
+
+    # Track which tasks have been resolved (for validation)
+    resolved_tasks = {}
+
+    for task_name, task_args in ordered_tasks:
+        task = recipe.tasks.get(task_name)
+        if task is None:
+            continue  # Skip if task doesn't exist (shouldn't happen)
+
+        # Get list of dependency task names for this task
+        dep_task_names = []
+        for dep_spec in task.deps:
+            # Handle both string and dict dependency specs
+            if isinstance(dep_spec, str):
+                dep_task_names.append(dep_spec)
+            elif isinstance(dep_spec, dict):
+                # Dict spec: {"task_name": [args]}
+                dep_task_names.append(list(dep_spec.keys())[0])
+
+        # Resolve output references in command
+        if task.cmd:
+            task.cmd = substitute_dependency_outputs(
+                task.cmd,
+                task_name,
+                dep_task_names,
+                resolved_tasks,
+            )
+
+        # Resolve output references in working_dir
+        if task.working_dir:
+            task.working_dir = substitute_dependency_outputs(
+                task.working_dir,
+                task_name,
+                dep_task_names,
+                resolved_tasks,
+            )
+
+        # Resolve output references in outputs
+        resolved_outputs = []
+        for output in task.outputs:
+            if isinstance(output, str):
+                resolved_outputs.append(
+                    substitute_dependency_outputs(
+                        output,
+                        task_name,
+                        dep_task_names,
+                        resolved_tasks,
+                    )
+                )
+            elif isinstance(output, dict):
+                # Named output: resolve the path value
+                resolved_dict = {}
+                for name, path in output.items():
+                    resolved_dict[name] = substitute_dependency_outputs(
+                        path,
+                        task_name,
+                        dep_task_names,
+                        resolved_tasks,
+                    )
+                resolved_outputs.append(resolved_dict)
+        task.outputs = resolved_outputs
+
+        # Rebuild output maps after resolution
+        task.__post_init__()
+
+        # Resolve output references in argument defaults
+        if task.args:
+            for arg_spec in task.args:
+                if isinstance(arg_spec, dict):
+                    # Get arg name and details
+                    for arg_name, arg_details in arg_spec.items():
+                        if isinstance(arg_details, dict) and "default" in arg_details:
+                            if isinstance(arg_details["default"], str):
+                                arg_details["default"] = substitute_dependency_outputs(
+                                    arg_details["default"],
+                                    task_name,
+                                    dep_task_names,
+                                    resolved_tasks,
+                                )
+
+        # Mark this task as resolved for future references
+        resolved_tasks[task_name] = task
+
+
 def get_implicit_inputs(recipe: Recipe, task: Task) -> list[str]:
     """Get implicit inputs for a task based on its dependencies.
 
@@ -336,7 +444,14 @@ def get_implicit_inputs(recipe: Recipe, task: Task) -> list[str]:
 
         # If dependency has outputs, inherit them
         if dep_task.outputs:
-            implicit_inputs.extend(dep_task.outputs)
+            # Extract paths from both named and anonymous outputs
+            for output in dep_task.outputs:
+                if isinstance(output, str):
+                    # Anonymous output: just the path
+                    implicit_inputs.append(output)
+                elif isinstance(output, dict):
+                    # Named output: extract path values
+                    implicit_inputs.extend(output.values())
         # If dependency has no outputs, inherit its inputs
         elif dep_task.inputs:
             implicit_inputs.extend(dep_task.inputs)
tasktree/hasher.py CHANGED
@@ -37,9 +37,38 @@ def _normalize_choices_lists(args: list[str | dict[str, Any]]) -> list[str | di
     return normalized_args
 
 
+def _serialize_outputs_for_hash(outputs: list[str | dict[str, str]]) -> list[str]:
+    """Serialize outputs to consistent list of strings for hashing.
+
+    Converts both named outputs (dicts) and anonymous outputs (strings)
+    into a consistent, sortable format.
+
+    Args:
+        outputs: List of output specifications (strings or dicts)
+
+    Returns:
+        List of serialized output strings in sorted order
+
+    Example:
+        >>> _serialize_outputs_for_hash(["file.txt", {"bundle": "app.js"}])
+        ['bundle:app.js', 'file.txt']
+    """
+    serialized = []
+    for output in outputs:
+        if isinstance(output, str):
+            # Anonymous output: just the path
+            serialized.append(output)
+        elif isinstance(output, dict):
+            # Named output: serialize as "name:path" for each entry
+            # Sort dict items for consistent ordering
+            for name, path in sorted(output.items()):
+                serialized.append(f"{name}:{path}")
+    return sorted(serialized)
+
+
 def hash_task(
     cmd: str,
-    outputs: list[str],
+    outputs: list[str | dict[str, str]],
     working_dir: str,
     args: list[str | dict[str, Any]],
     env: str = "",
@@ -49,7 +78,7 @@ def hash_task(
 
     Args:
         cmd: Task command
-        outputs: Task outputs
+        outputs: Task outputs (strings or named dicts)
         working_dir: Working directory
         args: Task argument specifications
         env: Environment name
@@ -60,7 +89,7 @@ def hash_task(
     """
     data = {
         "cmd": cmd,
-        "outputs": sorted(outputs),
+        "outputs": _serialize_outputs_for_hash(outputs),
         "working_dir": working_dir,
         "args": sorted(_normalize_choices_lists(args), key=_arg_sort_key),
         "env": env,
tasktree/parser.py CHANGED
@@ -58,14 +58,19 @@ class Task:
     desc: str = ""
     deps: list[str | dict[str, Any]] = field(default_factory=list)  # Can be strings or dicts with args
     inputs: list[str] = field(default_factory=list)
-    outputs: list[str] = field(default_factory=list)
+    outputs: list[str | dict[str, str]] = field(default_factory=list)  # Can be strings or dicts with named outputs
     working_dir: str = ""
     args: list[str | dict[str, Any]] = field(default_factory=list)  # Can be strings or dicts (each dict has single key: arg name)
     source_file: str = ""  # Track which file defined this task
     env: str = ""  # Environment name to use for execution
+    private: bool = False  # If True, task is hidden from --list output
+
+    # Internal fields for efficient output lookup (built in __post_init__)
+    _output_map: dict[str, str] = field(init=False, default_factory=dict, repr=False)  # name → path mapping
+    _anonymous_outputs: list[str] = field(init=False, default_factory=list, repr=False)  # unnamed outputs
 
     def __post_init__(self):
-        """Ensure lists are always lists."""
+        """Ensure lists are always lists and build output maps."""
         if isinstance(self.deps, str):
             self.deps = [self.deps]
         if isinstance(self.inputs, str):
@@ -88,6 +93,45 @@ class Task:
                 f"Arguments must be defined as a list, not a dictionary."
             )
 
+        # Build output maps for efficient lookup
+        self._output_map = {}
+        self._anonymous_outputs = []
+
+        for idx, output in enumerate(self.outputs):
+            if isinstance(output, dict):
+                # Named output: validate and store
+                if len(output) != 1:
+                    raise ValueError(
+                        f"Task '{self.name}': Named output at index {idx} must have exactly one key-value pair, got {len(output)}: {output}"
+                    )
+
+                name, path = next(iter(output.items()))
+
+                if not isinstance(path, str):
+                    raise ValueError(
+                        f"Task '{self.name}': Named output '{name}' must have a string path, got {type(path).__name__}: {path}"
+                    )
+
+                if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', name):
+                    raise ValueError(
+                        f"Task '{self.name}': Named output '{name}' must be a valid identifier "
+                        f"(letters, numbers, underscores, cannot start with number)"
+                    )
+
+                if name in self._output_map:
+                    raise ValueError(
+                        f"Task '{self.name}': Duplicate output name '{name}' at index {idx}"
+                    )
+
+                self._output_map[name] = path
+            elif isinstance(output, str):
+                # Anonymous output: just store
+                self._anonymous_outputs.append(output)
+            else:
+                raise ValueError(
+                    f"Task '{self.name}': Output at index {idx} must be a string or dict, got {type(output).__name__}: {output}"
+                )
+
 
 @dataclass
 class DependencySpec:
@@ -266,9 +310,56 @@ class Recipe:
             task.desc = substitute_variables(task.desc, self.evaluated_variables)
             task.working_dir = substitute_variables(task.working_dir, self.evaluated_variables)
             task.inputs = [substitute_variables(inp, self.evaluated_variables) for inp in task.inputs]
-            task.outputs = [substitute_variables(out, self.evaluated_variables) for out in task.outputs]
-            # Substitute in argument default values (in arg spec strings)
-            task.args = [substitute_variables(arg, self.evaluated_variables) for arg in task.args]
+
+            # Substitute variables in outputs (handle both string and dict outputs)
+            resolved_outputs = []
+            for out in task.outputs:
+                if isinstance(out, str):
+                    resolved_outputs.append(substitute_variables(out, self.evaluated_variables))
+                elif isinstance(out, dict):
+                    # Named output: substitute the path value
+                    resolved_dict = {}
+                    for name, path in out.items():
+                        resolved_dict[name] = substitute_variables(path, self.evaluated_variables)
+                    resolved_outputs.append(resolved_dict)
+                else:
+                    resolved_outputs.append(out)
+            task.outputs = resolved_outputs
+
+            # Rebuild output maps after variable substitution
+            task.__post_init__()
+
+            # Substitute in argument default values (handle both string and dict args)
+            resolved_args = []
+            for arg in task.args:
+                if isinstance(arg, str):
+                    resolved_args.append(substitute_variables(arg, self.evaluated_variables))
+                elif isinstance(arg, dict):
+                    # Dict arg: substitute in nested values (like default values)
+                    resolved_dict = {}
+                    for arg_name, arg_spec in arg.items():
+                        if isinstance(arg_spec, dict):
+                            # Substitute in the nested dict values (e.g., default, help, choices)
+                            resolved_spec = {}
+                            for key, value in arg_spec.items():
+                                if isinstance(value, str):
+                                    resolved_spec[key] = substitute_variables(value, self.evaluated_variables)
+                                elif isinstance(value, list):
+                                    # Handle lists like 'choices'
+                                    resolved_spec[key] = [
+                                        substitute_variables(v, self.evaluated_variables) if isinstance(v, str) else v
+                                        for v in value
+                                    ]
+                                else:
+                                    resolved_spec[key] = value
+                            resolved_dict[arg_name] = resolved_spec
+                        else:
+                            # Simple value
+                            resolved_dict[arg_name] = substitute_variables(arg_spec, self.evaluated_variables) if isinstance(arg_spec, str) else arg_spec
+                    resolved_args.append(resolved_dict)
+                else:
+                    resolved_args.append(arg)
+            task.args = resolved_args
 
         # Substitute evaluated variables into all environments
         for env in self.environments.values():
@@ -335,19 +426,15 @@ def find_recipe_file(start_dir: Path | None = None) -> Path | None:
     while True:
        candidates = []
 
-        # Check for exact filenames first
+        # Check for exact filenames first (these are preferred)
        for filename in ["tasktree.yaml", "tasktree.yml", "tt.yaml"]:
            recipe_path = current / filename
            if recipe_path.exists():
                candidates.append(recipe_path)
 
-        # Check for *.tasks files
-        for tasks_file in current.glob("*.tasks"):
-            if tasks_file.is_file():
-                candidates.append(tasks_file)
-
+        # If we found standard recipe files, use the first one
        if len(candidates) > 1:
-            # Multiple recipe files found - ambiguous
+            # Multiple standard recipe files found - ambiguous
            filenames = [c.name for c in candidates]
            raise ValueError(
                f"Multiple recipe files found in {current}:\n"
@@ -358,6 +445,25 @@ def find_recipe_file(start_dir: Path | None = None) -> Path | None:
        elif len(candidates) == 1:
            return candidates[0]
 
+        # Only check for *.tasks files if no standard recipe files found
+        # (*.tasks files are typically imports, not main recipes)
+        tasks_files = []
+        for tasks_file in current.glob("*.tasks"):
+            if tasks_file.is_file():
+                tasks_files.append(tasks_file)
+
+        if len(tasks_files) > 1:
+            # Multiple *.tasks files found - ambiguous
+            filenames = [t.name for t in tasks_files]
+            raise ValueError(
+                f"Multiple recipe files found in {current}:\n"
+                f"  {', '.join(filenames)}\n\n"
+                f"Please specify which file to use with --tasks (-T):\n"
+                f"  tt --tasks {filenames[0]} <task-name>"
+            )
+        elif len(tasks_files) == 1:
+            return tasks_files[0]
+
        # Move to parent directory
        parent = current.parent
        if parent == current:
@@ -1373,8 +1479,15 @@ def collect_reachable_variables(
        # Search in outputs
        if task.outputs:
            for output_pattern in task.outputs:
-                for match in var_pattern.finditer(output_pattern):
-                    variables.add(match.group(1))
+                if isinstance(output_pattern, str):
+                    for match in var_pattern.finditer(output_pattern):
+                        variables.add(match.group(1))
+                elif isinstance(output_pattern, dict):
+                    # Named output - check the path value
+                    for output_path in output_pattern.values():
+                        if isinstance(output_path, str):
+                            for match in var_pattern.finditer(output_path):
+                                variables.add(match.group(1))
 
        # Search in argument defaults
        if task.args:
@@ -1700,6 +1813,7 @@ def _parse_file(
            args=task_data.get("args", []),
            source_file=str(file_path),
            env=task_data.get("env", ""),
+            private=task_data.get("private", False),
        )
 
        # Check for case-sensitive argument collisions
tasktree/substitution.py CHANGED
@@ -15,6 +15,12 @@ PLACEHOLDER_PATTERN = re.compile(
     r'\{\{\s*(var|arg|env|tt)\s*\.\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*\}\}'
 )
 
+# Pattern matches: {{ dep.task_name.outputs.output_name }} with optional whitespace
+# Groups: (1) task_name (can include dots for namespacing), (2) output_name (identifier)
+DEP_OUTPUT_PATTERN = re.compile(
+    r'\{\{\s*dep\s*\.\s*([a-zA-Z_][a-zA-Z0-9_.-]*)\s*\.\s*outputs\s*\.\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*\}\}'
+)
+
 
 def substitute_variables(text: str | dict[str, Any], variables: dict[str, str]) -> str | dict[str, Any]:
     """Substitute {{ var.name }} placeholders with variable values.
@@ -290,3 +296,79 @@ def substitute_all(text: str, variables: dict[str, str], args: dict[str, Any]) -
     text = substitute_arguments(text, args)
     text = substitute_environment(text)
     return text
+
+
+def substitute_dependency_outputs(
+    text: str,
+    current_task_name: str,
+    current_task_deps: list[str],
+    resolved_tasks: dict[str, Any],
+) -> str:
+    """Substitute {{ dep.<task>.outputs.<name> }} placeholders with dependency output paths.
+
+    This function resolves references to named outputs from dependency tasks.
+    It validates that:
+    - The referenced task exists in the resolved_tasks dict
+    - The current task lists the referenced task as a dependency
+    - The referenced output name exists in the dependency task
+
+    Args:
+        text: Text containing {{ dep.*.outputs.* }} placeholders
+        current_task_name: Name of task being resolved (for error messages)
+        current_task_deps: List of dependency task names for the current task
+        resolved_tasks: Dictionary mapping task names to Task objects (already resolved)
+
+    Returns:
+        Text with all {{ dep.*.outputs.* }} placeholders replaced with output paths
+
+    Raises:
+        ValueError: If referenced task doesn't exist, isn't a dependency,
+            or doesn't have the named output
+
+    Example:
+        >>> # Assuming build task has output { bundle: "dist/app.js" }
+        >>> substitute_dependency_outputs(
+        ...     "Deploy {{ dep.build.outputs.bundle }}",
+        ...     "deploy",
+        ...     ["build"],
+        ...     {"build": build_task}
+        ... )
+        'Deploy dist/app.js'
+    """
+    def replacer(match: re.Match) -> str:
+        dep_task_name = match.group(1)
+        output_name = match.group(2)
+
+        # Check if dependency task exists in resolved tasks
+        if dep_task_name not in resolved_tasks:
+            raise ValueError(
+                f"Task '{current_task_name}' references output from unknown task '{dep_task_name}'.\n"
+                f"Check the task name in {{{{ dep.{dep_task_name}.outputs.{output_name} }}}}"
+            )
+
+        # Check if current task depends on referenced task
+        if dep_task_name not in current_task_deps:
+            raise ValueError(
+                f"Task '{current_task_name}' references output from '{dep_task_name}' "
+                f"but does not list it as a dependency.\n"
+                f"Add '{dep_task_name}' to the deps list:\n"
+                f"  deps: [{', '.join(current_task_deps + [dep_task_name])}]"
+            )
+
+        # Get the dependency task
+        dep_task = resolved_tasks[dep_task_name]
+
+        # Look up the named output
+        if output_name not in dep_task._output_map:
+            available = list(dep_task._output_map.keys())
+            available_msg = ", ".join(available) if available else "(none - all outputs are anonymous)"
+            raise ValueError(
+                f"Task '{current_task_name}' references output '{output_name}' "
+                f"from task '{dep_task_name}', but '{dep_task_name}' has no output named '{output_name}'.\n"
+                f"Available named outputs in '{dep_task_name}': {available_msg}\n"
+                f"Hint: Define named outputs like: outputs: [{{ {output_name}: 'path/to/file' }}]"
+            )
+
+        return dep_task._output_map[output_name]
+
+    return DEP_OUTPUT_PATTERN.sub(replacer, text)
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tasktree
-Version: 0.0.18
+Version: 0.0.19
 Summary: A task automation tool with incremental execution
 Requires-Python: >=3.11
 Requires-Dist: click>=8.1.0
@@ -248,6 +248,7 @@ tasks:
     outputs: [dist/binary]        # Output files (glob patterns)
     working_dir: subproject/      # Execution directory (default: project root)
     env: bash-strict              # Execution environment (optional)
+    private: false                # Hide from --list output (default: false)
     args:                         # Task parameters
       - param1                    # Simple argument
       - param2: { type: path, default: "." }  # With type and default
@@ -687,6 +688,185 @@ tasks:
     cmd: echo "All tests passed"
 ```
 
+### Dependency Output References
+
+Tasks can reference named outputs from their dependencies, enabling dynamic workflows where build artifacts, generated filenames, and other values are passed between tasks.
+
+**Named Outputs:**
+
+Tasks can define outputs with names for easy referencing:
+
+```yaml
+tasks:
+  build:
+    outputs:
+      - bundle: "dist/app.js"        # Named output
+      - sourcemap: "dist/app.js.map" # Named output
+    cmd: webpack build
+
+  deploy:
+    deps: [build]
+    cmd: |
+      echo "Deploying {{ dep.build.outputs.bundle }}"
+      scp {{ dep.build.outputs.bundle }} server:/var/www/
+      scp {{ dep.build.outputs.sourcemap }} server:/var/www/
+```
+
+**Syntax:**
+
+- **Defining named outputs**: `outputs: [{ name: "path/to/file" }]`
+- **Referencing outputs**: `{{ dep.task_name.outputs.output_name }}`
+- **Mixed format**: Can combine named and anonymous outputs in the same task
+
+**Examples:**
+
+```yaml
+tasks:
+  # Generate a config file
+  generate-config:
+    outputs:
+      - config: "build/config.json"
+    cmd: |
+      mkdir -p build
+      echo '{"version": "1.0.0"}' > build/config.json
+
+  # Compile using the generated config
+  compile:
+    deps: [generate-config]
+    outputs:
+      - binary: "build/app"
+      - symbols: "build/app.sym"
+    cmd: |
+      echo "Using config: {{ dep.generate-config.outputs.config }}"
+      gcc -o build/app src/*.c
+
+  # Package multiple dependency outputs
+  package:
+    deps: [compile]
+    outputs:
+      - archive: "dist/app.tar.gz"
+    cmd: |
+      mkdir -p dist
+      tar czf dist/app.tar.gz \
+        {{ dep.compile.outputs.binary }} \
+        {{ dep.compile.outputs.symbols }}
+```
+
+**Mixed Named and Anonymous Outputs:**
+
+Tasks can have both named and anonymous outputs:
+
+```yaml
+tasks:
+  build:
+    outputs:
+      - binary: "build/app"             # Named - can be referenced
+      - "build/app.debug"               # Anonymous - tracked but not referenceable
+      - manifest: "build/manifest.json" # Named - can be referenced
+    cmd: make all
+```
+
+**Transitive References:**
+
+Output references work across multiple levels of dependencies:
+
+```yaml
+tasks:
+  base:
+    outputs:
+      - lib: "out/libbase.a"
+    cmd: gcc -c base.c -o out/libbase.a
+
+  middleware:
+    deps: [base]
+    outputs:
+      - lib: "out/libmiddleware.a"
+    cmd: |
+      # Reference the base library
+      gcc -c middleware.c {{ dep.base.outputs.lib }} -o out/libmiddleware.a
+
+  app:
+    deps: [middleware]
+    cmd: |
+      # Reference middleware, which transitively used base
+      gcc main.c {{ dep.middleware.outputs.lib }} -o app
+```
+
+**Key Behaviors:**
+
+- **Template resolution**: Output references are resolved during dependency graph planning (in topological order)
+- **Fail-fast validation**: Errors are caught before execution begins
+- **Clear error messages**: If an output name doesn't exist, you get a list of available named outputs
+- **Backward compatible**: Existing anonymous outputs (`outputs: ["file.txt"]`) work unchanged
+- **Automatic input tracking**: Named outputs are automatically tracked as implicit inputs for dependent tasks
+
+**Error Messages:**
+
+If you reference a non-existent output:
+
+```yaml
+tasks:
+  build:
+    outputs:
+      - bundle: "dist/app.js"
+    cmd: webpack build
+
+  deploy:
+    deps: [build]
+    cmd: echo "{{ dep.build.outputs.missing }}"  # Error!
+```
+
+You'll get a clear error before execution:
+
+```
+Task 'deploy' references output 'missing' from task 'build',
+but 'build' has no output named 'missing'.
+Available named outputs in 'build': bundle
+Hint: Define named outputs like: outputs: [{ missing: 'path/to/file' }]
+```
+
+**Use Cases:**
+
+- **Dynamic artifact names**: Pass generated filenames between tasks
+- **Build metadata**: Reference manifests, checksums, or version files
+- **Multi-stage builds**: Chain compilation steps with specific output references
+- **Deployment pipelines**: Reference exact artifacts to deploy
+- **Configuration propagation**: Pass generated config files through build stages
+
+
+### Private Tasks
+
+Sometimes you may want to define helper tasks that are useful as dependencies but shouldn't be listed when users run `tt --list`. Mark these tasks as private:
+
+```yaml
+tasks:
+  # Private helper task - hidden from --list
+  setup-deps:
+    private: true
+    cmd: |
+      npm install
+      pip install -r requirements.txt
+
+  # Public task that uses the helper
+  build:
+    deps: [setup-deps]
+    cmd: npm run build
+```
+
+**Behavior:**
+- `tt --list` shows only public tasks (`build` in this example)
+- Private tasks can still be executed: `tt setup-deps` works
+- Private tasks work normally as dependencies
+- By default, all tasks are public (`private: false`)
+
+**Use cases:**
+- Internal helper tasks that shouldn't be run directly
+- Implementation details you want to hide from users
+- Shared setup tasks across multiple public tasks
+
+Note that private tasks remain fully functional - they're only hidden from the list view. Users who know the task name can still execute it directly.
+
+
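A quick usage sketch of the behavior described above, assuming the `setup-deps`/`build` recipe from the example (exact `--list` output formatting is not reproduced here):

```bash
tt --list        # lists only `build`; the private `setup-deps` task is hidden
tt setup-deps    # still works: private tasks can be run directly by name
tt build         # runs `setup-deps` first as a dependency, then `build`
```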
 
 ## Environment Variables
 Task Tree supports reading environment variables in two ways:
@@ -1028,6 +1208,42 @@ At the start of each invocation, state is checked for invalid task hashes and no
 
 Task Tree provides several command-line options for controlling task execution:
 
+### Recipe File Selection
+
+Task Tree automatically discovers recipe files in the current directory and parent directories. You can also explicitly specify which file to use.
+
+**Automatic Discovery:**
+
+Task Tree searches for recipe files in the following order of preference:
+
+1. **Standard recipe files** (searched first, in order):
+   - `tasktree.yaml`
+   - `tasktree.yml`
+   - `tt.yaml`
+
+2. **Import files** (searched only if no standard files found):
+   - `*.tasks` files (e.g., `build.tasks`, `deploy.tasks`)
+
+If multiple files of the same priority level exist in the same directory, Task Tree will report an error and ask you to specify which file to use with `--tasks`.
+
+**Manual Selection:**
+
+```bash
+# Specify a recipe file explicitly
+tt --tasks build.tasks build
+tt -T custom-recipe.yaml test
+
+# Useful when you have multiple recipe files in the same directory
+tt --tasks ci.yaml deploy
+```
+
+**File Search Behavior:**
+
+- Task Tree searches **upward** from the current directory to find recipe files
+- **Standard recipe files** (`.yaml`/`.yml`) are always preferred over `*.tasks` files
+- `*.tasks` files are typically used for imports and are only used as main recipes if no standard files exist
+- The `.tasktree-state` file is created in the directory containing the recipe file
+
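For instance, given a hypothetical project layout containing both a standard recipe and an import file, discovery behaves as sketched below (file names are illustrative):

```bash
# project/
# ├── tasktree.yaml    <- picked automatically (standard recipe file)
# └── extra.tasks      <- skipped during discovery while tasktree.yaml exists
tt build                       # uses project/tasktree.yaml, found by searching upward
tt --tasks extra.tasks build   # explicitly select the *.tasks file instead
```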
 ### Execution Control
 
 ```bash
@@ -0,0 +1,14 @@
+tasktree/__init__.py,sha256=N-dZcggJDe4WaloC1MdEh0oMTHlUpBN9AEcAm6leDf4,1167
+tasktree/cli.py,sha256=_GzqQIk2z2Hz17bkTOWhtNW4X4_Ijxn-uEL-WZGt8LM,20858
+tasktree/docker.py,sha256=qvja8G63uAcC73YMVY739egda1_CcBtoqzm0qIJU_Q8,14443
+tasktree/executor.py,sha256=7pzcH2wLWMZPk3hwhzWgz18RVkIPFCKdu23MboWUQs4,45914
+tasktree/graph.py,sha256=9O5LByzMYa8ccedznqKBTb0Xe9N_aajSR1cAcb8zGQE,20366
+tasktree/hasher.py,sha256=o7Akd_AgGkAsnv9biK0AcbhlcqUQ9ne5y_6r4zoFaw0,5493
+tasktree/parser.py,sha256=PVgtGORCpnkb8wcXHFfsyVqDhJ3PwzwCqO3VWiuLQl4,94777
+tasktree/state.py,sha256=Cktl4D8iDZVd55aO2LqVyPrc-BnljkesxxkcMcdcfOY,3541
+tasktree/substitution.py,sha256=3-gdvHbBwPkQPflx3GVSpEEa0vTL_ivdcMIba77gtJc,14225
+tasktree/types.py,sha256=R_YAyO5bMLB6XZnkMRT7VAtlkA_Xx6xu0aIpzQjrBXs,4357
+tasktree-0.0.19.dist-info/METADATA,sha256=KTLp0p1Mkzzvah-Hy15KrXnTcKju5dlQyvDvEdnDOmg,43609
+tasktree-0.0.19.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+tasktree-0.0.19.dist-info/entry_points.txt,sha256=lQINlvRYnimvteBbnhH84A9clTg8NnpEjCWqWkqg8KE,40
+tasktree-0.0.19.dist-info/RECORD,,
@@ -1,14 +0,0 @@
-tasktree/__init__.py,sha256=MVmdvKb3JdqLlo0x2_TPGMfgFC0HsDnP79HAzGnFnjI,1081
-tasktree/cli.py,sha256=Uhv0RNFrogjvqxBYKYIfxEPd0SdYAIpXH7SPGIxQnmk,20136
-tasktree/docker.py,sha256=qvja8G63uAcC73YMVY739egda1_CcBtoqzm0qIJU_Q8,14443
-tasktree/executor.py,sha256=QQcABThmof0MLTtwpJKpyqh80hr3YRIqqs3NZ-Ry-Bk,44873
-tasktree/graph.py,sha256=yITp71RfhJ7sdC-2zRf89SHYZqQyF3XVAnaqX-XnMdE,15821
-tasktree/hasher.py,sha256=0GrnCfwAXnwq_kpnHFFb12B5_2VFNXx6Ng7hTdcCyXo,4415
-tasktree/parser.py,sha256=o2AQuk9xeM9JCtyMzDeM0uJBsbJEhzPDxsPg9GwmJt4,88940
-tasktree/state.py,sha256=Cktl4D8iDZVd55aO2LqVyPrc-BnljkesxxkcMcdcfOY,3541
-tasktree/substitution.py,sha256=qG7SyEHn1PAKteWA0AgA1dUNbJfwQTupCLRq9FvOBD0,10724
-tasktree/types.py,sha256=R_YAyO5bMLB6XZnkMRT7VAtlkA_Xx6xu0aIpzQjrBXs,4357
-tasktree-0.0.18.dist-info/METADATA,sha256=u70mwnKCeb3sQkn5_B4cf0AOc8mI-FhvdTUbW0hoB3A,37234
-tasktree-0.0.18.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-tasktree-0.0.18.dist-info/entry_points.txt,sha256=lQINlvRYnimvteBbnhH84A9clTg8NnpEjCWqWkqg8KE,40
-tasktree-0.0.18.dist-info/RECORD,,