atomicguard 0.1.0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. atomicguard/__init__.py +8 -3
  2. atomicguard/application/action_pair.py +7 -1
  3. atomicguard/application/agent.py +46 -6
  4. atomicguard/application/workflow.py +494 -11
  5. atomicguard/domain/__init__.py +4 -1
  6. atomicguard/domain/exceptions.py +19 -0
  7. atomicguard/domain/interfaces.py +137 -6
  8. atomicguard/domain/models.py +120 -6
  9. atomicguard/guards/__init__.py +16 -5
  10. atomicguard/guards/composite/__init__.py +11 -0
  11. atomicguard/guards/dynamic/__init__.py +13 -0
  12. atomicguard/guards/dynamic/test_runner.py +207 -0
  13. atomicguard/guards/interactive/__init__.py +11 -0
  14. atomicguard/guards/static/__init__.py +13 -0
  15. atomicguard/guards/static/imports.py +177 -0
  16. atomicguard/infrastructure/__init__.py +4 -1
  17. atomicguard/infrastructure/llm/__init__.py +3 -1
  18. atomicguard/infrastructure/llm/huggingface.py +180 -0
  19. atomicguard/infrastructure/llm/mock.py +32 -6
  20. atomicguard/infrastructure/llm/ollama.py +40 -17
  21. atomicguard/infrastructure/persistence/__init__.py +7 -1
  22. atomicguard/infrastructure/persistence/checkpoint.py +361 -0
  23. atomicguard/infrastructure/persistence/filesystem.py +69 -5
  24. atomicguard/infrastructure/persistence/memory.py +25 -3
  25. atomicguard/infrastructure/registry.py +126 -0
  26. atomicguard/schemas/__init__.py +142 -0
  27. {atomicguard-0.1.0.dist-info → atomicguard-1.2.0.dist-info}/METADATA +75 -13
  28. atomicguard-1.2.0.dist-info/RECORD +37 -0
  29. {atomicguard-0.1.0.dist-info → atomicguard-1.2.0.dist-info}/WHEEL +1 -1
  30. atomicguard-1.2.0.dist-info/entry_points.txt +4 -0
  31. atomicguard/guards/test_runner.py +0 -176
  32. atomicguard-0.1.0.dist-info/RECORD +0 -27
  33. /atomicguard/guards/{base.py → composite/base.py} +0 -0
  34. /atomicguard/guards/{human.py → interactive/human.py} +0 -0
  35. /atomicguard/guards/{syntax.py → static/syntax.py} +0 -0
  36. {atomicguard-0.1.0.dist-info → atomicguard-1.2.0.dist-info}/licenses/LICENSE +0 -0
  37. {atomicguard-0.1.0.dist-info → atomicguard-1.2.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,177 @@
1
+ """
2
+ Import validation guard.
3
+
4
+ Pure AST-based guard that validates all used names are imported or defined.
5
+ Does NOT execute code - uses static analysis only.
6
+ """
7
+
8
+ import ast
9
+ import builtins
10
+ from typing import Any
11
+
12
+ from atomicguard.domain.interfaces import GuardInterface
13
+ from atomicguard.domain.models import Artifact, GuardResult
14
+
15
+
16
class ImportGuard(GuardInterface):
    """
    Validates that all names used in code are imported or defined.

    Pure, static guard using AST analysis. Does NOT execute code.
    Single responsibility: verify that referenced names exist.

    This catches common LLM mistakes like using `pytest.raises()`
    without `import pytest`.
    """

    # Python builtins that don't need to be imported
    BUILTINS = set(dir(builtins))

    def validate(self, artifact: Artifact, **_deps: Any) -> GuardResult:
        """
        Validate that all used names are defined or imported.

        Args:
            artifact: The code artifact to validate
            **_deps: Ignored dependencies

        Returns:
            GuardResult with pass/fail and feedback
        """
        code = artifact.content
        if not code:
            return GuardResult(passed=False, feedback="No code provided")

        try:
            tree = ast.parse(code)
        except SyntaxError as e:
            return GuardResult(passed=False, feedback=f"Syntax error: {e}")

        defined = self._collect_defined_names(tree)
        used = self._collect_used_names(tree)

        # Names that are used but not defined, imported, or builtin
        undefined = used - defined - self.BUILTINS

        if undefined:
            sorted_names = ", ".join(sorted(undefined))
            return GuardResult(
                passed=False,
                feedback=f"Undefined names (missing imports?): {sorted_names}",
            )
        return GuardResult(passed=True, feedback="All imports valid")

    def _collect_defined_names(self, tree: ast.AST) -> set[str]:
        """
        Collect all names defined in the code.

        Includes:
        - Import statements (import X, from X import Y)
        - Function/lambda definitions, including their parameters
        - Class definitions (class Bar)
        - Assignments (x = ...), annotated, augmented, and walrus
        - For loop variables (for x in ..., async for x in ...)
        - With statement variables (with ... as x, async with ... as x)
        - Exception handlers (except E as e)
        - Comprehension variables
        - global / nonlocal declarations (bound in another scope)

        NOTE: this is a flat, scope-free approximation — a name defined
        anywhere in the module counts as defined everywhere. That trades
        precision for zero false positives on ordinary generated code.
        """
        defined: set[str] = set()

        for node in ast.walk(tree):
            # Import statements
            if isinstance(node, ast.Import):
                for alias in node.names:
                    # `import a.b` binds only the top-level package name `a`
                    name = alias.asname if alias.asname else alias.name.split(".")[0]
                    defined.add(name)

            elif isinstance(node, ast.ImportFrom):
                for alias in node.names:
                    name = alias.asname if alias.asname else alias.name
                    if name != "*":  # from X import * doesn't define specific names
                        defined.add(name)

            # Function definitions (parameters included)
            elif isinstance(node, ast.FunctionDef | ast.AsyncFunctionDef):
                defined.add(node.name)
                defined.update(self._collect_arg_names(node.args))

            # Lambda parameters — without this, `f = lambda x: x` would
            # falsely report `x` as undefined.
            elif isinstance(node, ast.Lambda):
                defined.update(self._collect_arg_names(node.args))

            elif isinstance(node, ast.ClassDef):
                defined.add(node.name)

            # Assignments
            elif isinstance(node, ast.Assign):
                for target in node.targets:
                    defined.update(self._extract_names_from_target(target))

            elif isinstance(node, ast.AnnAssign):
                if node.target:
                    defined.update(self._extract_names_from_target(node.target))

            elif isinstance(node, ast.AugAssign):
                defined.update(self._extract_names_from_target(node.target))

            elif isinstance(node, ast.NamedExpr):  # Walrus operator :=
                defined.add(node.target.id)

            # For loop variables (sync and async)
            elif isinstance(node, ast.For | ast.AsyncFor):
                defined.update(self._extract_names_from_target(node.target))

            # With statement variables (sync and async)
            elif isinstance(node, ast.With | ast.AsyncWith):
                for item in node.items:
                    if item.optional_vars:
                        defined.update(
                            self._extract_names_from_target(item.optional_vars)
                        )

            # Exception handlers
            elif isinstance(node, ast.ExceptHandler):
                if node.name:
                    defined.add(node.name)

            # Comprehension variables
            elif isinstance(node, ast.comprehension):
                defined.update(self._extract_names_from_target(node.target))

            # global/nonlocal declare names whose binding lives elsewhere;
            # treat them as defined to avoid false positives.
            elif isinstance(node, ast.Global | ast.Nonlocal):
                defined.update(node.names)

        return defined

    def _collect_arg_names(self, args: ast.arguments) -> set[str]:
        """Collect every parameter name from a def/lambda signature."""
        names = {arg.arg for arg in args.args}
        names.update(arg.arg for arg in args.posonlyargs)
        names.update(arg.arg for arg in args.kwonlyargs)
        if args.vararg:
            names.add(args.vararg.arg)
        if args.kwarg:
            names.add(args.kwarg.arg)
        return names

    def _extract_names_from_target(self, target: ast.AST) -> set[str]:
        """Extract variable names from an assignment target."""
        names: set[str] = set()

        if isinstance(target, ast.Name):
            names.add(target.id)
        elif isinstance(target, ast.Tuple | ast.List):
            for elt in target.elts:
                names.update(self._extract_names_from_target(elt))
        elif isinstance(target, ast.Starred):
            names.update(self._extract_names_from_target(target.value))
        # ast.Attribute and ast.Subscript don't define new names

        return names

    def _collect_used_names(self, tree: ast.AST) -> set[str]:
        """
        Collect all names used (referenced) in the code.

        Only collects top-level names (e.g., `pytest` from `pytest.raises()`).
        """
        used: set[str] = set()

        for node in ast.walk(tree):
            # Only Name nodes in Load context (being read, not assigned)
            if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Load):
                used.add(node.id)

        return used
@@ -1,7 +1,7 @@
1
1
  """
2
2
  Infrastructure layer for the Dual-State Framework.
3
3
 
4
- Contains adapters for external concerns (persistence, LLMs, etc.).
4
+ Contains adapters for external concerns (persistence, LLMs, registry, etc.).
5
5
  """
6
6
 
7
7
  from atomicguard.infrastructure.llm import (
@@ -12,6 +12,7 @@ from atomicguard.infrastructure.persistence import (
12
12
  FilesystemArtifactDAG,
13
13
  InMemoryArtifactDAG,
14
14
  )
15
+ from atomicguard.infrastructure.registry import GeneratorRegistry
15
16
 
16
17
  __all__ = [
17
18
  # Persistence
@@ -20,4 +21,6 @@ __all__ = [
20
21
  # LLM
21
22
  "OllamaGenerator",
22
23
  "MockGenerator",
24
+ # Registry
25
+ "GeneratorRegistry",
23
26
  ]
@@ -2,10 +2,12 @@
2
2
  LLM adapters for artifact generation.
3
3
  """
4
4
 
5
+ from atomicguard.infrastructure.llm.huggingface import HuggingFaceGenerator
5
6
  from atomicguard.infrastructure.llm.mock import MockGenerator
6
7
  from atomicguard.infrastructure.llm.ollama import OllamaGenerator
7
8
 
8
9
  __all__ = [
9
- "OllamaGenerator",
10
+ "HuggingFaceGenerator",
10
11
  "MockGenerator",
12
+ "OllamaGenerator",
11
13
  ]
@@ -0,0 +1,180 @@
1
+ """
2
+ HuggingFace Inference API generator implementation.
3
+
4
+ Connects to HuggingFace Inference Providers via the huggingface_hub
5
+ InferenceClient for chat completion.
6
+ """
7
+
8
+ import os
9
+ import re
10
+ import uuid
11
+ from dataclasses import dataclass
12
+ from datetime import datetime
13
+ from typing import Any, cast
14
+
15
+ from atomicguard.domain.interfaces import GeneratorInterface
16
+ from atomicguard.domain.models import (
17
+ Artifact,
18
+ ArtifactStatus,
19
+ Context,
20
+ ContextSnapshot,
21
+ )
22
+ from atomicguard.domain.prompts import PromptTemplate
23
+
24
+
25
+ @dataclass
26
+ class HuggingFaceGeneratorConfig:
27
+ """Configuration for HuggingFaceGenerator.
28
+
29
+ This typed config ensures unknown fields are rejected at construction time.
30
+ """
31
+
32
+ model: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
33
+ api_key: str | None = None # Auto-detects from HF_TOKEN env var
34
+ provider: str | None = None # e.g. "auto", "hf-inference", "together"
35
+ timeout: float = 120.0
36
+ temperature: float = 0.7
37
+ max_tokens: int = 4096
38
+
39
+
40
+ class HuggingFaceGenerator(GeneratorInterface):
41
+ """Connects to HuggingFace Inference API using huggingface_hub."""
42
+
43
+ config_class = HuggingFaceGeneratorConfig
44
+
45
+ def __init__(
46
+ self, config: HuggingFaceGeneratorConfig | None = None, **kwargs: Any
47
+ ) -> None:
48
+ """
49
+ Args:
50
+ config: Typed configuration object (preferred)
51
+ **kwargs: Legacy kwargs for backward compatibility (deprecated)
52
+ """
53
+ if config is None:
54
+ config = HuggingFaceGeneratorConfig(**kwargs)
55
+
56
+ try:
57
+ from huggingface_hub import InferenceClient
58
+ except ImportError as err:
59
+ raise ImportError(
60
+ "huggingface_hub library required: pip install huggingface_hub"
61
+ ) from err
62
+
63
+ api_key = config.api_key
64
+ if api_key is None:
65
+ api_key = os.environ.get("HF_TOKEN")
66
+ if not api_key:
67
+ raise ValueError(
68
+ "HuggingFace API key required: set HF_TOKEN environment "
69
+ "variable or pass api_key in config"
70
+ )
71
+
72
+ client_kwargs: dict[str, Any] = {
73
+ "api_key": api_key,
74
+ "timeout": config.timeout,
75
+ }
76
+ if config.provider is not None:
77
+ client_kwargs["provider"] = config.provider
78
+
79
+ self._model = config.model
80
+ self._client = InferenceClient(**client_kwargs)
81
+ self._temperature = config.temperature
82
+ self._max_tokens = config.max_tokens
83
+ self._version_counter = 0
84
+
85
+ def generate(
86
+ self,
87
+ context: Context,
88
+ template: PromptTemplate | None = None,
89
+ action_pair_id: str = "unknown",
90
+ workflow_id: str = "unknown",
91
+ ) -> Artifact:
92
+ """Generate an artifact based on context."""
93
+ if template:
94
+ prompt = template.render(context)
95
+ else:
96
+ prompt = self._build_basic_prompt(context)
97
+
98
+ messages: list[dict[str, str]] = [
99
+ {
100
+ "role": "system",
101
+ "content": (
102
+ "You are a Python programming assistant. "
103
+ "Provide complete, runnable code in a markdown block:\n"
104
+ "```python\n# code\n```"
105
+ ),
106
+ },
107
+ {"role": "user", "content": prompt},
108
+ ]
109
+
110
+ response = self._client.chat_completion(
111
+ messages=cast(Any, messages),
112
+ model=self._model,
113
+ temperature=self._temperature,
114
+ max_tokens=self._max_tokens,
115
+ )
116
+
117
+ content = response.choices[0].message.content or ""
118
+ code = self._extract_code(content)
119
+
120
+ self._version_counter += 1
121
+
122
+ return Artifact(
123
+ artifact_id=str(uuid.uuid4()),
124
+ workflow_id=workflow_id,
125
+ content=code,
126
+ previous_attempt_id=None,
127
+ parent_action_pair_id=None,
128
+ action_pair_id=action_pair_id,
129
+ created_at=datetime.now().isoformat(),
130
+ attempt_number=self._version_counter,
131
+ status=ArtifactStatus.PENDING,
132
+ guard_result=None,
133
+ feedback="",
134
+ context=ContextSnapshot(
135
+ workflow_id=workflow_id,
136
+ specification=context.specification,
137
+ constraints=context.ambient.constraints,
138
+ feedback_history=(),
139
+ dependency_artifacts=context.dependency_artifacts,
140
+ ),
141
+ )
142
+
143
+ def _extract_code(self, content: str) -> str:
144
+ """Extract Python code from response."""
145
+ if not content or content.isspace():
146
+ return ""
147
+
148
+ # Try python block
149
+ match = re.search(r"```python\n(.*?)\n```", content, re.DOTALL)
150
+ if match:
151
+ return match.group(1)
152
+
153
+ # Try generic block
154
+ match = re.search(r"```\n(.*?)\n```", content, re.DOTALL)
155
+ if match:
156
+ return match.group(1)
157
+
158
+ # Try first def/import/class
159
+ match = re.search(r"^(def |import |class )", content, re.MULTILINE)
160
+ if match:
161
+ return content[match.start() :]
162
+
163
+ # No code block found - return empty to trigger guard validation failure
164
+ return ""
165
+
166
+ def _build_basic_prompt(self, context: Context) -> str:
167
+ """Build a basic prompt from context."""
168
+ parts = [context.specification]
169
+
170
+ if context.current_artifact:
171
+ parts.append(f"\nPrevious attempt:\n{context.current_artifact}")
172
+
173
+ if context.feedback_history:
174
+ feedback_text = "\n".join(
175
+ f"Attempt {i + 1} feedback: {f}"
176
+ for i, (_, f) in enumerate(context.feedback_history)
177
+ )
178
+ parts.append(f"\nFeedback history:\n{feedback_text}")
179
+
180
+ return "\n".join(parts)
@@ -5,26 +5,49 @@ Returns predefined responses in sequence.
5
5
  """
6
6
 
7
7
  import uuid
8
+ from dataclasses import dataclass, field
8
9
  from datetime import datetime
10
+ from typing import Any
9
11
 
10
12
  from atomicguard.domain.interfaces import GeneratorInterface
11
13
  from atomicguard.domain.models import Artifact, ArtifactStatus, Context, ContextSnapshot
12
14
  from atomicguard.domain.prompts import PromptTemplate
13
15
 
14
16
 
17
+ @dataclass
18
+ class MockGeneratorConfig:
19
+ """Configuration for MockGenerator.
20
+
21
+ This typed config ensures unknown fields are rejected at construction time.
22
+ """
23
+
24
+ responses: list[str] = field(default_factory=list)
25
+
26
+
15
27
  class MockGenerator(GeneratorInterface):
16
28
  """Returns predefined responses for testing."""
17
29
 
18
- def __init__(self, responses: list[str]):
30
+ config_class = MockGeneratorConfig
31
+
32
+ def __init__(self, config: MockGeneratorConfig | None = None, **kwargs: Any):
19
33
  """
20
34
  Args:
21
- responses: List of response strings to return in sequence
35
+ config: Typed configuration object (preferred)
36
+ **kwargs: Legacy kwargs for backward compatibility (deprecated)
22
37
  """
23
- self._responses = responses
38
+ # Support both config object and legacy kwargs
39
+ if config is None:
40
+ config = MockGeneratorConfig(**kwargs)
41
+
42
+ self._responses = config.responses
24
43
  self._call_count = 0
25
44
 
26
45
  def generate(
27
- self, _context: Context, _template: PromptTemplate | None = None
46
+ self,
47
+ _context: Context,
48
+ _template: PromptTemplate | None = None,
49
+ action_pair_id: str = "unknown",
50
+ workflow_id: str = "unknown",
28
51
  ) -> Artifact:
29
52
  """Return the next predefined response."""
30
53
  if self._call_count >= len(self._responses):
@@ -35,19 +58,22 @@ class MockGenerator(GeneratorInterface):
35
58
 
36
59
  return Artifact(
37
60
  artifact_id=str(uuid.uuid4()),
61
+ workflow_id=workflow_id,
38
62
  content=content,
39
63
  previous_attempt_id=None,
40
- action_pair_id="mock",
64
+ parent_action_pair_id=None,
65
+ action_pair_id=action_pair_id,
41
66
  created_at=datetime.now().isoformat(),
42
67
  attempt_number=self._call_count,
43
68
  status=ArtifactStatus.PENDING,
44
69
  guard_result=None,
45
70
  feedback="",
46
71
  context=ContextSnapshot(
72
+ workflow_id=workflow_id,
47
73
  specification="",
48
74
  constraints="",
49
75
  feedback_history=(),
50
- dependency_ids=(),
76
+ dependency_artifacts=(),
51
77
  ),
52
78
  )
53
79
 
@@ -6,6 +6,7 @@ Connects to Ollama instances via the OpenAI-compatible API.
6
6
 
7
7
  import re
8
8
  import uuid
9
+ from dataclasses import dataclass
9
10
  from datetime import datetime
10
11
  from typing import Any, cast
11
12
 
@@ -21,36 +22,52 @@ from atomicguard.domain.prompts import PromptTemplate
21
22
  DEFAULT_OLLAMA_URL = "http://localhost:11434/v1"
22
23
 
23
24
 
25
+ @dataclass
26
+ class OllamaGeneratorConfig:
27
+ """Configuration for OllamaGenerator.
28
+
29
+ This typed config ensures unknown fields are rejected at construction time.
30
+ """
31
+
32
+ model: str = "qwen2.5-coder:7b"
33
+ base_url: str = DEFAULT_OLLAMA_URL
34
+ timeout: float = 120.0
35
+
36
+
24
37
  class OllamaGenerator(GeneratorInterface):
25
38
  """Connects to Ollama instance using OpenAI-compatible API."""
26
39
 
27
- def __init__(
28
- self,
29
- model: str = "qwen2.5-coder:7b",
30
- base_url: str = DEFAULT_OLLAMA_URL,
31
- timeout: float = 120.0,
32
- ):
40
+ config_class = OllamaGeneratorConfig
41
+
42
+ def __init__(self, config: OllamaGeneratorConfig | None = None, **kwargs: Any):
33
43
  """
34
44
  Args:
35
- model: Ollama model name
36
- base_url: Ollama API URL
37
- timeout: Request timeout in seconds
45
+ config: Typed configuration object (preferred)
46
+ **kwargs: Legacy kwargs for backward compatibility (deprecated)
38
47
  """
48
+ # Support both config object and legacy kwargs
49
+ if config is None:
50
+ config = OllamaGeneratorConfig(**kwargs)
51
+
39
52
  try:
40
53
  from openai import OpenAI
41
54
  except ImportError as err:
42
55
  raise ImportError("openai library required: pip install openai") from err
43
56
 
44
- self._model = model
57
+ self._model = config.model
45
58
  self._client = OpenAI(
46
- base_url=base_url,
59
+ base_url=config.base_url,
47
60
  api_key="ollama", # required but unused
48
- timeout=timeout,
61
+ timeout=config.timeout,
49
62
  )
50
63
  self._version_counter = 0
51
64
 
52
65
  def generate(
53
- self, context: Context, template: PromptTemplate | None = None
66
+ self,
67
+ context: Context,
68
+ template: PromptTemplate | None = None,
69
+ action_pair_id: str = "unknown",
70
+ workflow_id: str = "unknown",
54
71
  ) -> Artifact:
55
72
  """Generate an artifact based on context."""
56
73
  # Build prompt
@@ -79,24 +96,30 @@ class OllamaGenerator(GeneratorInterface):
79
96
 
80
97
  return Artifact(
81
98
  artifact_id=str(uuid.uuid4()),
99
+ workflow_id=workflow_id,
82
100
  content=code,
83
101
  previous_attempt_id=None,
84
- action_pair_id="ollama",
102
+ parent_action_pair_id=None,
103
+ action_pair_id=action_pair_id,
85
104
  created_at=datetime.now().isoformat(),
86
105
  attempt_number=self._version_counter,
87
106
  status=ArtifactStatus.PENDING,
88
107
  guard_result=None,
89
108
  feedback="",
90
109
  context=ContextSnapshot(
110
+ workflow_id=workflow_id,
91
111
  specification=context.specification,
92
112
  constraints=context.ambient.constraints,
93
113
  feedback_history=(),
94
- dependency_ids=(),
114
+ dependency_artifacts=context.dependency_artifacts,
95
115
  ),
96
116
  )
97
117
 
98
118
  def _extract_code(self, content: str) -> str:
99
119
  """Extract Python code from response."""
120
+ if not content or content.isspace():
121
+ return ""
122
+
100
123
  # Try python block
101
124
  match = re.search(r"```python\n(.*?)\n```", content, re.DOTALL)
102
125
  if match:
@@ -112,8 +135,8 @@ class OllamaGenerator(GeneratorInterface):
112
135
  if match:
113
136
  return content[match.start() :]
114
137
 
115
- # Fallback: full content
116
- return content
138
+ # No code block found - return empty to trigger guard validation failure
139
+ return ""
117
140
 
118
141
  def _build_basic_prompt(self, context: Context) -> str:
119
142
  """Build a basic prompt from context."""
@@ -1,11 +1,17 @@
1
1
  """
2
- Persistence adapters for the Artifact DAG.
2
+ Persistence adapters for the Artifact and Checkpoint DAGs.
3
3
  """
4
4
 
5
+ from atomicguard.infrastructure.persistence.checkpoint import (
6
+ FilesystemCheckpointDAG,
7
+ InMemoryCheckpointDAG,
8
+ )
5
9
  from atomicguard.infrastructure.persistence.filesystem import FilesystemArtifactDAG
6
10
  from atomicguard.infrastructure.persistence.memory import InMemoryArtifactDAG
7
11
 
8
12
  __all__ = [
9
13
  "InMemoryArtifactDAG",
10
14
  "FilesystemArtifactDAG",
15
+ "InMemoryCheckpointDAG",
16
+ "FilesystemCheckpointDAG",
11
17
  ]