atomicguard 1.1.0__tar.gz → 1.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. {atomicguard-1.1.0/src/atomicguard.egg-info → atomicguard-1.2.0}/PKG-INFO +68 -6
  2. {atomicguard-1.1.0 → atomicguard-1.2.0}/README.md +67 -5
  3. {atomicguard-1.1.0 → atomicguard-1.2.0}/pyproject.toml +2 -1
  4. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/__init__.py +1 -1
  5. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/infrastructure/llm/__init__.py +2 -0
  6. atomicguard-1.2.0/src/atomicguard/infrastructure/llm/huggingface.py +180 -0
  7. {atomicguard-1.1.0 → atomicguard-1.2.0/src/atomicguard.egg-info}/PKG-INFO +68 -6
  8. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard.egg-info/SOURCES.txt +1 -0
  9. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard.egg-info/entry_points.txt +1 -0
  10. {atomicguard-1.1.0 → atomicguard-1.2.0}/LICENSE +0 -0
  11. {atomicguard-1.1.0 → atomicguard-1.2.0}/setup.cfg +0 -0
  12. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/application/__init__.py +0 -0
  13. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/application/action_pair.py +0 -0
  14. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/application/agent.py +0 -0
  15. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/application/workflow.py +0 -0
  16. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/domain/__init__.py +0 -0
  17. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/domain/exceptions.py +0 -0
  18. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/domain/interfaces.py +0 -0
  19. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/domain/models.py +0 -0
  20. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/domain/prompts.py +0 -0
  21. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/guards/__init__.py +0 -0
  22. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/guards/composite/__init__.py +0 -0
  23. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/guards/composite/base.py +0 -0
  24. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/guards/dynamic/__init__.py +0 -0
  25. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/guards/dynamic/test_runner.py +0 -0
  26. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/guards/interactive/__init__.py +0 -0
  27. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/guards/interactive/human.py +0 -0
  28. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/guards/static/__init__.py +0 -0
  29. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/guards/static/imports.py +0 -0
  30. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/guards/static/syntax.py +0 -0
  31. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/infrastructure/__init__.py +0 -0
  32. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/infrastructure/llm/mock.py +0 -0
  33. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/infrastructure/llm/ollama.py +0 -0
  34. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/infrastructure/persistence/__init__.py +0 -0
  35. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/infrastructure/persistence/checkpoint.py +0 -0
  36. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/infrastructure/persistence/filesystem.py +0 -0
  37. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/infrastructure/persistence/memory.py +0 -0
  38. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/infrastructure/registry.py +0 -0
  39. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/schemas/__init__.py +0 -0
  40. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard.egg-info/dependency_links.txt +0 -0
  41. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard.egg-info/requires.txt +0 -0
  42. {atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard.egg-info/top_level.txt +0 -0
{atomicguard-1.1.0/src/atomicguard.egg-info → atomicguard-1.2.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: atomicguard
-Version: 1.1.0
+Version: 1.2.0
 Summary: A Dual-State Agent Framework for reliable LLM code generation with guard-validated loops
 Author-email: Matthew Thompson <thompsonson@gmail.com>
 Maintainer-email: Matthew Thompson <thompsonson@gmail.com>
@@ -93,6 +93,67 @@ print(artifact.content)
 
 See [examples/](examples/) for more detailed usage, including a [mock example](examples/basic_mock.py) that works without an LLM.
 
+## LLM Backends
+
+AtomicGuard supports multiple LLM backends. Each generator implements `GeneratorInterface` and can be swapped in with no other code changes.
+
+### Ollama (local or cloud)
+
+Uses the OpenAI-compatible API. Works with any Ollama-served model:
+
+```python
+from atomicguard.infrastructure.llm import OllamaGenerator
+
+# Local instance (default: http://localhost:11434/v1)
+generator = OllamaGenerator(model="qwen2.5-coder:7b")
+```
+
+### HuggingFace Inference API
+
+Connects to HuggingFace Inference Providers via `huggingface_hub`. Supports any model available through the HF Inference API, including third-party providers like Together AI.
+
+```bash
+# Install the optional dependency
+pip install huggingface_hub
+
+# Set your API token
+export HF_TOKEN="hf_your_token_here"
+```
+
+```python
+from atomicguard.infrastructure.llm import HuggingFaceGenerator
+from atomicguard.infrastructure.llm.huggingface import HuggingFaceGeneratorConfig
+
+# Default: Qwen/Qwen2.5-Coder-32B-Instruct
+generator = HuggingFaceGenerator()
+
+# Custom model and provider
+generator = HuggingFaceGenerator(HuggingFaceGeneratorConfig(
+    model="Qwen/Qwen2.5-Coder-32B-Instruct",
+    provider="together",  # or "auto", "hf-inference"
+    temperature=0.7,
+    max_tokens=4096,
+))
+```
+
+Drop-in replacement in any workflow:
+
+```python
+from atomicguard import (
+    SyntaxGuard, TestGuard, CompositeGuard,
+    ActionPair, DualStateAgent, InMemoryArtifactDAG
+)
+from atomicguard.infrastructure.llm import HuggingFaceGenerator
+
+generator = HuggingFaceGenerator()
+guard = CompositeGuard([SyntaxGuard(), TestGuard("assert add(2, 3) == 5")])
+action_pair = ActionPair(generator=generator, guard=guard)
+agent = DualStateAgent(action_pair, InMemoryArtifactDAG(), rmax=3)
+
+artifact = agent.execute("Write a function that adds two numbers")
+print(artifact.content)
+```
+
 ## Benchmarks
 
 Run the simulation from the paper:
@@ -122,15 +183,16 @@ If you use this framework in your research, please cite the paper:
 > Thompson, M. (2025). Managing the Stochastic: Foundations of Learning in Neuro-Symbolic Systems for Software Engineering. arXiv preprint arXiv:2512.20660.
 
 ```bibtex
-@article{thompson2025managing,
+@misc{thompson2025managing,
   title={Managing the Stochastic: Foundations of Learning in Neuro-Symbolic Systems for Software Engineering},
   author={Thompson, Matthew},
-  journal={arXiv preprint arXiv:2512.20660},
   year={2025},
-  url={[https://arxiv.org/abs/2512.20660](https://arxiv.org/abs/2512.20660)}
+  eprint={2512.20660},
+  archivePrefix={arXiv},
+  primaryClass={cs.LG},
+  url={https://arxiv.org/abs/2512.20660}
 }
-
-```
+```
 
 ## License
 
{atomicguard-1.1.0 → atomicguard-1.2.0}/README.md

@@ -59,6 +59,67 @@ print(artifact.content)
 
 See [examples/](examples/) for more detailed usage, including a [mock example](examples/basic_mock.py) that works without an LLM.
 
+## LLM Backends
+
+AtomicGuard supports multiple LLM backends. Each generator implements `GeneratorInterface` and can be swapped in with no other code changes.
+
+### Ollama (local or cloud)
+
+Uses the OpenAI-compatible API. Works with any Ollama-served model:
+
+```python
+from atomicguard.infrastructure.llm import OllamaGenerator
+
+# Local instance (default: http://localhost:11434/v1)
+generator = OllamaGenerator(model="qwen2.5-coder:7b")
+```
+
+### HuggingFace Inference API
+
+Connects to HuggingFace Inference Providers via `huggingface_hub`. Supports any model available through the HF Inference API, including third-party providers like Together AI.
+
+```bash
+# Install the optional dependency
+pip install huggingface_hub
+
+# Set your API token
+export HF_TOKEN="hf_your_token_here"
+```
+
+```python
+from atomicguard.infrastructure.llm import HuggingFaceGenerator
+from atomicguard.infrastructure.llm.huggingface import HuggingFaceGeneratorConfig
+
+# Default: Qwen/Qwen2.5-Coder-32B-Instruct
+generator = HuggingFaceGenerator()
+
+# Custom model and provider
+generator = HuggingFaceGenerator(HuggingFaceGeneratorConfig(
+    model="Qwen/Qwen2.5-Coder-32B-Instruct",
+    provider="together",  # or "auto", "hf-inference"
+    temperature=0.7,
+    max_tokens=4096,
+))
+```
+
+Drop-in replacement in any workflow:
+
+```python
+from atomicguard import (
+    SyntaxGuard, TestGuard, CompositeGuard,
+    ActionPair, DualStateAgent, InMemoryArtifactDAG
+)
+from atomicguard.infrastructure.llm import HuggingFaceGenerator
+
+generator = HuggingFaceGenerator()
+guard = CompositeGuard([SyntaxGuard(), TestGuard("assert add(2, 3) == 5")])
+action_pair = ActionPair(generator=generator, guard=guard)
+agent = DualStateAgent(action_pair, InMemoryArtifactDAG(), rmax=3)
+
+artifact = agent.execute("Write a function that adds two numbers")
+print(artifact.content)
+```
+
 ## Benchmarks
 
 Run the simulation from the paper:
@@ -88,15 +149,16 @@ If you use this framework in your research, please cite the paper:
 > Thompson, M. (2025). Managing the Stochastic: Foundations of Learning in Neuro-Symbolic Systems for Software Engineering. arXiv preprint arXiv:2512.20660.
 
 ```bibtex
-@article{thompson2025managing,
+@misc{thompson2025managing,
   title={Managing the Stochastic: Foundations of Learning in Neuro-Symbolic Systems for Software Engineering},
   author={Thompson, Matthew},
-  journal={arXiv preprint arXiv:2512.20660},
   year={2025},
-  url={[https://arxiv.org/abs/2512.20660](https://arxiv.org/abs/2512.20660)}
+  eprint={2512.20660},
+  archivePrefix={arXiv},
+  primaryClass={cs.LG},
+  url={https://arxiv.org/abs/2512.20660}
 }
-
-```
+```
 
 ## License
 
{atomicguard-1.1.0 → atomicguard-1.2.0}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "atomicguard"
-version = "1.1.0"
+version = "1.2.0"
 description = "A Dual-State Agent Framework for reliable LLM code generation with guard-validated loops"
 readme = "README.md"
 license = { text = "MIT" }
@@ -51,6 +51,7 @@ Changelog = "https://github.com/thompsonson/atomicguard/blob/main/CHANGELOG.md"
 [project.entry-points."atomicguard.generators"]
 # Core generators
 OllamaGenerator = "atomicguard.infrastructure.llm:OllamaGenerator"
+HuggingFaceGenerator = "atomicguard.infrastructure.llm:HuggingFaceGenerator"
 MockGenerator = "atomicguard.infrastructure.llm:MockGenerator"
 
 # Note: Dev dependencies are in [dependency-groups] below, not here.
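The new entry point registers `HuggingFaceGenerator` in the `atomicguard.generators` group alongside the existing generators. AtomicGuard's own consumer of this group (`registry.py`) is unchanged in this diff, so as a sketch only, standard-library discovery of the group (Python 3.10+) would look like this:

```python
from importlib.metadata import entry_points

# Enumerate every generator class registered under the group added above.
# Hypothetical consumer code; AtomicGuard's registry.py is not shown in this diff.
for ep in entry_points(group="atomicguard.generators"):
    generator_cls = ep.load()  # imports atomicguard.infrastructure.llm:<Name>
    print(f"{ep.name} -> {generator_cls.__module__}.{generator_cls.__qualname__}")
```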
{atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/__init__.py

@@ -73,7 +73,7 @@ from atomicguard.infrastructure.persistence import (
     InMemoryArtifactDAG,
 )
 
-__version__ = "1.1.0"
+__version__ = "1.2.0"
 
 __all__ = [
     # Version
{atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard/infrastructure/llm/__init__.py

@@ -2,10 +2,12 @@
 LLM adapters for artifact generation.
 """
 
+from atomicguard.infrastructure.llm.huggingface import HuggingFaceGenerator
 from atomicguard.infrastructure.llm.mock import MockGenerator
 from atomicguard.infrastructure.llm.ollama import OllamaGenerator
 
 __all__ = [
+    "HuggingFaceGenerator",
     "MockGenerator",
     "OllamaGenerator",
 ]
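Re-exporting `HuggingFaceGenerator` at the package level does not make `huggingface_hub` a hard dependency: in the new `huggingface.py` below, the third-party import is deferred into the constructor, and the module-level imports are stdlib and atomicguard-internal only. A minimal sketch of the resulting behavior, assuming `huggingface_hub` is not installed:

```python
# Importing the adapter succeeds even without huggingface_hub installed,
# because huggingface.py only imports it inside HuggingFaceGenerator.__init__.
from atomicguard.infrastructure.llm import HuggingFaceGenerator

try:
    HuggingFaceGenerator()  # raises ImportError if huggingface_hub is missing
except ImportError as err:
    print(err)  # "huggingface_hub library required: pip install huggingface_hub"
```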
atomicguard-1.2.0/src/atomicguard/infrastructure/llm/huggingface.py

@@ -0,0 +1,180 @@
+"""
+HuggingFace Inference API generator implementation.
+
+Connects to HuggingFace Inference Providers via the huggingface_hub
+InferenceClient for chat completion.
+"""
+
+import os
+import re
+import uuid
+from dataclasses import dataclass
+from datetime import datetime
+from typing import Any, cast
+
+from atomicguard.domain.interfaces import GeneratorInterface
+from atomicguard.domain.models import (
+    Artifact,
+    ArtifactStatus,
+    Context,
+    ContextSnapshot,
+)
+from atomicguard.domain.prompts import PromptTemplate
+
+
+@dataclass
+class HuggingFaceGeneratorConfig:
+    """Configuration for HuggingFaceGenerator.
+
+    This typed config ensures unknown fields are rejected at construction time.
+    """
+
+    model: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
+    api_key: str | None = None  # Auto-detects from HF_TOKEN env var
+    provider: str | None = None  # e.g. "auto", "hf-inference", "together"
+    timeout: float = 120.0
+    temperature: float = 0.7
+    max_tokens: int = 4096
+
+
+class HuggingFaceGenerator(GeneratorInterface):
+    """Connects to HuggingFace Inference API using huggingface_hub."""
+
+    config_class = HuggingFaceGeneratorConfig
+
+    def __init__(
+        self, config: HuggingFaceGeneratorConfig | None = None, **kwargs: Any
+    ) -> None:
+        """
+        Args:
+            config: Typed configuration object (preferred)
+            **kwargs: Legacy kwargs for backward compatibility (deprecated)
+        """
+        if config is None:
+            config = HuggingFaceGeneratorConfig(**kwargs)
+
+        try:
+            from huggingface_hub import InferenceClient
+        except ImportError as err:
+            raise ImportError(
+                "huggingface_hub library required: pip install huggingface_hub"
+            ) from err
+
+        api_key = config.api_key
+        if api_key is None:
+            api_key = os.environ.get("HF_TOKEN")
+        if not api_key:
+            raise ValueError(
+                "HuggingFace API key required: set HF_TOKEN environment "
+                "variable or pass api_key in config"
+            )
+
+        client_kwargs: dict[str, Any] = {
+            "api_key": api_key,
+            "timeout": config.timeout,
+        }
+        if config.provider is not None:
+            client_kwargs["provider"] = config.provider
+
+        self._model = config.model
+        self._client = InferenceClient(**client_kwargs)
+        self._temperature = config.temperature
+        self._max_tokens = config.max_tokens
+        self._version_counter = 0
+
+    def generate(
+        self,
+        context: Context,
+        template: PromptTemplate | None = None,
+        action_pair_id: str = "unknown",
+        workflow_id: str = "unknown",
+    ) -> Artifact:
+        """Generate an artifact based on context."""
+        if template:
+            prompt = template.render(context)
+        else:
+            prompt = self._build_basic_prompt(context)
+
+        messages: list[dict[str, str]] = [
+            {
+                "role": "system",
+                "content": (
+                    "You are a Python programming assistant. "
+                    "Provide complete, runnable code in a markdown block:\n"
+                    "```python\n# code\n```"
+                ),
+            },
+            {"role": "user", "content": prompt},
+        ]
+
+        response = self._client.chat_completion(
+            messages=cast(Any, messages),
+            model=self._model,
+            temperature=self._temperature,
+            max_tokens=self._max_tokens,
+        )
+
+        content = response.choices[0].message.content or ""
+        code = self._extract_code(content)
+
+        self._version_counter += 1
+
+        return Artifact(
+            artifact_id=str(uuid.uuid4()),
+            workflow_id=workflow_id,
+            content=code,
+            previous_attempt_id=None,
+            parent_action_pair_id=None,
+            action_pair_id=action_pair_id,
+            created_at=datetime.now().isoformat(),
+            attempt_number=self._version_counter,
+            status=ArtifactStatus.PENDING,
+            guard_result=None,
+            feedback="",
+            context=ContextSnapshot(
+                workflow_id=workflow_id,
+                specification=context.specification,
+                constraints=context.ambient.constraints,
+                feedback_history=(),
+                dependency_artifacts=context.dependency_artifacts,
+            ),
+        )
+
+    def _extract_code(self, content: str) -> str:
+        """Extract Python code from response."""
+        if not content or content.isspace():
+            return ""
+
+        # Try python block
+        match = re.search(r"```python\n(.*?)\n```", content, re.DOTALL)
+        if match:
+            return match.group(1)
+
+        # Try generic block
+        match = re.search(r"```\n(.*?)\n```", content, re.DOTALL)
+        if match:
+            return match.group(1)
+
+        # Try first def/import/class
+        match = re.search(r"^(def |import |class )", content, re.MULTILINE)
+        if match:
+            return content[match.start() :]
+
+        # No code block found - return empty to trigger guard validation failure
+        return ""
+
+    def _build_basic_prompt(self, context: Context) -> str:
+        """Build a basic prompt from context."""
+        parts = [context.specification]
+
+        if context.current_artifact:
+            parts.append(f"\nPrevious attempt:\n{context.current_artifact}")
+
+        if context.feedback_history:
+            feedback_text = "\n".join(
+                f"Attempt {i + 1} feedback: {f}"
+                for i, (_, f) in enumerate(context.feedback_history)
+            )
+            parts.append(f"\nFeedback history:\n{feedback_text}")
+
+        return "\n".join(parts)
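The `_extract_code` fallback chain above tries a fenced python block, then a generic fence, then the first top-level `def`/`import`/`class`, and otherwise returns an empty string so the guard fails the attempt. A standalone sketch of that chain (a hypothetical copy of the same regexes, for illustration only):

```python
import re

def extract_code(content: str) -> str:
    # Mirror of _extract_code above: fenced python block, then generic
    # fence, then first top-level definition, else empty string.
    for pattern in (r"```python\n(.*?)\n```", r"```\n(.*?)\n```"):
        match = re.search(pattern, content, re.DOTALL)
        if match:
            return match.group(1)
    match = re.search(r"^(def |import |class )", content, re.MULTILINE)
    return content[match.start():] if match else ""

assert extract_code("```python\nx = 1\n```") == "x = 1"
assert extract_code("Sure! def add(a, b): ...") == ""  # 'def' not at line start
assert extract_code("no code at all") == ""
```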
{atomicguard-1.1.0 → atomicguard-1.2.0/src/atomicguard.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: atomicguard
-Version: 1.1.0
+Version: 1.2.0
 Summary: A Dual-State Agent Framework for reliable LLM code generation with guard-validated loops
 Author-email: Matthew Thompson <thompsonson@gmail.com>
 Maintainer-email: Matthew Thompson <thompsonson@gmail.com>
@@ -93,6 +93,67 @@ print(artifact.content)
 
 See [examples/](examples/) for more detailed usage, including a [mock example](examples/basic_mock.py) that works without an LLM.
 
+## LLM Backends
+
+AtomicGuard supports multiple LLM backends. Each generator implements `GeneratorInterface` and can be swapped in with no other code changes.
+
+### Ollama (local or cloud)
+
+Uses the OpenAI-compatible API. Works with any Ollama-served model:
+
+```python
+from atomicguard.infrastructure.llm import OllamaGenerator
+
+# Local instance (default: http://localhost:11434/v1)
+generator = OllamaGenerator(model="qwen2.5-coder:7b")
+```
+
+### HuggingFace Inference API
+
+Connects to HuggingFace Inference Providers via `huggingface_hub`. Supports any model available through the HF Inference API, including third-party providers like Together AI.
+
+```bash
+# Install the optional dependency
+pip install huggingface_hub
+
+# Set your API token
+export HF_TOKEN="hf_your_token_here"
+```
+
+```python
+from atomicguard.infrastructure.llm import HuggingFaceGenerator
+from atomicguard.infrastructure.llm.huggingface import HuggingFaceGeneratorConfig
+
+# Default: Qwen/Qwen2.5-Coder-32B-Instruct
+generator = HuggingFaceGenerator()
+
+# Custom model and provider
+generator = HuggingFaceGenerator(HuggingFaceGeneratorConfig(
+    model="Qwen/Qwen2.5-Coder-32B-Instruct",
+    provider="together",  # or "auto", "hf-inference"
+    temperature=0.7,
+    max_tokens=4096,
+))
+```
+
+Drop-in replacement in any workflow:
+
+```python
+from atomicguard import (
+    SyntaxGuard, TestGuard, CompositeGuard,
+    ActionPair, DualStateAgent, InMemoryArtifactDAG
+)
+from atomicguard.infrastructure.llm import HuggingFaceGenerator
+
+generator = HuggingFaceGenerator()
+guard = CompositeGuard([SyntaxGuard(), TestGuard("assert add(2, 3) == 5")])
+action_pair = ActionPair(generator=generator, guard=guard)
+agent = DualStateAgent(action_pair, InMemoryArtifactDAG(), rmax=3)
+
+artifact = agent.execute("Write a function that adds two numbers")
+print(artifact.content)
+```
+
 ## Benchmarks
 
 Run the simulation from the paper:
@@ -122,15 +183,16 @@ If you use this framework in your research, please cite the paper:
 > Thompson, M. (2025). Managing the Stochastic: Foundations of Learning in Neuro-Symbolic Systems for Software Engineering. arXiv preprint arXiv:2512.20660.
 
 ```bibtex
-@article{thompson2025managing,
+@misc{thompson2025managing,
  title={Managing the Stochastic: Foundations of Learning in Neuro-Symbolic Systems for Software Engineering},
  author={Thompson, Matthew},
-  journal={arXiv preprint arXiv:2512.20660},
  year={2025},
-  url={[https://arxiv.org/abs/2512.20660](https://arxiv.org/abs/2512.20660)}
+  eprint={2512.20660},
+  archivePrefix={arXiv},
+  primaryClass={cs.LG},
+  url={https://arxiv.org/abs/2512.20660}
 }
-
-```
+```
 
 ## License
 
{atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard.egg-info/SOURCES.txt

@@ -30,6 +30,7 @@ src/atomicguard/guards/static/syntax.py
 src/atomicguard/infrastructure/__init__.py
 src/atomicguard/infrastructure/registry.py
 src/atomicguard/infrastructure/llm/__init__.py
+src/atomicguard/infrastructure/llm/huggingface.py
 src/atomicguard/infrastructure/llm/mock.py
 src/atomicguard/infrastructure/llm/ollama.py
 src/atomicguard/infrastructure/persistence/__init__.py
{atomicguard-1.1.0 → atomicguard-1.2.0}/src/atomicguard.egg-info/entry_points.txt

@@ -1,3 +1,4 @@
 [atomicguard.generators]
+HuggingFaceGenerator = atomicguard.infrastructure.llm:HuggingFaceGenerator
 MockGenerator = atomicguard.infrastructure.llm:MockGenerator
 OllamaGenerator = atomicguard.infrastructure.llm:OllamaGenerator