atomicguard 1.1.0__py3-none-any.whl → 2.0.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
@@ -0,0 +1,162 @@
1
+ """
2
+ HuggingFace Inference API generator implementation.
3
+
4
+ Connects to HuggingFace Inference Providers via the huggingface_hub
5
+ InferenceClient for chat completion.
6
+ """
7
+
8
+ import os
9
+ import re
10
+ import uuid
11
+ from dataclasses import dataclass
12
+ from datetime import datetime
13
+ from typing import Any, cast
14
+
15
+ from atomicguard.domain.interfaces import GeneratorInterface
16
+ from atomicguard.domain.models import (
17
+ Artifact,
18
+ ArtifactStatus,
19
+ Context,
20
+ ContextSnapshot,
21
+ )
22
+ from atomicguard.domain.prompts import PromptTemplate
23
+
24
+
25
+ @dataclass
26
+ class HuggingFaceGeneratorConfig:
27
+ """Configuration for HuggingFaceGenerator.
28
+
29
+ This typed config ensures unknown fields are rejected at construction time.
30
+ """
31
+
32
+ model: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
33
+ api_key: str | None = None # Auto-detects from HF_TOKEN env var
34
+ provider: str | None = None # e.g. "auto", "hf-inference", "together"
35
+ timeout: float = 120.0
36
+ temperature: float = 0.7
37
+ max_tokens: int = 4096
38
+
39
+
40
+ class HuggingFaceGenerator(GeneratorInterface):
41
+ """Connects to HuggingFace Inference API using huggingface_hub."""
42
+
43
+ config_class = HuggingFaceGeneratorConfig
44
+
45
+ def __init__(
46
+ self, config: HuggingFaceGeneratorConfig | None = None, **kwargs: Any
47
+ ) -> None:
48
+ """
49
+ Args:
50
+ config: Typed configuration object (preferred)
51
+ **kwargs: Legacy kwargs for backward compatibility (deprecated)
52
+ """
53
+ if config is None:
54
+ config = HuggingFaceGeneratorConfig(**kwargs)
55
+
56
+ try:
57
+ from huggingface_hub import InferenceClient
58
+ except ImportError as err:
59
+ raise ImportError(
60
+ "huggingface_hub library required: pip install huggingface_hub"
61
+ ) from err
62
+
63
+ api_key = config.api_key
64
+ if api_key is None:
65
+ api_key = os.environ.get("HF_TOKEN")
66
+ if not api_key:
67
+ raise ValueError(
68
+ "HuggingFace API key required: set HF_TOKEN environment "
69
+ "variable or pass api_key in config"
70
+ )
71
+
72
+ client_kwargs: dict[str, Any] = {
73
+ "api_key": api_key,
74
+ "timeout": config.timeout,
75
+ }
76
+ if config.provider is not None:
77
+ client_kwargs["provider"] = config.provider
78
+
79
+ self._model = config.model
80
+ self._client = InferenceClient(**client_kwargs)
81
+ self._temperature = config.temperature
82
+ self._max_tokens = config.max_tokens
83
+ self._version_counter = 0
84
+
85
+ def generate(
86
+ self,
87
+ context: Context,
88
+ template: PromptTemplate,
89
+ action_pair_id: str = "unknown",
90
+ workflow_id: str = "unknown",
91
+ workflow_ref: str | None = None,
92
+ ) -> Artifact:
93
+ """Generate an artifact based on context."""
94
+ prompt = template.render(context)
95
+
96
+ messages: list[dict[str, str]] = [
97
+ {
98
+ "role": "system",
99
+ "content": (
100
+ "You are a Python programming assistant. "
101
+ "Provide complete, runnable code in a markdown block:\n"
102
+ "```python\n# code\n```"
103
+ ),
104
+ },
105
+ {"role": "user", "content": prompt},
106
+ ]
107
+
108
+ response = self._client.chat_completion(
109
+ messages=cast(Any, messages),
110
+ model=self._model,
111
+ temperature=self._temperature,
112
+ max_tokens=self._max_tokens,
113
+ )
114
+
115
+ content = response.choices[0].message.content or ""
116
+ code = self._extract_code(content)
117
+
118
+ self._version_counter += 1
119
+
120
+ return Artifact(
121
+ artifact_id=str(uuid.uuid4()),
122
+ workflow_id=workflow_id,
123
+ content=code,
124
+ previous_attempt_id=None,
125
+ parent_action_pair_id=None,
126
+ action_pair_id=action_pair_id,
127
+ created_at=datetime.now().isoformat(),
128
+ attempt_number=self._version_counter,
129
+ status=ArtifactStatus.PENDING,
130
+ guard_result=None,
131
+ context=ContextSnapshot(
132
+ workflow_id=workflow_id,
133
+ specification=context.specification,
134
+ constraints=context.ambient.constraints,
135
+ feedback_history=(),
136
+ dependency_artifacts=context.dependency_artifacts,
137
+ ),
138
+ workflow_ref=workflow_ref,
139
+ )
140
+
141
+ def _extract_code(self, content: str) -> str:
142
+ """Extract Python code from response."""
143
+ if not content or content.isspace():
144
+ return ""
145
+
146
+ # Try python block
147
+ match = re.search(r"```python\n(.*?)\n```", content, re.DOTALL)
148
+ if match:
149
+ return match.group(1)
150
+
151
+ # Try generic block
152
+ match = re.search(r"```\n(.*?)\n```", content, re.DOTALL)
153
+ if match:
154
+ return match.group(1)
155
+
156
+ # Try first def/import/class
157
+ match = re.search(r"^(def |import |class )", content, re.MULTILINE)
158
+ if match:
159
+ return content[match.start() :]
160
+
161
+ # No code block found - return empty to trigger guard validation failure
162
+ return ""
@@ -44,10 +44,11 @@ class MockGenerator(GeneratorInterface):
44
44
 
45
45
  def generate(
46
46
  self,
47
- _context: Context,
48
- _template: PromptTemplate | None = None,
47
+ context: Context, # noqa: ARG002 - unused but required by interface
48
+ template: PromptTemplate, # noqa: ARG002
49
49
  action_pair_id: str = "unknown",
50
50
  workflow_id: str = "unknown",
51
+ workflow_ref: str | None = None,
51
52
  ) -> Artifact:
52
53
  """Return the next predefined response."""
53
54
  if self._call_count >= len(self._responses):
@@ -66,8 +67,7 @@ class MockGenerator(GeneratorInterface):
66
67
  created_at=datetime.now().isoformat(),
67
68
  attempt_number=self._call_count,
68
69
  status=ArtifactStatus.PENDING,
69
- guard_result=None,
70
- feedback="",
70
+ guard_result=None, # Guard result set after validation
71
71
  context=ContextSnapshot(
72
72
  workflow_id=workflow_id,
73
73
  specification="",
@@ -75,6 +75,7 @@ class MockGenerator(GeneratorInterface):
75
75
  feedback_history=(),
76
76
  dependency_artifacts=(),
77
77
  ),
78
+ workflow_ref=workflow_ref,
78
79
  )
79
80
 
80
81
  @property
@@ -65,16 +65,14 @@ class OllamaGenerator(GeneratorInterface):
65
65
  def generate(
66
66
  self,
67
67
  context: Context,
68
- template: PromptTemplate | None = None,
68
+ template: PromptTemplate,
69
69
  action_pair_id: str = "unknown",
70
70
  workflow_id: str = "unknown",
71
+ workflow_ref: str | None = None,
71
72
  ) -> Artifact:
72
73
  """Generate an artifact based on context."""
73
74
  # Build prompt
74
- if template:
75
- prompt = template.render(context)
76
- else:
77
- prompt = self._build_basic_prompt(context)
75
+ prompt = template.render(context)
78
76
 
79
77
  # Call Ollama
80
78
  messages = [
@@ -104,8 +102,7 @@ class OllamaGenerator(GeneratorInterface):
104
102
  created_at=datetime.now().isoformat(),
105
103
  attempt_number=self._version_counter,
106
104
  status=ArtifactStatus.PENDING,
107
- guard_result=None,
108
- feedback="",
105
+ guard_result=None, # Guard result set after validation
109
106
  context=ContextSnapshot(
110
107
  workflow_id=workflow_id,
111
108
  specification=context.specification,
@@ -113,6 +110,7 @@ class OllamaGenerator(GeneratorInterface):
113
110
  feedback_history=(),
114
111
  dependency_artifacts=context.dependency_artifacts,
115
112
  ),
113
+ workflow_ref=workflow_ref,
116
114
  )
117
115
 
118
116
  def _extract_code(self, content: str) -> str:
@@ -137,19 +135,3 @@ class OllamaGenerator(GeneratorInterface):
137
135
 
138
136
  # No code block found - return empty to trigger guard validation failure
139
137
  return ""
140
-
141
- def _build_basic_prompt(self, context: Context) -> str:
142
- """Build a basic prompt from context."""
143
- parts = [context.specification]
144
-
145
- if context.current_artifact:
146
- parts.append(f"\nPrevious attempt:\n{context.current_artifact}")
147
-
148
- if context.feedback_history:
149
- feedback_text = "\n".join(
150
- f"Attempt {i + 1} feedback: {f}"
151
- for i, (_, f) in enumerate(context.feedback_history)
152
- )
153
- parts.append(f"\nFeedback history:\n{feedback_text}")
154
-
155
- return "\n".join(parts)
@@ -6,6 +6,7 @@ Implements the Versioned Repository R from Definition 4.
6
6
  """
7
7
 
8
8
  import json
9
+ import threading
9
10
  from pathlib import Path
10
11
  from typing import Any
11
12
 
@@ -15,6 +16,8 @@ from atomicguard.domain.models import (
15
16
  ArtifactStatus,
16
17
  ContextSnapshot,
17
18
  FeedbackEntry,
19
+ GuardResult,
20
+ SubGuardOutcome,
18
21
  )
19
22
 
20
23
 
@@ -31,6 +34,7 @@ class FilesystemArtifactDAG(ArtifactDAGInterface):
31
34
  self._objects_dir = self._base_dir / "objects"
32
35
  self._index_path = self._base_dir / "index.json"
33
36
  self._cache: dict[str, Artifact] = {}
37
+ self._lock = threading.Lock() # Thread safety for concurrent writes
34
38
  self._index: dict[str, Any] = self._load_or_create_index()
35
39
 
36
40
  def _load_or_create_index(self) -> dict[str, Any]:
@@ -52,9 +56,52 @@ class FilesystemArtifactDAG(ArtifactDAGInterface):
52
56
  json.dump(self._index, f, indent=2)
53
57
  temp_path.rename(self._index_path) # Atomic on POSIX
54
58
 
59
+ def _guard_result_to_dict(self, guard_result: GuardResult | None) -> dict | None:
60
+ """Serialize GuardResult to JSON-compatible dict."""
61
+ if guard_result is None:
62
+ return None
63
+ return {
64
+ "passed": guard_result.passed,
65
+ "feedback": guard_result.feedback,
66
+ "fatal": guard_result.fatal,
67
+ "guard_name": guard_result.guard_name,
68
+ "sub_results": [
69
+ {
70
+ "guard_name": sr.guard_name,
71
+ "passed": sr.passed,
72
+ "feedback": sr.feedback,
73
+ "execution_time_ms": sr.execution_time_ms,
74
+ }
75
+ for sr in guard_result.sub_results
76
+ ],
77
+ }
78
+
79
+ def _dict_to_guard_result(self, data: dict | None) -> GuardResult | None:
80
+ """Deserialize GuardResult from JSON dict."""
81
+ if data is None:
82
+ return None
83
+ # Handle legacy format where guard_result was just a boolean
84
+ if isinstance(data, bool):
85
+ return GuardResult(passed=data, feedback="")
86
+ return GuardResult(
87
+ passed=data["passed"],
88
+ feedback=data.get("feedback", ""),
89
+ fatal=data.get("fatal", False),
90
+ guard_name=data.get("guard_name"),
91
+ sub_results=tuple(
92
+ SubGuardOutcome(
93
+ guard_name=sr["guard_name"],
94
+ passed=sr["passed"],
95
+ feedback=sr["feedback"],
96
+ execution_time_ms=sr.get("execution_time_ms", 0.0),
97
+ )
98
+ for sr in data.get("sub_results", [])
99
+ ),
100
+ )
101
+
55
102
  def _artifact_to_dict(self, artifact: Artifact) -> dict:
56
103
  """Serialize artifact to JSON-compatible dict."""
57
- return {
104
+ result = {
58
105
  "artifact_id": artifact.artifact_id,
59
106
  "workflow_id": artifact.workflow_id,
60
107
  "content": artifact.content,
@@ -64,8 +111,7 @@ class FilesystemArtifactDAG(ArtifactDAGInterface):
64
111
  "created_at": artifact.created_at,
65
112
  "attempt_number": artifact.attempt_number,
66
113
  "status": artifact.status.value,
67
- "guard_result": artifact.guard_result,
68
- "feedback": artifact.feedback,
114
+ "guard_result": self._guard_result_to_dict(artifact.guard_result),
69
115
  "context": {
70
116
  "workflow_id": artifact.context.workflow_id,
71
117
  "specification": artifact.context.specification,
@@ -78,6 +124,10 @@ class FilesystemArtifactDAG(ArtifactDAGInterface):
78
124
  "dependency_artifacts": dict(artifact.context.dependency_artifacts),
79
125
  },
80
126
  }
127
+ # Include metadata if present
128
+ if artifact.metadata:
129
+ result["metadata"] = dict(artifact.metadata)
130
+ return result
81
131
 
82
132
  def _dict_to_artifact(self, data: dict) -> Artifact:
83
133
  """Deserialize artifact from JSON dict."""
@@ -98,6 +148,25 @@ class FilesystemArtifactDAG(ArtifactDAGInterface):
98
148
  # Deserialize dict → tuple for immutability
99
149
  dependency_artifacts=tuple(dep_data.items()),
100
150
  )
151
+ # Get metadata if present
152
+ metadata = data.get("metadata", {})
153
+
154
+ # Handle legacy format: guard_result was bool, feedback was separate field
155
+ guard_result_data = data.get("guard_result")
156
+ if guard_result_data is None and "feedback" in data:
157
+ # Legacy artifact with no guard_result but has feedback
158
+ # Rare legacy case: the stray feedback is intentionally ignored (no-op)
159
+ pass
160
+ elif isinstance(guard_result_data, bool):
161
+ # Legacy format: convert bool + feedback to GuardResult
162
+ guard_result_data = {
163
+ "passed": guard_result_data,
164
+ "feedback": data.get("feedback", ""),
165
+ "fatal": False,
166
+ "guard_name": None,
167
+ "sub_results": [],
168
+ }
169
+
101
170
  return Artifact(
102
171
  artifact_id=data["artifact_id"],
103
172
  workflow_id=data.get("workflow_id", "unknown"),
@@ -108,9 +177,9 @@ class FilesystemArtifactDAG(ArtifactDAGInterface):
108
177
  created_at=data["created_at"],
109
178
  attempt_number=data["attempt_number"],
110
179
  status=ArtifactStatus(data["status"]),
111
- guard_result=data["guard_result"],
112
- feedback=data["feedback"],
180
+ guard_result=self._dict_to_guard_result(guard_result_data),
113
181
  context=context,
182
+ metadata=metadata, # Will be converted to MappingProxyType in __post_init__
114
183
  )
115
184
 
116
185
  def _get_object_path(self, artifact_id: str) -> Path:
@@ -122,52 +191,55 @@ class FilesystemArtifactDAG(ArtifactDAGInterface):
122
191
  """
123
192
  Append artifact to DAG (immutable, append-only).
124
193
 
194
+ Thread-safe: uses lock to ensure atomic index updates.
195
+
125
196
  Args:
126
197
  artifact: The artifact to store
127
198
 
128
199
  Returns:
129
200
  The artifact_id
130
201
  """
131
- # 1. Serialize to JSON
132
- artifact_dict = self._artifact_to_dict(artifact)
133
-
134
- # 2. Write to objects/{prefix}/{artifact_id}.json
135
- object_path = self._get_object_path(artifact.artifact_id)
136
- object_path.parent.mkdir(parents=True, exist_ok=True)
137
- with open(object_path, "w") as f:
138
- json.dump(artifact_dict, f, indent=2)
139
-
140
- # 3. Update index
141
- self._index["artifacts"][artifact.artifact_id] = {
142
- "path": str(object_path.relative_to(self._base_dir)),
143
- "workflow_id": artifact.workflow_id,
144
- "action_pair_id": artifact.action_pair_id,
145
- "parent_action_pair_id": artifact.parent_action_pair_id,
146
- "status": artifact.status.value,
147
- "created_at": artifact.created_at,
148
- }
149
-
150
- # Track by action pair
151
- if artifact.action_pair_id not in self._index["action_pairs"]:
152
- self._index["action_pairs"][artifact.action_pair_id] = []
153
- self._index["action_pairs"][artifact.action_pair_id].append(
154
- artifact.artifact_id
155
- )
156
-
157
- # Track by workflow
158
- if "workflows" not in self._index:
159
- self._index["workflows"] = {}
160
- if artifact.workflow_id not in self._index["workflows"]:
161
- self._index["workflows"][artifact.workflow_id] = []
162
- self._index["workflows"][artifact.workflow_id].append(artifact.artifact_id)
163
-
164
- # 4. Atomically update index
165
- self._update_index_atomic()
166
-
167
- # 5. Add to cache
168
- self._cache[artifact.artifact_id] = artifact
169
-
170
- return artifact.artifact_id
202
+ with self._lock:
203
+ # 1. Serialize to JSON
204
+ artifact_dict = self._artifact_to_dict(artifact)
205
+
206
+ # 2. Write to objects/{prefix}/{artifact_id}.json
207
+ object_path = self._get_object_path(artifact.artifact_id)
208
+ object_path.parent.mkdir(parents=True, exist_ok=True)
209
+ with open(object_path, "w") as f:
210
+ json.dump(artifact_dict, f, indent=2)
211
+
212
+ # 3. Update index
213
+ self._index["artifacts"][artifact.artifact_id] = {
214
+ "path": str(object_path.relative_to(self._base_dir)),
215
+ "workflow_id": artifact.workflow_id,
216
+ "action_pair_id": artifact.action_pair_id,
217
+ "parent_action_pair_id": artifact.parent_action_pair_id,
218
+ "status": artifact.status.value,
219
+ "created_at": artifact.created_at,
220
+ }
221
+
222
+ # Track by action pair
223
+ if artifact.action_pair_id not in self._index["action_pairs"]:
224
+ self._index["action_pairs"][artifact.action_pair_id] = []
225
+ self._index["action_pairs"][artifact.action_pair_id].append(
226
+ artifact.artifact_id
227
+ )
228
+
229
+ # Track by workflow
230
+ if "workflows" not in self._index:
231
+ self._index["workflows"] = {}
232
+ if artifact.workflow_id not in self._index["workflows"]:
233
+ self._index["workflows"][artifact.workflow_id] = []
234
+ self._index["workflows"][artifact.workflow_id].append(artifact.artifact_id)
235
+
236
+ # 4. Atomically update index
237
+ self._update_index_atomic()
238
+
239
+ # 5. Add to cache
240
+ self._cache[artifact.artifact_id] = artifact
241
+
242
+ return artifact.artifact_id
171
243
 
172
244
  def get_artifact(self, artifact_id: str) -> Artifact:
173
245
  """Retrieve artifact by ID (cache-first)."""
@@ -238,8 +310,8 @@ class FilesystemArtifactDAG(ArtifactDAGInterface):
238
310
  attempt_number=artifact.attempt_number,
239
311
  status=new_status,
240
312
  guard_result=artifact.guard_result,
241
- feedback=artifact.feedback,
242
313
  context=artifact.context,
314
+ metadata=artifact.metadata,
243
315
  )
244
316
 
245
317
  # Update file
@@ -294,3 +366,18 @@ class FilesystemArtifactDAG(ArtifactDAGInterface):
294
366
  # Sort by created_at descending and return the latest
295
367
  candidates.sort(key=lambda x: x[1], reverse=True)
296
368
  return self.get_artifact(candidates[0][0])
369
+
370
+ def get_all(self) -> list[Artifact]:
371
+ """Return all artifacts in the DAG.
372
+
373
+ Returns:
374
+ List of all artifacts, sorted by created_at.
375
+ """
376
+ # Get all artifact IDs from index and sort by created_at
377
+ artifact_ids_with_times = [
378
+ (aid, info.get("created_at", ""))
379
+ for aid, info in self._index.get("artifacts", {}).items()
380
+ ]
381
+ artifact_ids_with_times.sort(key=lambda x: x[1])
382
+
383
+ return [self.get_artifact(aid) for aid, _ in artifact_ids_with_times]
@@ -59,3 +59,13 @@ class InMemoryArtifactDAG(ArtifactDAGInterface):
59
59
  # Sort by created_at descending and return the latest
60
60
  candidates.sort(key=lambda a: a.created_at, reverse=True)
61
61
  return candidates[0]
62
+
63
+ def get_all(self) -> list[Artifact]:
64
+ """Return all artifacts in the DAG.
65
+
66
+ Returns:
67
+ List of all artifacts, sorted by created_at.
68
+ """
69
+ artifacts = list(self._artifacts.values())
70
+ artifacts.sort(key=lambda a: a.created_at)
71
+ return artifacts
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: atomicguard
3
- Version: 1.1.0
3
+ Version: 2.0.0
4
4
  Summary: A Dual-State Agent Framework for reliable LLM code generation with guard-validated loops
5
5
  Author-email: Matthew Thompson <thompsonson@gmail.com>
6
6
  Maintainer-email: Matthew Thompson <thompsonson@gmail.com>
@@ -93,6 +93,67 @@ print(artifact.content)
93
93
 
94
94
  See [examples/](examples/) for more detailed usage, including a [mock example](examples/basic_mock.py) that works without an LLM.
95
95
 
96
+ ## LLM Backends
97
+
98
+ AtomicGuard supports multiple LLM backends. Each generator implements `GeneratorInterface` and can be swapped in with no other code changes.
99
+
100
+ ### Ollama (local or cloud)
101
+
102
+ Uses the OpenAI-compatible API. Works with any Ollama-served model:
103
+
104
+ ```python
105
+ from atomicguard.infrastructure.llm import OllamaGenerator
106
+
107
+ # Local instance (default: http://localhost:11434/v1)
108
+ generator = OllamaGenerator(model="qwen2.5-coder:7b")
109
+ ```
110
+
111
+ ### HuggingFace Inference API
112
+
113
+ Connects to HuggingFace Inference Providers via `huggingface_hub`. Supports any model available through the HF Inference API, including third-party providers like Together AI.
114
+
115
+ ```bash
116
+ # Install the optional dependency
117
+ pip install huggingface_hub
118
+
119
+ # Set your API token
120
+ export HF_TOKEN="hf_your_token_here"
121
+ ```
122
+
123
+ ```python
124
+ from atomicguard.infrastructure.llm import HuggingFaceGenerator
125
+ from atomicguard.infrastructure.llm.huggingface import HuggingFaceGeneratorConfig
126
+
127
+ # Default: Qwen/Qwen2.5-Coder-32B-Instruct
128
+ generator = HuggingFaceGenerator()
129
+
130
+ # Custom model and provider
131
+ generator = HuggingFaceGenerator(HuggingFaceGeneratorConfig(
132
+ model="Qwen/Qwen2.5-Coder-32B-Instruct",
133
+ provider="together", # or "auto", "hf-inference"
134
+ temperature=0.7,
135
+ max_tokens=4096,
136
+ ))
137
+ ```
138
+
139
+ Drop-in replacement in any workflow:
140
+
141
+ ```python
142
+ from atomicguard import (
143
+ SyntaxGuard, TestGuard, CompositeGuard,
144
+ ActionPair, DualStateAgent, InMemoryArtifactDAG
145
+ )
146
+ from atomicguard.infrastructure.llm import HuggingFaceGenerator
147
+
148
+ generator = HuggingFaceGenerator()
149
+ guard = CompositeGuard([SyntaxGuard(), TestGuard("assert add(2, 3) == 5")])
150
+ action_pair = ActionPair(generator=generator, guard=guard)
151
+ agent = DualStateAgent(action_pair, InMemoryArtifactDAG(), rmax=3)
152
+
153
+ artifact = agent.execute("Write a function that adds two numbers")
154
+ print(artifact.content)
155
+ ```
156
+
96
157
  ## Benchmarks
97
158
 
98
159
  Run the simulation from the paper:
@@ -122,15 +183,16 @@ If you use this framework in your research, please cite the paper:
122
183
  > Thompson, M. (2025). Managing the Stochastic: Foundations of Learning in Neuro-Symbolic Systems for Software Engineering. arXiv preprint arXiv:2512.20660.
123
184
 
124
185
  ```bibtex
125
- @article{thompson2025managing,
186
+ @misc{thompson2025managing,
126
187
  title={Managing the Stochastic: Foundations of Learning in Neuro-Symbolic Systems for Software Engineering},
127
188
  author={Thompson, Matthew},
128
- journal={arXiv preprint arXiv:2512.20660},
129
189
  year={2025},
130
- url={[https://arxiv.org/abs/2512.20660](https://arxiv.org/abs/2512.20660)}
190
+ eprint={2512.20660},
191
+ archivePrefix={arXiv},
192
+ primaryClass={cs.LG},
193
+ url={https://arxiv.org/abs/2512.20660}
131
194
  }
132
-
133
- ```
195
+ ```
134
196
 
135
197
  ## License
136
198
 
@@ -0,0 +1,42 @@
1
+ atomicguard/__init__.py,sha256=x11KrkPPhOTMy1ZALeVGMRjpNB8x6EukvdngV9e3_hk,3207
2
+ atomicguard/application/__init__.py,sha256=ttnQx0Jd_G2EsSoc2TNq8VdeoY2k4Q0LK3iRpe0O8Ck,643
3
+ atomicguard/application/action_pair.py,sha256=e-yilKIY1TNDn1JBbMq3fM0IDP82lIwHCDuMRog8fnc,2206
4
+ atomicguard/application/agent.py,sha256=DpK-ZtXp-DhPYkziI9c0hucIw4LZRPWCMlRhOS2oCfc,5912
5
+ atomicguard/application/checkpoint_service.py,sha256=F4EoWmWWIonwc4HAre01EPk9rwTWpLL-gDp-Vvcfs6c,4580
6
+ atomicguard/application/resume_service.py,sha256=H6fvIm8b3BuyF9d8lscVIHi9En37xlmSOkURbrmWpO8,8143
7
+ atomicguard/application/workflow.py,sha256=v6jQOPx-kzFFNvA98hSqCgor8gzP2eE8_Ng--WZ91kE,24992
8
+ atomicguard/domain/__init__.py,sha256=HGPu98AL_HgN2nWdiZU_r3fJq6kD8FNfauQSSMVcocM,1193
9
+ atomicguard/domain/exceptions.py,sha256=Zj1iz31AiksyWnyKR8KfnNLRGhZdetrYhCIwFOapBtY,1376
10
+ atomicguard/domain/extraction.py,sha256=O7ZG9pXTA11kT4n8fQIbV0FA73Ui2boveIbwB5dZ0OY,7660
11
+ atomicguard/domain/interfaces.py,sha256=44JDQ7XF7sXhf3ReWJUEJS4vJXdR_qbR9lfQ_beCTuM,7054
12
+ atomicguard/domain/models.py,sha256=Bjh8VTx7SQ8K0UxjN3lUfNunNPJ-90IZjtTv77KaU-g,11652
13
+ atomicguard/domain/multiagent.py,sha256=Id1DphL_HuxodlIFeaBl_kOny7sadotlJsvYsc4xd6Y,8707
14
+ atomicguard/domain/prompts.py,sha256=QtyBgOFldW9RmfvNAwFeyP2mKE-kFjDn758KmrxMq8Q,3328
15
+ atomicguard/domain/workflow.py,sha256=bVIZmYI3lxHOKuxtX3E3DkFQBBrsU0S73AMqrpdfrIo,21883
16
+ atomicguard/guards/__init__.py,sha256=OzpOjwmyHGpTQSDVSDeGEuXUJNOyiNpZxZVnTVMIOyg,956
17
+ atomicguard/guards/composite/__init__.py,sha256=AedlMBxFw4uTFExVWL6RJRR7vRwMOK3sgjhutkXz838,217
18
+ atomicguard/guards/composite/base.py,sha256=VGydlOD4gmXdiDHvWXF5jSppXcJAoJYSCyDj7Lrbng0,1213
19
+ atomicguard/guards/dynamic/__init__.py,sha256=pjBBeD_yYavvn-I7s5I3nTDOlZtPTq1Hqmim_RgW5cE,321
20
+ atomicguard/guards/dynamic/test_runner.py,sha256=d5cPb5cCM-UnfZjTYGOHn4XoS-N66nr5EBHqPoDGwNM,7196
21
+ atomicguard/guards/interactive/__init__.py,sha256=PlP5LNB4CMZ4fJ7Xr8gjjpn8VwLnB3zs5M2Em9xyVCc,226
22
+ atomicguard/guards/interactive/human.py,sha256=G8SstAEdcMpHR2b8ugyG9drk9aBjw4_pE6E4UrTtcUo,2960
23
+ atomicguard/guards/static/__init__.py,sha256=KANeEg_yhrKGzSmA0yf2SewOE9qkUwnJly4mWkBzsm4,305
24
+ atomicguard/guards/static/imports.py,sha256=nWS3FNNqle7YWU5_tgRgYMRAXwEcWlxee8Q4MdomN5w,6292
25
+ atomicguard/guards/static/syntax.py,sha256=mPVgGDY3pzwtXuulmmuEwYAQG7rNG0fgSGB1puYRI6Y,919
26
+ atomicguard/infrastructure/__init__.py,sha256=Y3Ot6sRgTind-8s5kCnNtRYJGN3uq84QmbAUESx8Pio,581
27
+ atomicguard/infrastructure/registry.py,sha256=XilDY2sUedwm5z1Xrp39HfQ9e3paixyJxL_hwGaVbqs,3662
28
+ atomicguard/infrastructure/llm/__init__.py,sha256=wQg_FCAHmwbDXWm0bF5WX-9v7sutAS7RabhHCGiZZzo,338
29
+ atomicguard/infrastructure/llm/huggingface.py,sha256=fGC3uQu0AwM_WzfyJKTe8aNJHTA92wecSiaxo01pOHU,5130
30
+ atomicguard/infrastructure/llm/mock.py,sha256=z_NTiPwzc7zPYN4RbzkhQEldzrdhYy7Pb9ai_pFQ_gQ,2782
31
+ atomicguard/infrastructure/llm/ollama.py,sha256=RMWct-DiY5noLu0dCM3j-7sybB5QJJGL2ZL9pMjRwZ8,4243
32
+ atomicguard/infrastructure/persistence/__init__.py,sha256=KS95wmO_qVF6It63_884lWDoa-IB0WvRma_nXS2uTq4,483
33
+ atomicguard/infrastructure/persistence/checkpoint.py,sha256=MS4qpJ_jcl6Ibw7h3KncWgDqz7CEIlFJdrpFcu2vEgs,13966
34
+ atomicguard/infrastructure/persistence/filesystem.py,sha256=ZLudEu0HvhnicciGZzOG_xX85UptfcawJGg-5V1LU1Q,14988
35
+ atomicguard/infrastructure/persistence/memory.py,sha256=r-vTR9GdThLV4EGMNM_LAxO7KhXZvh11W-Jdfp6hmk0,2342
36
+ atomicguard/schemas/__init__.py,sha256=9z3gWBHsq8FHD2qE_TPy31wkTa_qXPfVgJrNdIu8p4g,3735
37
+ atomicguard-2.0.0.dist-info/licenses/LICENSE,sha256=ROMMFVruZ18U24pKTNte9AK_YIqoMfXnMzoxqBNmKS4,1073
38
+ atomicguard-2.0.0.dist-info/METADATA,sha256=dNW8Oae14_d_ujCJgSfwb87L23STUibFEH_WU8PaW58,6831
39
+ atomicguard-2.0.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
40
+ atomicguard-2.0.0.dist-info/entry_points.txt,sha256=MMVrkEyWFFCMoNIlpSWRY9FJYlEMGmaauvzMxEjwzTI,226
41
+ atomicguard-2.0.0.dist-info/top_level.txt,sha256=J_6ENELjnacSYJ5N3FGwomp-sVeAJQohPkJBh6pu6iY,12
42
+ atomicguard-2.0.0.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (80.9.0)
2
+ Generator: setuptools (80.10.2)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5