dataact 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57) hide show
  1. dataact-0.1.0/.github/workflows/ci.yml +18 -0
  2. dataact-0.1.0/.github/workflows/release-testpypi.yml +15 -0
  3. dataact-0.1.0/.github/workflows/release.yml +26 -0
  4. dataact-0.1.0/.gitignore +22 -0
  5. dataact-0.1.0/LICENSE +21 -0
  6. dataact-0.1.0/PKG-INFO +212 -0
  7. dataact-0.1.0/README.md +188 -0
  8. dataact-0.1.0/dataact/__init__.py +31 -0
  9. dataact-0.1.0/dataact/agent.py +237 -0
  10. dataact-0.1.0/dataact/cache.py +319 -0
  11. dataact-0.1.0/dataact/exceptions.py +21 -0
  12. dataact-0.1.0/dataact/format.py +108 -0
  13. dataact-0.1.0/dataact/logger.py +66 -0
  14. dataact-0.1.0/dataact/loop.py +153 -0
  15. dataact-0.1.0/dataact/observe.py +31 -0
  16. dataact-0.1.0/dataact/providers/__init__.py +0 -0
  17. dataact-0.1.0/dataact/providers/anthropic.py +112 -0
  18. dataact-0.1.0/dataact/providers/base.py +35 -0
  19. dataact-0.1.0/dataact/providers/openai.py +125 -0
  20. dataact-0.1.0/dataact/schema.py +79 -0
  21. dataact-0.1.0/dataact/serialize.py +111 -0
  22. dataact-0.1.0/dataact/testing.py +70 -0
  23. dataact-0.1.0/dataact/tools/__init__.py +0 -0
  24. dataact-0.1.0/dataact/tools/connectors.py +129 -0
  25. dataact-0.1.0/dataact/tools/interpreter.py +189 -0
  26. dataact-0.1.0/dataact/tools/planner.py +107 -0
  27. dataact-0.1.0/dataact/tools/subagent.py +222 -0
  28. dataact-0.1.0/dataact/tools/variables.py +25 -0
  29. dataact-0.1.0/dataact/types.py +54 -0
  30. dataact-0.1.0/examples/advanced_wiring.py +152 -0
  31. dataact-0.1.0/examples/data/README.md +6 -0
  32. dataact-0.1.0/examples/data/fred_unrate_2024.csv +13 -0
  33. dataact-0.1.0/examples/quickstart.py +37 -0
  34. dataact-0.1.0/pyproject.toml +62 -0
  35. dataact-0.1.0/tests/__init__.py +0 -0
  36. dataact-0.1.0/tests/smoke_tests.py +652 -0
  37. dataact-0.1.0/tests/test_agent.py +526 -0
  38. dataact-0.1.0/tests/test_cache.py +282 -0
  39. dataact-0.1.0/tests/test_docs.py +63 -0
  40. dataact-0.1.0/tests/test_format.py +100 -0
  41. dataact-0.1.0/tests/test_integration.py +304 -0
  42. dataact-0.1.0/tests/test_logger.py +134 -0
  43. dataact-0.1.0/tests/test_loop.py +362 -0
  44. dataact-0.1.0/tests/test_loop_reminders.py +204 -0
  45. dataact-0.1.0/tests/test_observe.py +33 -0
  46. dataact-0.1.0/tests/test_providers.py +201 -0
  47. dataact-0.1.0/tests/test_providers_openai.py +276 -0
  48. dataact-0.1.0/tests/test_schema.py +97 -0
  49. dataact-0.1.0/tests/test_serialize.py +105 -0
  50. dataact-0.1.0/tests/test_testing.py +70 -0
  51. dataact-0.1.0/tests/test_tool_connectors.py +106 -0
  52. dataact-0.1.0/tests/test_tool_interpreter.py +139 -0
  53. dataact-0.1.0/tests/test_tool_planner.py +94 -0
  54. dataact-0.1.0/tests/test_tool_subagent.py +461 -0
  55. dataact-0.1.0/tests/test_tool_variables.py +40 -0
  56. dataact-0.1.0/tests/test_types.py +72 -0
  57. dataact-0.1.0/uv.lock +998 -0
@@ -0,0 +1,18 @@
1
+ name: CI
2
+
3
+ on: [push, pull_request]
4
+
5
+ jobs:
6
+ test:
7
+ runs-on: ubuntu-latest
8
+ strategy:
9
+ matrix:
10
+ python: ["3.10", "3.11", "3.12"]
11
+ steps:
12
+ - uses: actions/checkout@v4
13
+ - uses: astral-sh/setup-uv@v3
14
+ - run: uv python install ${{ matrix.python }}
15
+ - run: uv sync --python ${{ matrix.python }}
16
+ - run: uv run --python ${{ matrix.python }} ruff check dataact tests
17
+ - run: uv run --python ${{ matrix.python }} ruff format --check dataact tests
18
+ - run: uv run --python ${{ matrix.python }} pytest tests/ -m "not live"
@@ -0,0 +1,15 @@
1
+ name: Release to TestPyPI
2
+
3
+ on:
4
+ workflow_dispatch:
5
+
6
+ jobs:
7
+ publish:
8
+ runs-on: ubuntu-latest
9
+ steps:
10
+ - uses: actions/checkout@v4
11
+ - uses: astral-sh/setup-uv@v3
12
+ - run: uv build
13
+ - run: uv publish --publish-url https://test.pypi.org/legacy/
14
+ env:
15
+ UV_PUBLISH_TOKEN: ${{ secrets.TEST_PYPI_TOKEN }}
@@ -0,0 +1,26 @@
1
+ name: Release to PyPI
2
+
3
+ on:
4
+ push:
5
+ tags:
6
+ - "v*.*.*"
7
+ release:
8
+ types: [published]
9
+ workflow_dispatch:
10
+
11
+ jobs:
12
+ publish:
13
+ runs-on: ubuntu-latest
14
+ environment: pypi
15
+ permissions:
16
+ id-token: write
17
+ steps:
18
+ - uses: actions/checkout@v4
19
+ - uses: astral-sh/setup-uv@v3
20
+ - run: uv python install 3.12
21
+ - run: uv sync --python 3.12 --frozen
22
+ - run: uv run --python 3.12 ruff check dataact tests
23
+ - run: uv run --python 3.12 ruff format --check dataact tests
24
+ - run: uv run --python 3.12 pytest tests/ -m "not live"
25
+ - run: uv build --no-sources
26
+ - run: uv publish
@@ -0,0 +1,22 @@
1
+ __pycache__/
2
+ *.py[cod]
3
+ *.egg-info/
4
+ .eggs/
5
+ dist/
6
+ build/
7
+ .venv/
8
+ venv/
9
+ env/
10
+ .env
11
+ *.egg
12
+ .pytest_cache/
13
+ .mypy_cache/
14
+ .ruff_cache/
15
+ runs/
16
+ *.jsonl
17
+ .DS_Store
18
+ plan/
19
+ .claude/
20
+ AGENTS.md
21
+ CLAUDE.md
22
+ blogs/
dataact-0.1.0/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Max Khor
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
dataact-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,212 @@
1
+ Metadata-Version: 2.4
2
+ Name: dataact
3
+ Version: 0.1.0
4
+ Summary: Data agent with Python-native tools (no bash)
5
+ Project-URL: Homepage, https://github.com/maxkskhor/dataact
6
+ Project-URL: Repository, https://github.com/maxkskhor/dataact
7
+ Project-URL: Issues, https://github.com/maxkskhor/dataact/issues
8
+ Author: Max Khor
9
+ License: MIT
10
+ License-File: LICENSE
11
+ Requires-Python: >=3.10
12
+ Requires-Dist: anthropic
13
+ Requires-Dist: loguru
14
+ Requires-Dist: numpy
15
+ Requires-Dist: pandas
16
+ Provides-Extra: dev
17
+ Requires-Dist: openai; extra == 'dev'
18
+ Requires-Dist: pytest; extra == 'dev'
19
+ Requires-Dist: pytest-mock; extra == 'dev'
20
+ Requires-Dist: python-dotenv; extra == 'dev'
21
+ Provides-Extra: openai
22
+ Requires-Dist: openai; extra == 'openai'
23
+ Description-Content-Type: text/markdown
24
+
25
+ # dataact
26
+
27
+ *(data + ReAct — a ReAct agent harness built for data workflows)*
28
+
29
+ A minimal, transparent, data-native agent harness for Python — built without bash.
30
+
31
+ Most agent frameworks hand the model a shell and call it a day. `dataact` takes a different approach: the model operates entirely through a sandboxed Python interpreter, with data stored in a session cache and exposed as named handles. No bash. No framework magic. Just a loop you can read in an afternoon.
32
+
33
+ Built as an installable reference implementation for engineers who want to understand how a production-style harness actually works. It is not a polished SDK surface; the convenience API exists to remove setup noise while keeping the harness boundaries visible.
34
+
35
+ The design is covered in a three-part series:
36
+
37
+ - [Designing a ReAct Harness for Data Workflows Without Bash](https://maxkskhor.substack.com/p/designing-a-react-harness-for-data)
38
+ - [How a Bash-Free Data Agent Remembers Its Work](https://maxkskhor.substack.com/p/how-a-bash-free-data-agent-remembers)
39
+ - [The Bugs Hidden Inside a Data Agent Harness](https://maxkskhor.substack.com/p/the-engineering-invariants-behind)
40
+
41
+ ---
42
+
43
+ ## Why no bash?
44
+
45
+ Giving an agent shell access is the path of least resistance, but it creates real problems in production: unpredictable side effects, security exposure, and behaviour that's hard to reproduce. `dataact` deliberately constrains the model to Python only — which turns out to be enough for most data workloads and forces cleaner tool design.
46
+
47
+ ---
48
+
49
+ ## Core design decisions
50
+
51
+ Each decision here is intentional. Understanding them is the point.
52
+
53
+ **Handle/snapshot pattern**
54
+ Large objects (DataFrames, arrays, query results) live in a `SessionCache`, not in message history. The model only sees a compact snapshot — shape, columns, a few sample rows. It accesses the data by writing Python against the handle name. This keeps context lean without hiding data from the model.
55
+
56
+ **Prefix-stable system prompt**
57
+ The system prompt never changes between turns. Reminders, state, and nags are appended to the conversation suffix. This is a KV-cache discipline: a stable prefix means the provider can cache it, which reduces latency and cost on long runs.
58
+
59
+ **Progressive connector disclosure**
60
+ Data connectors (databases, APIs, warehouses) are registered but hidden from the tool list until explicitly loaded. A shorter tool list means the model makes better routing decisions. Connectors are only visible when relevant.
61
+
62
+ **Subagent isolation**
63
+ Spawned subagents get a fresh adapter and a fresh cache. State is transferred explicitly via `input_handles`. No implicit shared state. This makes subagent behaviour reproducible and debuggable.
64
+
65
+ **Suffix-only nag reminders**
66
+ The planner escalates reminders at 4 / 8 / 12 turns without progress. These are always appended to the suffix, never inserted into the prefix, so the KV cache is never busted by reminder text.
67
+
68
+ **JSONL turn logging**
69
+ Every turn is logged to a `.jsonl` file from the start. Not bolted on later. Each line is a complete turn record including latency, token counts, and cache hit/miss. Reproducibility is a first-class concern.
70
+
71
+ ---
72
+
73
+ ## Install
74
+
75
+ ```bash
76
+ # requires Python 3.10+ and uv
77
+ uv sync
78
+ ```
79
+
80
+ ## Quick start
81
+
82
+ `Agent` needs a provider adapter. The adapter is the boundary between the
83
+ provider SDK and the harness: it turns Anthropic/OpenAI responses into
84
+ `dataact`'s normalised `Message`, `ToolUseBlock`, and token-count types. It is
85
+ explicit on purpose so the harness is not tied to one model provider, and tests
86
+ can swap in `FakeAdapter` without touching the loop.
87
+
88
+ For Anthropic:
89
+
90
+ ```python
91
+ from dataact import Agent
92
+ from dataact.providers.anthropic import AnthropicAdapter
93
+
94
+ adapter = AnthropicAdapter(model="claude-sonnet-4-6")
95
+ agent = Agent(adapter=adapter, system="You are a data analyst.")
96
+
97
+ result = agent.run("Compute the mean of [1, 2, 3, 4, 5] and print it.")
98
+ print(result)
99
+ ```
100
+
101
+ For OpenAI, install the optional extra and change only the adapter:
102
+
103
+ ```bash
104
+ pip install "dataact[openai]"
105
+ ```
106
+
107
+ ```python
108
+ from dataact.providers.openai import OpenAIAdapter
109
+
110
+ adapter = OpenAIAdapter(model="gpt-4o-mini")
111
+ ```
112
+
113
+ Run the minimal Anthropic example:
114
+
115
+ ```bash
116
+ uv run python examples/quickstart.py
117
+ ```
118
+
119
+ `examples/quickstart.py` requires `ANTHROPIC_API_KEY` when run as a script. Tests import `build_agent()` and drive it with `FakeAdapter`, so the example stays covered without token spend.
120
+
121
+ ## Connector example
122
+
123
+ Connector helpers keep the quick path small while preserving progressive disclosure. Connector tools start hidden; the model must call `load_connectors` before it can use them.
124
+
125
+ ```python
126
+ from dataact import Agent
127
+ from dataact.providers.anthropic import AnthropicAdapter
128
+
129
+ adapter = AnthropicAdapter(model="claude-sonnet-4-6")
130
+ agent = Agent(adapter=adapter, system="You are a data analyst.")
131
+
132
+ market_data = agent.connector(
133
+ "market_data",
134
+ description="Market data tools.",
135
+ )
136
+
137
+
138
+ def fetch_ohlcv(symbol: str) -> list[dict]:
139
+ return [{"symbol": symbol, "close": 101.2}]
140
+
141
+
142
+ market_data.tool(
143
+ fetch_ohlcv,
144
+ description="Fetch OHLCV data for a ticker.",
145
+ )
146
+
147
+ result = agent.run("Load market_data and inspect AAPL.")
148
+ print(result)
149
+ ```
150
+
151
+ ## What `Agent` composes
152
+
153
+ `Agent` is a thin composition layer over the lower-level primitives:
154
+
155
+ - A provider adapter translates model-provider SDK objects into the harness's normalised response types.
156
+ - `Harness` owns the ReAct loop, messages, dispatch, reminders, and JSONL logging.
157
+ - `SessionCache` stores large values as handles plus compact snapshots.
158
+ - `python_interpreter` is the controlled execution surface; there is no bash tool.
159
+ - `list_variables` exposes cache handles without dumping raw payloads.
160
+ - `ConnectorRegistry` keeps connector tools hidden until loaded.
161
+ - `Planner` reminders and subagents are opt-in helpers, not a second runtime.
162
+
163
+ For the explicit wiring, read [examples/advanced_wiring.py](examples/advanced_wiring.py). It deliberately shows the moving parts that `Agent` composes.
164
+
165
+ Run the advanced example — it loads a checked-in FRED unemployment-rate sample, runs analysis, uses subagents and the planner (requires `ANTHROPIC_API_KEY`):
166
+
167
+ ```bash
168
+ uv run python examples/advanced_wiring.py
169
+ ```
170
+
171
+ Run tests:
172
+
173
+ ```bash
174
+ uv run pytest tests/ -v
175
+ uv run pytest tests/ -m live -v # requires provider API keys
176
+ ```
177
+
178
+ ---
179
+
180
+ ## Project structure
181
+
182
+ ```
183
+ dataact/
184
+ loop.py # Harness: the core ReAct loop
185
+ cache.py # SessionCache: handle/snapshot storage
186
+ providers/ # Normalised adapter interface (Anthropic and OpenAI)
187
+ tools/
188
+ interpreter.py # Sandboxed Python executor
189
+ connectors.py # Progressive connector registry
190
+ planner.py # Plan/nag tool
191
+ subagent.py # Isolated subagent spawning
192
+ variables.py # list_variables tool
193
+ types.py # Shared types: Message, ToolSpec, ContentBlock
194
+ logger.py # JSONL turn logging
195
+ observe.py # Latency measurement
196
+ examples/
197
+ quickstart.py # Minimal Agent path
198
+ advanced_wiring.py # Explicit Harness wiring
199
+ data/ # Small public sample data for the advanced demo
200
+ ```
201
+
202
+ ---
203
+
204
+ ## Sandbox disclaimer
205
+
206
+ The Python interpreter uses AST checks and restricted globals to reduce accidental misuse. It is not a container sandbox and should not be treated as safe for untrusted input.
207
+
208
+ ---
209
+
210
+ ## License
211
+
212
+ MIT
@@ -0,0 +1,188 @@
1
+ # dataact
2
+
3
+ *(data + ReAct — a ReAct agent harness built for data workflows)*
4
+
5
+ A minimal, transparent, data-native agent harness for Python — built without bash.
6
+
7
+ Most agent frameworks hand the model a shell and call it a day. `dataact` takes a different approach: the model operates entirely through a sandboxed Python interpreter, with data stored in a session cache and exposed as named handles. No bash. No framework magic. Just a loop you can read in an afternoon.
8
+
9
+ Built as an installable reference implementation for engineers who want to understand how a production-style harness actually works. It is not a polished SDK surface; the convenience API exists to remove setup noise while keeping the harness boundaries visible.
10
+
11
+ The design is covered in a three-part series:
12
+
13
+ - [Designing a ReAct Harness for Data Workflows Without Bash](https://maxkskhor.substack.com/p/designing-a-react-harness-for-data)
14
+ - [How a Bash-Free Data Agent Remembers Its Work](https://maxkskhor.substack.com/p/how-a-bash-free-data-agent-remembers)
15
+ - [The Bugs Hidden Inside a Data Agent Harness](https://maxkskhor.substack.com/p/the-engineering-invariants-behind)
16
+
17
+ ---
18
+
19
+ ## Why no bash?
20
+
21
+ Giving an agent shell access is the path of least resistance, but it creates real problems in production: unpredictable side effects, security exposure, and behaviour that's hard to reproduce. `dataact` deliberately constrains the model to Python only — which turns out to be enough for most data workloads and forces cleaner tool design.
22
+
23
+ ---
24
+
25
+ ## Core design decisions
26
+
27
+ Each decision here is intentional. Understanding them is the point.
28
+
29
+ **Handle/snapshot pattern**
30
+ Large objects (DataFrames, arrays, query results) live in a `SessionCache`, not in message history. The model only sees a compact snapshot — shape, columns, a few sample rows. It accesses the data by writing Python against the handle name. This keeps context lean without hiding data from the model.
31
+
32
+ **Prefix-stable system prompt**
33
+ The system prompt never changes between turns. Reminders, state, and nags are appended to the conversation suffix. This is a KV-cache discipline: a stable prefix means the provider can cache it, which reduces latency and cost on long runs.
34
+
35
+ **Progressive connector disclosure**
36
+ Data connectors (databases, APIs, warehouses) are registered but hidden from the tool list until explicitly loaded. A shorter tool list means the model makes better routing decisions. Connectors are only visible when relevant.
37
+
38
+ **Subagent isolation**
39
+ Spawned subagents get a fresh adapter and a fresh cache. State is transferred explicitly via `input_handles`. No implicit shared state. This makes subagent behaviour reproducible and debuggable.
40
+
41
+ **Suffix-only nag reminders**
42
+ The planner escalates reminders at 4 / 8 / 12 turns without progress. These are always appended to the suffix, never inserted into the prefix, so the KV cache is never busted by reminder text.
43
+
44
+ **JSONL turn logging**
45
+ Every turn is logged to a `.jsonl` file from the start. Not bolted on later. Each line is a complete turn record including latency, token counts, and cache hit/miss. Reproducibility is a first-class concern.
46
+
47
+ ---
48
+
49
+ ## Install
50
+
51
+ ```bash
52
+ # requires Python 3.10+ and uv
53
+ uv sync
54
+ ```
55
+
56
+ ## Quick start
57
+
58
+ `Agent` needs a provider adapter. The adapter is the boundary between the
59
+ provider SDK and the harness: it turns Anthropic/OpenAI responses into
60
+ `dataact`'s normalised `Message`, `ToolUseBlock`, and token-count types. It is
61
+ explicit on purpose so the harness is not tied to one model provider, and tests
62
+ can swap in `FakeAdapter` without touching the loop.
63
+
64
+ For Anthropic:
65
+
66
+ ```python
67
+ from dataact import Agent
68
+ from dataact.providers.anthropic import AnthropicAdapter
69
+
70
+ adapter = AnthropicAdapter(model="claude-sonnet-4-6")
71
+ agent = Agent(adapter=adapter, system="You are a data analyst.")
72
+
73
+ result = agent.run("Compute the mean of [1, 2, 3, 4, 5] and print it.")
74
+ print(result)
75
+ ```
76
+
77
+ For OpenAI, install the optional extra and change only the adapter:
78
+
79
+ ```bash
80
+ pip install "dataact[openai]"
81
+ ```
82
+
83
+ ```python
84
+ from dataact.providers.openai import OpenAIAdapter
85
+
86
+ adapter = OpenAIAdapter(model="gpt-4o-mini")
87
+ ```
88
+
89
+ Run the minimal Anthropic example:
90
+
91
+ ```bash
92
+ uv run python examples/quickstart.py
93
+ ```
94
+
95
+ `examples/quickstart.py` requires `ANTHROPIC_API_KEY` when run as a script. Tests import `build_agent()` and drive it with `FakeAdapter`, so the example stays covered without token spend.
96
+
97
+ ## Connector example
98
+
99
+ Connector helpers keep the quick path small while preserving progressive disclosure. Connector tools start hidden; the model must call `load_connectors` before it can use them.
100
+
101
+ ```python
102
+ from dataact import Agent
103
+ from dataact.providers.anthropic import AnthropicAdapter
104
+
105
+ adapter = AnthropicAdapter(model="claude-sonnet-4-6")
106
+ agent = Agent(adapter=adapter, system="You are a data analyst.")
107
+
108
+ market_data = agent.connector(
109
+ "market_data",
110
+ description="Market data tools.",
111
+ )
112
+
113
+
114
+ def fetch_ohlcv(symbol: str) -> list[dict]:
115
+ return [{"symbol": symbol, "close": 101.2}]
116
+
117
+
118
+ market_data.tool(
119
+ fetch_ohlcv,
120
+ description="Fetch OHLCV data for a ticker.",
121
+ )
122
+
123
+ result = agent.run("Load market_data and inspect AAPL.")
124
+ print(result)
125
+ ```
126
+
127
+ ## What `Agent` composes
128
+
129
+ `Agent` is a thin composition layer over the lower-level primitives:
130
+
131
+ - A provider adapter translates model-provider SDK objects into the harness's normalised response types.
132
+ - `Harness` owns the ReAct loop, messages, dispatch, reminders, and JSONL logging.
133
+ - `SessionCache` stores large values as handles plus compact snapshots.
134
+ - `python_interpreter` is the controlled execution surface; there is no bash tool.
135
+ - `list_variables` exposes cache handles without dumping raw payloads.
136
+ - `ConnectorRegistry` keeps connector tools hidden until loaded.
137
+ - `Planner` reminders and subagents are opt-in helpers, not a second runtime.
138
+
139
+ For the explicit wiring, read [examples/advanced_wiring.py](examples/advanced_wiring.py). It deliberately shows the moving parts that `Agent` composes.
140
+
141
+ Run the advanced example - it loads a checked-in FRED unemployment-rate sample, runs analysis, uses subagents and the planner (requires `ANTHROPIC_API_KEY`):
142
+
143
+ ```bash
144
+ uv run python examples/advanced_wiring.py
145
+ ```
146
+
147
+ Run tests:
148
+
149
+ ```bash
150
+ uv run pytest tests/ -v
151
+ uv run pytest tests/ -m live -v # requires provider API keys
152
+ ```
153
+
154
+ ---
155
+
156
+ ## Project structure
157
+
158
+ ```
159
+ dataact/
160
+ loop.py # Harness: the core ReAct loop
161
+ cache.py # SessionCache: handle/snapshot storage
162
+ providers/ # Normalised adapter interface (Anthropic and OpenAI)
163
+ tools/
164
+ interpreter.py # Sandboxed Python executor
165
+ connectors.py # Progressive connector registry
166
+ planner.py # Plan/nag tool
167
+ subagent.py # Isolated subagent spawning
168
+ variables.py # list_variables tool
169
+ types.py # Shared types: Message, ToolSpec, ContentBlock
170
+ logger.py # JSONL turn logging
171
+ observe.py # Latency measurement
172
+ examples/
173
+ quickstart.py # Minimal Agent path
174
+ advanced_wiring.py # Explicit Harness wiring
175
+ data/ # Small public sample data for the advanced demo
176
+ ```
177
+
178
+ ---
179
+
180
+ ## Sandbox disclaimer
181
+
182
+ The Python interpreter uses AST checks and restricted globals to reduce accidental misuse. It is not a container sandbox and should not be treated as safe for untrusted input.
183
+
184
+ ---
185
+
186
+ ## License
187
+
188
+ MIT
@@ -0,0 +1,31 @@
1
+ from dataact.agent import Agent
2
+ from dataact.exceptions import (
3
+ MaxTurnsExceeded,
4
+ SubagentRecursionError,
5
+ ToolNotFoundError,
6
+ )
7
+ from dataact.providers.base import NormalizedResponse, ProviderAdapter, StopReason
8
+ from dataact.types import (
9
+ ContentBlock,
10
+ Message,
11
+ TextBlock,
12
+ ToolResultBlock,
13
+ ToolSpec,
14
+ ToolUseBlock,
15
+ )
16
+
17
+ __all__ = [
18
+ "Agent",
19
+ "ContentBlock",
20
+ "MaxTurnsExceeded",
21
+ "Message",
22
+ "NormalizedResponse",
23
+ "ProviderAdapter",
24
+ "StopReason",
25
+ "SubagentRecursionError",
26
+ "TextBlock",
27
+ "ToolNotFoundError",
28
+ "ToolResultBlock",
29
+ "ToolSpec",
30
+ "ToolUseBlock",
31
+ ]