datascience-agent 0.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30) hide show
  1. datascience_agent-0.3.0/.env.example +69 -0
  2. datascience_agent-0.3.0/.github/workflows/python-publish.yml +70 -0
  3. datascience_agent-0.3.0/.gitignore +63 -0
  4. datascience_agent-0.3.0/CONTRIBUTING.md +198 -0
  5. datascience_agent-0.3.0/PKG-INFO +279 -0
  6. datascience_agent-0.3.0/README.md +233 -0
  7. datascience_agent-0.3.0/examples/basic_usage.py +72 -0
  8. datascience_agent-0.3.0/examples/fastapi_example.py +164 -0
  9. datascience_agent-0.3.0/examples/streaming_example.py +101 -0
  10. datascience_agent-0.3.0/pyproject.toml +106 -0
  11. datascience_agent-0.3.0/src/dsagent/__init__.py +74 -0
  12. datascience_agent-0.3.0/src/dsagent/agents/__init__.py +7 -0
  13. datascience_agent-0.3.0/src/dsagent/agents/base.py +366 -0
  14. datascience_agent-0.3.0/src/dsagent/cli.py +187 -0
  15. datascience_agent-0.3.0/src/dsagent/core/__init__.py +11 -0
  16. datascience_agent-0.3.0/src/dsagent/core/context.py +136 -0
  17. datascience_agent-0.3.0/src/dsagent/core/engine.py +588 -0
  18. datascience_agent-0.3.0/src/dsagent/core/executor.py +231 -0
  19. datascience_agent-0.3.0/src/dsagent/core/planner.py +180 -0
  20. datascience_agent-0.3.0/src/dsagent/schema/__init__.py +21 -0
  21. datascience_agent-0.3.0/src/dsagent/schema/models.py +268 -0
  22. datascience_agent-0.3.0/src/dsagent/utils/__init__.py +10 -0
  23. datascience_agent-0.3.0/src/dsagent/utils/logger.py +245 -0
  24. datascience_agent-0.3.0/src/dsagent/utils/notebook.py +483 -0
  25. datascience_agent-0.3.0/src/dsagent/utils/run_logger.py +352 -0
  26. datascience_agent-0.3.0/tests/__init__.py +1 -0
  27. datascience_agent-0.3.0/tests/test_context.py +117 -0
  28. datascience_agent-0.3.0/tests/test_notebook.py +162 -0
  29. datascience_agent-0.3.0/tests/test_run_logger.py +293 -0
  30. datascience_agent-0.3.0/uv.lock +4673 -0
@@ -0,0 +1,69 @@
1
+ # =============================================================================
2
+ # DSAgent - Configuration
3
+ # =============================================================================
4
+ # Copy this file to .env and fill in your values
5
+ # Priority: CLI args > Environment variables > .env file > defaults
6
+
7
+ # =============================================================================
8
+ # MODEL CONFIGURATION (choose one provider)
9
+ # =============================================================================
10
+
11
+ # Default model to use (overridden by --model CLI flag)
12
+ LLM_MODEL=gpt-4o
13
+
14
+ # Examples:
15
+ # LLM_MODEL=gpt-4o # OpenAI GPT-4o
16
+ # LLM_MODEL=gpt-4o-mini # OpenAI GPT-4o Mini (cheaper)
17
+ # LLM_MODEL=claude-3-5-sonnet-20241022 # Anthropic Claude 3.5 Sonnet
18
+ # LLM_MODEL=claude-3-opus-20240229 # Anthropic Claude 3 Opus
19
+ # LLM_MODEL=gemini/gemini-1.5-pro # Google Gemini 1.5 Pro
20
+ # LLM_MODEL=ollama/llama3 # Ollama local model
21
+ # LLM_MODEL=ollama/codellama # Ollama CodeLlama
22
+
23
+ # =============================================================================
24
+ # API KEYS (set the one for your chosen provider)
25
+ # =============================================================================
26
+
27
+ # OpenAI
28
+ OPENAI_API_KEY=sk-your-openai-api-key-here
29
+
30
+ # Anthropic
31
+ ANTHROPIC_API_KEY=sk-ant-your-anthropic-api-key-here
32
+
33
+ # Google
34
+ GOOGLE_API_KEY=your-google-api-key-here
35
+
36
+ # =============================================================================
37
+ # CUSTOM API BASE (for local models or proxies)
38
+ # =============================================================================
39
+
40
+ # LM Studio (local)
41
+ # LLM_API_BASE=http://localhost:1234/v1
42
+ # LLM_MODEL=openai/your-model-name
43
+
44
+ # Ollama (if not using default port)
45
+ # OLLAMA_API_BASE=http://localhost:11434
46
+
47
+ # Azure OpenAI
48
+ # AZURE_API_KEY=your-azure-api-key
49
+ # AZURE_API_BASE=https://your-resource.openai.azure.com/
50
+ # AZURE_API_VERSION=2024-02-15-preview
51
+
52
+ # =============================================================================
53
+ # AGENT SETTINGS (optional)
54
+ # =============================================================================
55
+
56
+ # Maximum rounds before stopping (default: 30)
57
+ # DSAGENT_MAX_ROUNDS=30
58
+
59
+ # LLM temperature (default: 0.3)
60
+ # DSAGENT_TEMPERATURE=0.3
61
+
62
+ # Max tokens per response (default: 4096)
63
+ # DSAGENT_MAX_TOKENS=4096
64
+
65
+ # Code execution timeout in seconds (default: 300)
66
+ # DSAGENT_CODE_TIMEOUT=300
67
+
68
+ # Default workspace directory (default: ./workspace)
69
+ # DSAGENT_WORKSPACE=./workspace
@@ -0,0 +1,70 @@
1
+ # This workflow will upload a Python Package to PyPI when a release is created
2
+ # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries
3
+
4
+ # This workflow uses actions that are not certified by GitHub.
5
+ # They are provided by a third-party and are governed by
6
+ # separate terms of service, privacy policy, and support
7
+ # documentation.
8
+
9
+ name: Upload Python Package
10
+
11
+ on:
12
+ release:
13
+ types: [published]
14
+
15
+ permissions:
16
+ contents: read
17
+
18
+ jobs:
19
+ release-build:
20
+ runs-on: ubuntu-latest
21
+
22
+ steps:
23
+ - uses: actions/checkout@v4
24
+
25
+ - uses: actions/setup-python@v5
26
+ with:
27
+ python-version: "3.x"
28
+
29
+ - name: Build release distributions
30
+ run: |
31
+ # NOTE: put your own distribution build steps here.
32
+ python -m pip install build
33
+ python -m build
34
+
35
+ - name: Upload distributions
36
+ uses: actions/upload-artifact@v4
37
+ with:
38
+ name: release-dists
39
+ path: dist/
40
+
41
+ pypi-publish:
42
+ runs-on: ubuntu-latest
43
+ needs:
44
+ - release-build
45
+ permissions:
46
+ # IMPORTANT: this permission is mandatory for trusted publishing
47
+ id-token: write
48
+
49
+ # Dedicated environments with protections for publishing are strongly recommended.
50
+ # For more information, see: https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment#deployment-protection-rules
51
+ environment:
52
+ name: pypi
53
+ # OPTIONAL: uncomment and update to include your PyPI project URL in the deployment status:
54
+ url: https://pypi.org/project/datascience-agent/
55
+ #
56
+ # ALTERNATIVE: if your GitHub Release name is the PyPI project version string
57
+ # ALTERNATIVE: exactly, uncomment the following line instead:
58
+ # url: https://pypi.org/project/YOURPROJECT/${{ github.event.release.name }}
59
+
60
+ steps:
61
+ - name: Retrieve release distributions
62
+ uses: actions/download-artifact@v4
63
+ with:
64
+ name: release-dists
65
+ path: dist/
66
+
67
+ - name: Publish release distributions to PyPI
68
+ uses: pypa/gh-action-pypi-publish@release/v1
69
+ with:
70
+ packages-dir: dist/
@@ -0,0 +1,63 @@
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ build/
8
+ develop-eggs/
9
+ dist/
10
+ downloads/
11
+ eggs/
12
+ .eggs/
13
+ lib/
14
+ lib64/
15
+ parts/
16
+ sdist/
17
+ var/
18
+ wheels/
19
+ *.egg-info/
20
+ .installed.cfg
21
+ *.egg
22
+
23
+ # Virtual environments
24
+ .venv/
25
+ venv/
26
+ ENV/
27
+ env/
28
+
29
+ # IDE
30
+ .idea/
31
+ .vscode/
32
+ *.swp
33
+ *.swo
34
+ *~
35
+
36
+ # Testing
37
+ .tox/
38
+ .nox/
39
+ .coverage
40
+ .coverage.*
41
+ htmlcov/
42
+ .pytest_cache/
43
+ .mypy_cache/
44
+
45
+ # Jupyter
46
+ .ipynb_checkpoints/
47
+
48
+ # Environment variables
49
+ .env
50
+ .env.local
51
+
52
+ # OS
53
+ .DS_Store
54
+ Thumbs.db
55
+
56
+ # Project specific
57
+ workspace/
58
+ workspaces/
59
+ *.ipynb
60
+ !examples/*.ipynb
61
+
62
+ # uv
63
+ .python-version
@@ -0,0 +1,198 @@
1
+ # Contributing to DSAgent
2
+
3
+ ## Git Flow
4
+
5
+ This project follows Git Flow branching strategy.
6
+
7
+ ### Branch Structure
8
+
9
+ ```
10
+ main ← Production (stable releases only)
11
+
12
+ develop ← Integration (development changes)
13
+
14
+ ├── feature/* ← New features
15
+ ├── bugfix/* ← Bug fixes
16
+ ├── release/* ← Release preparation
17
+ └── hotfix/* ← Urgent production fixes
18
+ ```
19
+
20
+ ### Branch Naming Convention
21
+
22
+ | Type | Pattern | Example |
23
+ |------|---------|---------|
24
+ | Feature | `feature/short-description` | `feature/add-streaming-api` |
25
+ | Bug fix | `bugfix/issue-or-description` | `bugfix/fix-notebook-export` |
26
+ | Release | `release/vX.Y.Z` | `release/v0.2.0` |
27
+ | Hotfix | `hotfix/description` | `hotfix/critical-memory-leak` |
28
+
29
+ ## Development Workflow
30
+
31
+ ### 1. New Feature
32
+
33
+ ```bash
34
+ # Start from develop
35
+ git checkout develop
36
+ git pull origin develop
37
+
38
+ # Create feature branch
39
+ git checkout -b feature/my-new-feature
40
+
41
+ # Work on your feature...
42
+ # Make commits with clear messages
43
+
44
+ # Push and create PR
45
+ git push -u origin feature/my-new-feature
46
+ ```
47
+
48
+ Then create a Pull Request to `develop` on GitHub.
49
+
50
+ ### 2. Bug Fix
51
+
52
+ ```bash
53
+ # Start from develop
54
+ git checkout develop
55
+ git pull origin develop
56
+
57
+ # Create bugfix branch
58
+ git checkout -b bugfix/fix-issue-123
59
+
60
+ # Fix the bug...
61
+
62
+ # Push and create PR
63
+ git push -u origin bugfix/fix-issue-123
64
+ ```
65
+
66
+ Then create a Pull Request to `develop` on GitHub.
67
+
68
+ ### 3. Release
69
+
70
+ When `develop` is ready for a new release:
71
+
72
+ ```bash
73
+ # Start from develop
74
+ git checkout develop
75
+ git pull origin develop
76
+
77
+ # Create release branch
78
+ git checkout -b release/v0.2.0
79
+
80
+ # Update version in pyproject.toml
81
+ # Update CHANGELOG if exists
82
+ # Final testing
83
+
84
+ # Push
85
+ git push -u origin release/v0.2.0
86
+ ```
87
+
88
+ Then:
89
+ 1. Create PR to `main`
90
+ 2. After merge, tag the release: `git tag v0.2.0`
91
+ 3. Merge back to `develop`
92
+
93
+ ### 4. Hotfix (Urgent Production Fix)
94
+
95
+ ```bash
96
+ # Start from main
97
+ git checkout main
98
+ git pull origin main
99
+
100
+ # Create hotfix branch
101
+ git checkout -b hotfix/critical-fix
102
+
103
+ # Fix the issue...
104
+
105
+ # Push
106
+ git push -u origin hotfix/critical-fix
107
+ ```
108
+
109
+ Then:
110
+ 1. Create PR to `main`
111
+ 2. After merge, also merge to `develop`
112
+
113
+ ## Commit Messages
114
+
115
+ Use clear, descriptive commit messages:
116
+
117
+ ```
118
+ type: short description
119
+
120
+ Longer description if needed.
121
+
122
+ - Bullet points for multiple changes
123
+ - Another change
124
+ ```
125
+
126
+ **Types:**
127
+ - `feat:` New feature
128
+ - `fix:` Bug fix
129
+ - `docs:` Documentation only
130
+ - `refactor:` Code refactoring
131
+ - `test:` Adding tests
132
+ - `chore:` Maintenance tasks
133
+
134
+ **Examples:**
135
+ ```
136
+ feat: add streaming API endpoint
137
+
138
+ fix: resolve notebook cell ordering issue
139
+
140
+ docs: update README with CLI examples
141
+
142
+ refactor: simplify plan parsing logic
143
+ ```
144
+
145
+ ## Development Setup
146
+
147
+ ```bash
148
+ # Clone repository
149
+ git clone https://github.com/nmlemus/dsagent.git
150
+ cd dsagent
151
+
152
+ # Create environment with uv
153
+ uv venv --python 3.11
154
+ source .venv/bin/activate
155
+
156
+ # Install with dev dependencies
157
+ uv sync --all-extras
158
+
159
+ # Run tests
160
+ uv run pytest
161
+
162
+ # Run linting
163
+ uv run ruff check .
164
+ ```
165
+
166
+ ## Code Style
167
+
168
+ - Use `ruff` for linting
169
+ - Use `mypy` for type checking
170
+ - Follow PEP 8 guidelines
171
+ - Add type hints to all functions
172
+ - Write docstrings for public APIs
173
+
174
+ ```bash
175
+ # Check code style
176
+ uv run ruff check .
177
+
178
+ # Auto-fix issues
179
+ uv run ruff check --fix .
180
+
181
+ # Type checking
182
+ uv run mypy src/
183
+ ```
184
+
185
+ ## Pull Request Checklist
186
+
187
+ Before submitting a PR:
188
+
189
+ - [ ] Code follows project style guidelines
190
+ - [ ] Tests pass locally
191
+ - [ ] New features have tests
192
+ - [ ] Documentation updated if needed
193
+ - [ ] Commit messages are clear
194
+ - [ ] PR description explains the changes
195
+
196
+ ## Questions?
197
+
198
+ Open an issue on GitHub for any questions or discussions.
@@ -0,0 +1,279 @@
1
+ Metadata-Version: 2.4
2
+ Name: datascience-agent
3
+ Version: 0.3.0
4
+ Summary: AI Agent with dynamic planning and persistent Jupyter kernel execution for data analysis
5
+ Project-URL: Homepage, https://github.com/nmlemus/dsagent
6
+ Project-URL: Documentation, https://github.com/nmlemus/dsagent#readme
7
+ Project-URL: Repository, https://github.com/nmlemus/dsagent
8
+ Author: DSAgent Contributors
9
+ License-Expression: MIT
10
+ Keywords: agent,ai,autonomous-agent,data-analysis,datascience-agent,dsagent,jupyter,llm,machine-learning,planner
11
+ Classifier: Development Status :: 3 - Alpha
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: Intended Audience :: Science/Research
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Classifier: Programming Language :: Python :: 3.12
19
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
20
+ Classifier: Topic :: Scientific/Engineering :: Information Analysis
21
+ Requires-Python: >=3.10
22
+ Requires-Dist: ipykernel>=6.0.0
23
+ Requires-Dist: jupyter-client>=8.0.0
24
+ Requires-Dist: litellm>=1.0.0
25
+ Requires-Dist: matplotlib>=3.7.0
26
+ Requires-Dist: numpy>=1.24.0
27
+ Requires-Dist: openpyxl>=3.1.0
28
+ Requires-Dist: pandas>=2.0.0
29
+ Requires-Dist: pycaret>=3.0.0
30
+ Requires-Dist: pydantic-settings>=2.0.0
31
+ Requires-Dist: pydantic>=2.0.0
32
+ Requires-Dist: python-dotenv>=1.0.0
33
+ Requires-Dist: scikit-learn>=1.3.0
34
+ Requires-Dist: seaborn>=0.12.0
35
+ Requires-Dist: statsmodels>=0.14.0
36
+ Provides-Extra: api
37
+ Requires-Dist: fastapi>=0.100.0; extra == 'api'
38
+ Requires-Dist: sse-starlette>=1.0.0; extra == 'api'
39
+ Requires-Dist: uvicorn>=0.20.0; extra == 'api'
40
+ Provides-Extra: dev
41
+ Requires-Dist: mypy>=1.0.0; extra == 'dev'
42
+ Requires-Dist: pytest-asyncio>=0.21.0; extra == 'dev'
43
+ Requires-Dist: pytest>=7.0.0; extra == 'dev'
44
+ Requires-Dist: ruff>=0.1.0; extra == 'dev'
45
+ Description-Content-Type: text/markdown
46
+
47
+ # DSAgent
48
+
49
+ An AI-powered autonomous agent for data analysis with dynamic planning and persistent Jupyter kernel execution.
50
+
51
+ ## Features
52
+
53
+ - **Dynamic Planning**: Agent creates and follows plans with [x]/[ ] step tracking
54
+ - **Persistent Execution**: Code runs in a Jupyter kernel with variable persistence
55
+ - **Multi-Provider LLM**: Supports OpenAI, Anthropic, Google, Ollama via LiteLLM
56
+ - **Notebook Generation**: Automatically generates clean, runnable Jupyter notebooks
57
+ - **Event Streaming**: Real-time events for UI integration
58
+ - **Comprehensive Logging**: Full execution logs for debugging and ML retraining
59
+ - **Session Management**: State persistence for multi-user scenarios
60
+
61
+ ## Installation
62
+
63
+ Using pip:
64
+ ```bash
65
+ pip install datascience-agent
66
+ ```
67
+
68
+ With FastAPI support:
69
+ ```bash
70
+ pip install "datascience-agent[api]"
71
+ ```
72
+
73
+ Using uv (recommended):
74
+ ```bash
75
+ uv pip install datascience-agent
76
+ uv pip install "datascience-agent[api]" # with FastAPI
77
+ ```
78
+
79
+ For development:
80
+ ```bash
81
+ git clone https://github.com/nmlemus/dsagent
82
+ cd dsagent
83
+ uv sync --all-extras
84
+ ```
85
+
86
+ ## Quick Start
87
+
88
+ ### Basic Usage
89
+
90
+ ```python
91
+ from dsagent import PlannerAgent
92
+
93
+ # Create agent
94
+ with PlannerAgent(model="gpt-4o", workspace="./workspace") as agent:
95
+ result = agent.run("Analyze sales_data.csv and identify top performing products")
96
+
97
+ print(result.answer)
98
+ print(f"Notebook: {result.notebook_path}")
99
+ ```
100
+
101
+ ### With Streaming
102
+
103
+ ```python
104
+ from dsagent import PlannerAgent, EventType
105
+
106
+ agent = PlannerAgent(model="claude-3-sonnet-20240229")
107
+ agent.start()
108
+
109
+ for event in agent.run_stream("Build a predictive model for customer churn"):
110
+ if event.type == EventType.PLAN_UPDATED:
111
+ print(f"Plan: {event.plan.raw_text if event.plan else ''}")
112
+ elif event.type == EventType.CODE_SUCCESS:
113
+ print("Code executed successfully")
114
+ elif event.type == EventType.CODE_FAILED:
115
+ print("Code execution failed")
116
+ elif event.type == EventType.ANSWER_ACCEPTED:
117
+ print(f"Answer: {event.message}")
118
+
119
+ # Get result with notebook after streaming
120
+ result = agent.get_result()
121
+ print(f"Notebook: {result.notebook_path}")
122
+
123
+ agent.shutdown()
124
+ ```
125
+
126
+ ### FastAPI Integration
127
+
128
+ ```python
129
+ from fastapi import FastAPI
130
+ from fastapi.responses import StreamingResponse
131
+ from uuid import uuid4
132
+ from dsagent import PlannerAgent, EventType
133
+
134
+ app = FastAPI()
135
+
136
+ @app.post("/analyze")
137
+ async def analyze(task: str):
138
+ async def event_stream():
139
+ agent = PlannerAgent(
140
+ model="gpt-4o",
141
+ session_id=str(uuid4()),
142
+ )
143
+ agent.start()
144
+
145
+ try:
146
+ for event in agent.run_stream(task):
147
+ yield f"data: {event.to_sse()}\n\n"
148
+ finally:
149
+ agent.shutdown()
150
+
151
+ return StreamingResponse(event_stream(), media_type="text/event-stream")
152
+ ```
153
+
154
+ ## Command Line Interface
155
+
156
+ The package includes a CLI for quick analysis from the terminal:
157
+
158
+ ```bash
159
+ dsagent "Analyze this dataset and create visualizations" --data ./my_data.csv
160
+ ```
161
+
162
+ ### CLI Options
163
+
164
+ | Option | Short | Description |
165
+ |--------|-------|-------------|
166
+ | `--data` | `-d` | Path to data file or directory (required) |
167
+ | `--model` | `-m` | LLM model to use (default: gpt-4o) |
168
+ | `--workspace` | `-w` | Output directory (default: ./workspace) |
169
+ | `--run-id` | | Custom run ID for this execution |
170
+ | `--max-rounds` | `-r` | Max iterations (default: 30) |
171
+ | `--quiet` | `-q` | Suppress verbose output |
172
+ | `--no-stream` | | Disable streaming output |
173
+
174
+ ### CLI Examples
175
+
176
+ ```bash
177
+ # Basic analysis
178
+ dsagent "Find trends and patterns" -d ./sales.csv
179
+
180
+ # With specific model
181
+ dsagent "Build ML model" -d ./dataset -m claude-3-sonnet-20240229
182
+
183
+ # Custom output directory
184
+ dsagent "Create charts" -d ./data -w ./output
185
+
186
+ # With custom run ID
187
+ dsagent "Analyze" -d ./data --run-id my-analysis-001
188
+
189
+ # Quiet mode
190
+ dsagent "Analyze" -d ./data -q
191
+ ```
192
+
193
+ ### Output Structure
194
+
195
+ Each run creates an isolated workspace:
196
+ ```
197
+ workspace/
198
+ └── runs/
199
+ └── {run_id}/
200
+ ├── data/ # Input data (copied)
201
+ ├── notebooks/ # Generated notebooks
202
+ ├── artifacts/ # Images, charts, outputs
203
+ └── logs/
204
+ ├── run.log # Human-readable log
205
+ └── events.jsonl # Structured events for ML
206
+ ```
207
+
208
+ ## Configuration
209
+
210
+ ```python
211
+ from dsagent import PlannerAgent, RunContext
212
+
213
+ # With automatic run isolation
214
+ context = RunContext(workspace="./workspace")
215
+ agent = PlannerAgent(
216
+ model="gpt-4o", # Any LiteLLM-supported model
217
+ context=context, # Run context for isolation
218
+ max_rounds=30, # Max agent iterations
219
+ max_tokens=4096, # Max tokens per response
220
+ temperature=0.2, # LLM temperature
221
+ timeout=300, # Code execution timeout (seconds)
222
+ verbose=True, # Print to console
223
+ event_callback=None, # Callback for events
224
+ )
225
+ ```
226
+
227
+ ## Supported Models
228
+
229
+ Any model supported by [LiteLLM](https://docs.litellm.ai/docs/providers):
230
+
231
+ - OpenAI: `gpt-4o`, `gpt-4-turbo`, `gpt-3.5-turbo`
232
+ - Anthropic: `claude-3-opus-20240229`, `claude-3-sonnet-20240229`
233
+ - Google: `gemini-pro`, `gemini-1.5-pro`
234
+ - Ollama: `ollama/llama3`, `ollama/codellama`
235
+ - And many more...
236
+
237
+ ## Event Types
238
+
239
+ ```python
240
+ from dsagent import EventType
241
+
242
+ EventType.AGENT_STARTED # Agent started processing
243
+ EventType.AGENT_FINISHED # Agent finished
244
+ EventType.AGENT_ERROR # Error occurred
245
+ EventType.ROUND_STARTED # New iteration round
246
+ EventType.ROUND_FINISHED # Round completed
247
+ EventType.LLM_CALL_STARTED # LLM call started
248
+ EventType.LLM_CALL_FINISHED # LLM response received
249
+ EventType.PLAN_CREATED # Plan was created
250
+ EventType.PLAN_UPDATED # Plan was updated
251
+ EventType.CODE_EXECUTING # Code execution started
252
+ EventType.CODE_SUCCESS # Code execution succeeded
253
+ EventType.CODE_FAILED # Code execution failed
254
+ EventType.ANSWER_ACCEPTED # Final answer generated
255
+ EventType.ANSWER_REJECTED # Answer rejected (plan incomplete)
256
+ ```
257
+
258
+ ## Architecture
259
+
260
+ ```
261
+ dsagent/
262
+ ├── agents/
263
+ │ └── base.py # PlannerAgent - main user interface
264
+ ├── core/
265
+ │ ├── context.py # RunContext - workspace management
266
+ │ ├── engine.py # AgentEngine - main loop
267
+ │ ├── executor.py # JupyterExecutor - code execution
268
+ │ └── planner.py # PlanParser - response parsing
269
+ ├── schema/
270
+ │ └── models.py # Pydantic models
271
+ └── utils/
272
+ ├── logger.py # AgentLogger - console logging
273
+ ├── run_logger.py # RunLogger - comprehensive logging
274
+ └── notebook.py # NotebookBuilder - notebook generation
275
+ ```
276
+
277
+ ## License
278
+
279
+ MIT