efficient-agent-protocol 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- efficient_agent_protocol-0.1.0/LICENSE +21 -0
- efficient_agent_protocol-0.1.0/PKG-INFO +155 -0
- efficient_agent_protocol-0.1.0/README.md +135 -0
- efficient_agent_protocol-0.1.0/agent/__init__.py +20 -0
- efficient_agent_protocol-0.1.0/agent/agent_client.py +131 -0
- efficient_agent_protocol-0.1.0/agent/compiler.py +123 -0
- efficient_agent_protocol-0.1.0/agent/providers/__init__.py +16 -0
- efficient_agent_protocol-0.1.0/agent/providers/anthropic_provider.py +73 -0
- efficient_agent_protocol-0.1.0/agent/providers/base.py +40 -0
- efficient_agent_protocol-0.1.0/agent/providers/factory.py +66 -0
- efficient_agent_protocol-0.1.0/agent/providers/google_provider.py +89 -0
- efficient_agent_protocol-0.1.0/agent/providers/openai_provider.py +87 -0
- efficient_agent_protocol-0.1.0/eap/__init__.py +2 -0
- efficient_agent_protocol-0.1.0/eap/agent/__init__.py +27 -0
- efficient_agent_protocol-0.1.0/eap/agent/agent_client.py +4 -0
- efficient_agent_protocol-0.1.0/eap/agent/compiler.py +4 -0
- efficient_agent_protocol-0.1.0/eap/environment/__init__.py +25 -0
- efficient_agent_protocol-0.1.0/eap/environment/executor.py +15 -0
- efficient_agent_protocol-0.1.0/eap/environment/plugin_loader.py +13 -0
- efficient_agent_protocol-0.1.0/eap/environment/tool_registry.py +15 -0
- efficient_agent_protocol-0.1.0/eap/environment/tools/__init__.py +37 -0
- efficient_agent_protocol-0.1.0/eap/environment/tools/example_tools.py +9 -0
- efficient_agent_protocol-0.1.0/eap/environment/tools/file_tools.py +17 -0
- efficient_agent_protocol-0.1.0/eap/environment/tools/web_tools.py +17 -0
- efficient_agent_protocol-0.1.0/eap/protocol/__init__.py +61 -0
- efficient_agent_protocol-0.1.0/eap/protocol/logging_config.py +4 -0
- efficient_agent_protocol-0.1.0/eap/protocol/models.py +3 -0
- efficient_agent_protocol-0.1.0/eap/protocol/settings.py +4 -0
- efficient_agent_protocol-0.1.0/eap/protocol/state_manager.py +4 -0
- efficient_agent_protocol-0.1.0/efficient_agent_protocol.egg-info/PKG-INFO +155 -0
- efficient_agent_protocol-0.1.0/efficient_agent_protocol.egg-info/SOURCES.txt +54 -0
- efficient_agent_protocol-0.1.0/efficient_agent_protocol.egg-info/dependency_links.txt +1 -0
- efficient_agent_protocol-0.1.0/efficient_agent_protocol.egg-info/requires.txt +13 -0
- efficient_agent_protocol-0.1.0/efficient_agent_protocol.egg-info/top_level.txt +4 -0
- efficient_agent_protocol-0.1.0/environment/__init__.py +28 -0
- efficient_agent_protocol-0.1.0/environment/distributed_executor.py +453 -0
- efficient_agent_protocol-0.1.0/environment/executor.py +587 -0
- efficient_agent_protocol-0.1.0/environment/plugin_loader.py +106 -0
- efficient_agent_protocol-0.1.0/environment/tool_registry.py +297 -0
- efficient_agent_protocol-0.1.0/environment/tools/__init__.py +26 -0
- efficient_agent_protocol-0.1.0/environment/tools/example_tools.py +42 -0
- efficient_agent_protocol-0.1.0/environment/tools/file_tools.py +242 -0
- efficient_agent_protocol-0.1.0/environment/tools/web_tools.py +277 -0
- efficient_agent_protocol-0.1.0/protocol/__init__.py +61 -0
- efficient_agent_protocol-0.1.0/protocol/logging_config.py +81 -0
- efficient_agent_protocol-0.1.0/protocol/migrations.py +138 -0
- efficient_agent_protocol-0.1.0/protocol/models.py +543 -0
- efficient_agent_protocol-0.1.0/protocol/settings.py +200 -0
- efficient_agent_protocol-0.1.0/protocol/state_manager.py +669 -0
- efficient_agent_protocol-0.1.0/protocol/storage/__init__.py +11 -0
- efficient_agent_protocol-0.1.0/protocol/storage/base.py +98 -0
- efficient_agent_protocol-0.1.0/protocol/storage/postgres_store.py +267 -0
- efficient_agent_protocol-0.1.0/protocol/storage/redis_store.py +120 -0
- efficient_agent_protocol-0.1.0/protocol/storage/sqlite_store.py +135 -0
- efficient_agent_protocol-0.1.0/pyproject.toml +47 -0
- efficient_agent_protocol-0.1.0/setup.cfg +4 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 GenieWeenie
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: efficient-agent-protocol
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: High-performance multi-agent framework for stateful batched execution.
|
|
5
|
+
Requires-Python: >=3.9
|
|
6
|
+
Description-Content-Type: text/markdown
|
|
7
|
+
License-File: LICENSE
|
|
8
|
+
Requires-Dist: pydantic==2.6.1
|
|
9
|
+
Requires-Dist: requests==2.32.4
|
|
10
|
+
Requires-Dist: beautifulsoup4==4.12.3
|
|
11
|
+
Provides-Extra: dev
|
|
12
|
+
Requires-Dist: pytest>=8.0.0; extra == "dev"
|
|
13
|
+
Requires-Dist: pytest-cov>=5.0.0; extra == "dev"
|
|
14
|
+
Requires-Dist: ruff>=0.6.0; extra == "dev"
|
|
15
|
+
Requires-Dist: pre-commit>=3.7.0; extra == "dev"
|
|
16
|
+
Provides-Extra: storage
|
|
17
|
+
Requires-Dist: redis>=5.0.0; extra == "storage"
|
|
18
|
+
Requires-Dist: psycopg[binary]>=3.1.0; extra == "storage"
|
|
19
|
+
Dynamic: license-file
|
|
20
|
+
|
|
21
|
+
# Efficient Agent Protocol (EAP)
|
|
22
|
+
|
|
23
|
+
[](https://github.com/GenieWeenie/efficient-agent-protocol/actions/workflows/ci.yml)
|
|
24
|
+

|
|
25
|
+
|
|
26
|
+
> Status: Experimental (pre-1.0). APIs and schema may change.
|
|
27
|
+
> See `STABILITY.md` and `ROADMAP.md` for guarantees and planned milestones.
|
|
28
|
+
> Latest stable release: `v0.1.4`
|
|
29
|
+
|
|
30
|
+
Efficient Agent Protocol is a local-first framework for multi-step tool workflows.
|
|
31
|
+
It stores large outputs as pointer-backed state (`ptr_*`) and runs dependency-aware DAG steps in parallel.
|
|
32
|
+
|
|
33
|
+
## Who This Is For
|
|
34
|
+
|
|
35
|
+
- Python developers building local-first agent/tool orchestration
|
|
36
|
+
- Teams that need pointer-backed state and execution trace visibility
|
|
37
|
+
|
|
38
|
+
Not ideal yet for:
|
|
39
|
+
- strict long-term API compatibility requirements before `v1.0`
|
|
40
|
+
- non-technical users expecting zero-configuration onboarding
|
|
41
|
+
|
|
42
|
+
## What You Get
|
|
43
|
+
|
|
44
|
+
- Pointer-based state to keep prompts small
|
|
45
|
+
- Parallel DAG execution with retries and validation
|
|
46
|
+
- Built-in chat UI (Streamlit) with trace + data inspection
|
|
47
|
+
- Conversation memory (full/window/summary)
|
|
48
|
+
- Pluggable pointer storage backends (SQLite, Redis, PostgreSQL)
|
|
49
|
+
|
|
50
|
+
## Quickstart (GitHub-first)
|
|
51
|
+
|
|
52
|
+
Requirements:
|
|
53
|
+
- Python 3.9+
|
|
54
|
+
|
|
55
|
+
1. Install
|
|
56
|
+
|
|
57
|
+
```bash
|
|
58
|
+
git clone https://github.com/GenieWeenie/efficient-agent-protocol.git
|
|
59
|
+
cd efficient-agent-protocol
|
|
60
|
+
pip install -e .
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
2. Configure
|
|
64
|
+
|
|
65
|
+
```bash
|
|
66
|
+
cp .env.example .env
|
|
67
|
+
```
|
|
68
|
+
|
|
69
|
+
Minimum variables:
|
|
70
|
+
|
|
71
|
+
```bash
|
|
72
|
+
EAP_BASE_URL=http://localhost:1234
|
|
73
|
+
EAP_MODEL=nemotron-orchestrator-8b
|
|
74
|
+
EAP_API_KEY=not-needed
|
|
75
|
+
```
|
|
76
|
+
|
|
77
|
+
3. Smoke test
|
|
78
|
+
|
|
79
|
+
```bash
|
|
80
|
+
python -m examples.01_minimal
|
|
81
|
+
```
|
|
82
|
+
|
|
83
|
+
4. Run dashboard
|
|
84
|
+
|
|
85
|
+
```bash
|
|
86
|
+
pip install streamlit pandas
|
|
87
|
+
streamlit run app.py
|
|
88
|
+
```
|
|
89
|
+
|
|
90
|
+
5. Use it
|
|
91
|
+
|
|
92
|
+
- Open `http://localhost:8501`
|
|
93
|
+
- In **Agent Chat**, ask for a task
|
|
94
|
+
- Check **Data Inspector** for pointer payloads
|
|
95
|
+
- Check **Execution Trace** for step timing/retries/errors
|
|
96
|
+
|
|
97
|
+
## Programmatic Example
|
|
98
|
+
|
|
99
|
+
```python
|
|
100
|
+
from eap.protocol import StateManager
|
|
101
|
+
from eap.environment import AsyncLocalExecutor, ToolRegistry
|
|
102
|
+
from eap.environment.tools import read_local_file, READ_FILE_SCHEMA
|
|
103
|
+
from eap.agent import AgentClient
|
|
104
|
+
|
|
105
|
+
state_manager = StateManager()
|
|
106
|
+
registry = ToolRegistry()
|
|
107
|
+
registry.register("read_local_file", read_local_file, READ_FILE_SCHEMA)
|
|
108
|
+
executor = AsyncLocalExecutor(state_manager, registry)
|
|
109
|
+
|
|
110
|
+
architect = AgentClient(
|
|
111
|
+
base_url="http://localhost:1234",
|
|
112
|
+
model_name="nemotron-orchestrator-8b",
|
|
113
|
+
provider_name="local",
|
|
114
|
+
)
|
|
115
|
+
|
|
116
|
+
manifest = registry.get_agent_manifest()
|
|
117
|
+
macro = architect.generate_macro("Read README.md and summarize setup steps", manifest)
|
|
118
|
+
# asyncio.run(executor.execute_macro(macro))
|
|
119
|
+
```
|
|
120
|
+
|
|
121
|
+
## Common Commands
|
|
122
|
+
|
|
123
|
+
```bash
|
|
124
|
+
python3 -m pytest -q
|
|
125
|
+
pre-commit run --all-files
|
|
126
|
+
python3 scripts/migrate_state_db.py --db-path agent_state.db --dry-run
|
|
127
|
+
python3 scripts/export_metrics.py --db-path agent_state.db --output metrics/latest.json
|
|
128
|
+
python3 -m build
|
|
129
|
+
```
|
|
130
|
+
|
|
131
|
+
## Docs
|
|
132
|
+
|
|
133
|
+
- `STABILITY.md`
|
|
134
|
+
- `ROADMAP.md`
|
|
135
|
+
- `docs/v1_contract.md`
|
|
136
|
+
- `docs/release_notes_template.md`
|
|
137
|
+
- `docs/benchmarks.md`
|
|
138
|
+
- `docs/release.md`
|
|
139
|
+
- `docs/v1_stabilization_checklist.md`
|
|
140
|
+
- `docs/migrations.md`
|
|
141
|
+
- `docs/observability.md`
|
|
142
|
+
- `docs/maintainer_runbook.md`
|
|
143
|
+
- `SECURITY.md`
|
|
144
|
+
- `CONTRIBUTING.md`
|
|
145
|
+
- GitHub roadmap board: https://github.com/users/GenieWeenie/projects/1
|
|
146
|
+
- `docs/configuration.md`
|
|
147
|
+
- `docs/architecture.md`
|
|
148
|
+
- `docs/tools.md`
|
|
149
|
+
- `docs/workflow_schema.md`
|
|
150
|
+
- `docs/storage_lifecycle.md`
|
|
151
|
+
- `docs/storage_backends.md`
|
|
152
|
+
- `docs/sdk_contract.md`
|
|
153
|
+
- `docs/distributed_execution.md`
|
|
154
|
+
- `docs/troubleshooting.md`
|
|
155
|
+
- `docs/eap_proof_sheet.md`
|
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
# Efficient Agent Protocol (EAP)
|
|
2
|
+
|
|
3
|
+
[](https://github.com/GenieWeenie/efficient-agent-protocol/actions/workflows/ci.yml)
|
|
4
|
+

|
|
5
|
+
|
|
6
|
+
> Status: Experimental (pre-1.0). APIs and schema may change.
|
|
7
|
+
> See `STABILITY.md` and `ROADMAP.md` for guarantees and planned milestones.
|
|
8
|
+
> Latest stable release: `v0.1.4`
|
|
9
|
+
|
|
10
|
+
Efficient Agent Protocol is a local-first framework for multi-step tool workflows.
|
|
11
|
+
It stores large outputs as pointer-backed state (`ptr_*`) and runs dependency-aware DAG steps in parallel.
|
|
12
|
+
|
|
13
|
+
## Who This Is For
|
|
14
|
+
|
|
15
|
+
- Python developers building local-first agent/tool orchestration
|
|
16
|
+
- Teams that need pointer-backed state and execution trace visibility
|
|
17
|
+
|
|
18
|
+
Not ideal yet for:
|
|
19
|
+
- strict long-term API compatibility requirements before `v1.0`
|
|
20
|
+
- non-technical users expecting zero-configuration onboarding
|
|
21
|
+
|
|
22
|
+
## What You Get
|
|
23
|
+
|
|
24
|
+
- Pointer-based state to keep prompts small
|
|
25
|
+
- Parallel DAG execution with retries and validation
|
|
26
|
+
- Built-in chat UI (Streamlit) with trace + data inspection
|
|
27
|
+
- Conversation memory (full/window/summary)
|
|
28
|
+
- Pluggable pointer storage backends (SQLite, Redis, PostgreSQL)
|
|
29
|
+
|
|
30
|
+
## Quickstart (GitHub-first)
|
|
31
|
+
|
|
32
|
+
Requirements:
|
|
33
|
+
- Python 3.9+
|
|
34
|
+
|
|
35
|
+
1. Install
|
|
36
|
+
|
|
37
|
+
```bash
|
|
38
|
+
git clone https://github.com/GenieWeenie/efficient-agent-protocol.git
|
|
39
|
+
cd efficient-agent-protocol
|
|
40
|
+
pip install -e .
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
2. Configure
|
|
44
|
+
|
|
45
|
+
```bash
|
|
46
|
+
cp .env.example .env
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
Minimum variables:
|
|
50
|
+
|
|
51
|
+
```bash
|
|
52
|
+
EAP_BASE_URL=http://localhost:1234
|
|
53
|
+
EAP_MODEL=nemotron-orchestrator-8b
|
|
54
|
+
EAP_API_KEY=not-needed
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
3. Smoke test
|
|
58
|
+
|
|
59
|
+
```bash
|
|
60
|
+
python -m examples.01_minimal
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
4. Run dashboard
|
|
64
|
+
|
|
65
|
+
```bash
|
|
66
|
+
pip install streamlit pandas
|
|
67
|
+
streamlit run app.py
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
5. Use it
|
|
71
|
+
|
|
72
|
+
- Open `http://localhost:8501`
|
|
73
|
+
- In **Agent Chat**, ask for a task
|
|
74
|
+
- Check **Data Inspector** for pointer payloads
|
|
75
|
+
- Check **Execution Trace** for step timing/retries/errors
|
|
76
|
+
|
|
77
|
+
## Programmatic Example
|
|
78
|
+
|
|
79
|
+
```python
|
|
80
|
+
from eap.protocol import StateManager
|
|
81
|
+
from eap.environment import AsyncLocalExecutor, ToolRegistry
|
|
82
|
+
from eap.environment.tools import read_local_file, READ_FILE_SCHEMA
|
|
83
|
+
from eap.agent import AgentClient
|
|
84
|
+
|
|
85
|
+
state_manager = StateManager()
|
|
86
|
+
registry = ToolRegistry()
|
|
87
|
+
registry.register("read_local_file", read_local_file, READ_FILE_SCHEMA)
|
|
88
|
+
executor = AsyncLocalExecutor(state_manager, registry)
|
|
89
|
+
|
|
90
|
+
architect = AgentClient(
|
|
91
|
+
base_url="http://localhost:1234",
|
|
92
|
+
model_name="nemotron-orchestrator-8b",
|
|
93
|
+
provider_name="local",
|
|
94
|
+
)
|
|
95
|
+
|
|
96
|
+
manifest = registry.get_agent_manifest()
|
|
97
|
+
macro = architect.generate_macro("Read README.md and summarize setup steps", manifest)
|
|
98
|
+
# asyncio.run(executor.execute_macro(macro))
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
## Common Commands
|
|
102
|
+
|
|
103
|
+
```bash
|
|
104
|
+
python3 -m pytest -q
|
|
105
|
+
pre-commit run --all-files
|
|
106
|
+
python3 scripts/migrate_state_db.py --db-path agent_state.db --dry-run
|
|
107
|
+
python3 scripts/export_metrics.py --db-path agent_state.db --output metrics/latest.json
|
|
108
|
+
python3 -m build
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
## Docs
|
|
112
|
+
|
|
113
|
+
- `STABILITY.md`
|
|
114
|
+
- `ROADMAP.md`
|
|
115
|
+
- `docs/v1_contract.md`
|
|
116
|
+
- `docs/release_notes_template.md`
|
|
117
|
+
- `docs/benchmarks.md`
|
|
118
|
+
- `docs/release.md`
|
|
119
|
+
- `docs/v1_stabilization_checklist.md`
|
|
120
|
+
- `docs/migrations.md`
|
|
121
|
+
- `docs/observability.md`
|
|
122
|
+
- `docs/maintainer_runbook.md`
|
|
123
|
+
- `SECURITY.md`
|
|
124
|
+
- `CONTRIBUTING.md`
|
|
125
|
+
- GitHub roadmap board: https://github.com/users/GenieWeenie/projects/1
|
|
126
|
+
- `docs/configuration.md`
|
|
127
|
+
- `docs/architecture.md`
|
|
128
|
+
- `docs/tools.md`
|
|
129
|
+
- `docs/workflow_schema.md`
|
|
130
|
+
- `docs/storage_lifecycle.md`
|
|
131
|
+
- `docs/storage_backends.md`
|
|
132
|
+
- `docs/sdk_contract.md`
|
|
133
|
+
- `docs/distributed_execution.md`
|
|
134
|
+
- `docs/troubleshooting.md`
|
|
135
|
+
- `docs/eap_proof_sheet.md`
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
# agent/__init__.py
# Re-export the public agent API so callers can import everything from `agent`.
from .agent_client import AgentClient
from .compiler import MacroCompiler, WorkflowGraphCompiler
from .providers import (
    AnthropicProvider,
    CompletionRequest,
    CompletionResponse,
    GoogleProvider,
    LLMProvider,
    OpenAIProvider,
    ProviderMessage,
    create_provider,
)

__all__ = [
    "MacroCompiler",
    "WorkflowGraphCompiler",
    "AgentClient",
    "ProviderMessage",
    "CompletionRequest",
    "CompletionResponse",
    "LLMProvider",
    "OpenAIProvider",
    "AnthropicProvider",
    "GoogleProvider",
    "create_provider",
]
|
|
@@ -0,0 +1,131 @@
|
|
|
1
|
+
# agent/agent_client.py
|
|
2
|
+
import json
|
|
3
|
+
from typing import Callable, Dict, Any, Optional
|
|
4
|
+
from .compiler import MacroCompiler
|
|
5
|
+
from .providers import CompletionRequest, LLMProvider, ProviderMessage, create_provider
|
|
6
|
+
from protocol.models import BatchedMacroRequest
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class AgentClient:
    """Thin client around a pluggable LLM provider.

    Supports plain chat, streaming chat with a non-stream fallback, and
    macro generation (structured JSON workflow plans) compiled through
    :class:`MacroCompiler`.
    """

    def __init__(
        self,
        base_url: str,
        model_name: str,
        api_key: str = "not-needed",
        system_prompt: str = "",
        temperature: float = 0.0,
        timeout_seconds: int = 60,
        provider_name: str = "local",
        fallback_provider_name: Optional[str] = None,
        provider: Optional[LLMProvider] = None,
    ):
        # Kept for backward compatibility with callers that read `.endpoint`
        # directly; providers build their own request URLs.
        self.endpoint = f"{base_url.rstrip('/')}/v1/chat/completions"
        self.base_url = base_url.rstrip("/")
        self.model_name = model_name
        self.api_key = api_key
        self.system_prompt = system_prompt
        self.temperature = temperature
        self.timeout_seconds = timeout_seconds
        self.provider_name = provider_name
        self.fallback_provider_name = fallback_provider_name
        # An explicitly injected provider wins over factory construction.
        self.provider = provider or create_provider(
            provider_name=provider_name,
            base_url=self.base_url,
            api_key=api_key,
            timeout_seconds=timeout_seconds,
            fallback_provider_name=fallback_provider_name,
        )
        self.compiler = MacroCompiler()

    def _headers(self) -> Dict[str, str]:
        """Best-effort HTTP headers, delegating to the provider when available."""
        if hasattr(self.provider, "_headers"):
            return self.provider._headers()  # type: ignore[attr-defined]
        if self.api_key and self.api_key != "not-needed":
            return {"Authorization": f"Bearer {self.api_key}"}
        return {}

    def _base_messages(self, user_input: str) -> list:
        """Standard system+user message pair shared by the chat-style calls."""
        return [
            ProviderMessage(role="system", content=self.system_prompt),
            ProviderMessage(role="user", content=user_input),
        ]

    def chat(self, user_input: str) -> str:
        """Simple text-to-text chat for non-macro tasks like auditing."""
        request = CompletionRequest(
            model=self.model_name,
            messages=self._base_messages(user_input),
            temperature=self.temperature,
        )
        return self.provider.complete(request).text

    def stream_chat(
        self,
        user_input: str,
        on_token: Optional[Callable[[str], None]] = None,
        fallback_to_non_stream: bool = True,
    ) -> str:
        """Stream chat tokens and return the final assembled content.

        If streaming fails and `fallback_to_non_stream` is true, falls back to a
        standard completion request and emits the remaining content.
        """
        request = CompletionRequest(
            model=self.model_name,
            messages=self._base_messages(user_input),
            temperature=self.temperature,
        )
        chunks = []
        try:
            for token in self.provider.stream(request):
                if not token:
                    continue
                chunks.append(token)
                if on_token:
                    on_token(token)
        except Exception:
            if not fallback_to_non_stream:
                raise
            fallback_text = self.provider.complete(request).text
            streamed_text = "".join(chunks)
            if streamed_text and fallback_text.startswith(streamed_text):
                # Emit only the suffix not already streamed.
                remaining = fallback_text[len(streamed_text):]
            else:
                # Nothing was streamed, or the fallback diverged from the
                # partial stream: emit the fallback whole.
                # NOTE(review): on divergence this can duplicate content for
                # the caller — confirm the intended policy.
                remaining = fallback_text
            if remaining:
                chunks.append(remaining)
                if on_token:
                    on_token(remaining)
        return "".join(chunks)

    def generate_macro(
        self,
        user_input: str,
        hashed_manifest: Dict[str, Any],
        error_feedback: Optional[str] = None,
        memory_context: str = "",
    ) -> BatchedMacroRequest:
        """Ask the model for a JSON macro plan and compile it.

        Args:
            user_input: The task description.
            hashed_manifest: Tool manifest keyed by hashed tool IDs.
            error_feedback: Validation errors from a previous attempt, if any.
            memory_context: Optional conversation memory appended to the prompt.

        Raises:
            ValueError: if the model output cannot be compiled into a macro.
        """
        manifest_str = json.dumps(hashed_manifest, indent=2)
        protocol_instructions = (
            "### ENVIRONMENT MANIFEST ###\n"
            f"{manifest_str}\n\n"
            "### SYSTEM RULES ###\n"
            "1. Output ONLY a JSON object. No conversational text.\n"
            "2. Use the exact key names: 'steps', 'step_id', 'tool_name', 'arguments'.\n"
            "3. Use the hashed IDs for 'tool_name'.\n"
        )
        memory_block = f"\n\n### MEMORY CONTEXT ###\n{memory_context}" if memory_context else ""
        error_block = f"\n\n### FIX PREVIOUS ERROR ###\n{error_feedback}" if error_feedback else ""

        request = CompletionRequest(
            model=self.model_name,
            messages=[
                ProviderMessage(role="system", content=f"{self.system_prompt}\n\n{protocol_instructions}"),
                ProviderMessage(role="user", content=f"{user_input}{memory_block}{error_block}"),
            ],
            temperature=self.temperature,
        )
        response = self.provider.complete_with_tools(request)
        return self.compiler.compile(response.text)
|
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
# agent/compiler.py
|
|
2
|
+
import json
|
|
3
|
+
from typing import Any, Dict, Optional, Union
|
|
4
|
+
from protocol.models import (
|
|
5
|
+
BatchedMacroRequest,
|
|
6
|
+
ExecutionLimits,
|
|
7
|
+
PersistedWorkflowGraph,
|
|
8
|
+
RetryPolicy,
|
|
9
|
+
)
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def _extract_first_json_object(raw_payload: str, error_message: str) -> Dict[str, Any]:
|
|
13
|
+
"""Extract the first top-level JSON object embedded in arbitrary text."""
|
|
14
|
+
decoder = json.JSONDecoder()
|
|
15
|
+
start_index = raw_payload.find("{")
|
|
16
|
+
while start_index != -1:
|
|
17
|
+
try:
|
|
18
|
+
parsed, _ = decoder.raw_decode(raw_payload, start_index)
|
|
19
|
+
except json.JSONDecodeError:
|
|
20
|
+
start_index = raw_payload.find("{", start_index + 1)
|
|
21
|
+
continue
|
|
22
|
+
if isinstance(parsed, dict):
|
|
23
|
+
return parsed
|
|
24
|
+
start_index = raw_payload.find("{", start_index + 1)
|
|
25
|
+
raise ValueError(error_message)
|
|
26
|
+
|
|
27
|
+
class MacroCompiler:
    """Sanitize and 'auto-heal' common LLM JSON hallucinations before validation.

    Accepts either raw LLM text (possibly wrapped in markdown or prose) or an
    already-parsed dict, repairs well-known key mistakes, then validates the
    payload as a :class:`BatchedMacroRequest`.
    """

    @staticmethod
    def compile(raw_llm_output: Union[str, Dict[str, Any]]) -> BatchedMacroRequest:
        """Compile raw LLM output into a validated ``BatchedMacroRequest``.

        Raises:
            ValueError: if no JSON object can be extracted or validation fails
                (the original exception is chained as ``__cause__``).
        """
        try:
            # 1. Extract JSON from potential markdown or conversational noise.
            if isinstance(raw_llm_output, str):
                parsed_data = _extract_first_json_object(
                    raw_llm_output,
                    error_message="No JSON object found in output.",
                )
            else:
                parsed_data = raw_llm_output

            # 2. AUTO-HEALING: fix common hallucinations from smaller models.
            for index, step in enumerate(parsed_data.get("steps", [])):
                if not isinstance(step, dict):
                    continue  # let Pydantic report the malformed step
                # Fix 'tool' -> 'tool_name'.
                if "tool" in step and "tool_name" not in step:
                    step["tool_name"] = step.pop("tool")
                # Ensure step_id exists.  Use the step's position so the id is
                # deterministic: hash() on strings is salted per process, so the
                # previous hash-based fallback was not reproducible across runs.
                if "step_id" not in step:
                    step["step_id"] = f"auto_step_{index}"

            # 3. Final Pydantic validation.
            return BatchedMacroRequest(**parsed_data)

        except Exception as e:
            # Chain the cause so the original traceback stays debuggable.
            raise ValueError(f"Compiler Error: {e}") from e
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
class WorkflowGraphCompiler:
    """Compile persisted workflow graph payloads into executable macro requests."""

    @staticmethod
    def _extract_json_object(raw_payload: str) -> Dict[str, Any]:
        """Return the first JSON object embedded in *raw_payload*."""
        return _extract_first_json_object(
            raw_payload,
            error_message="No JSON object found in workflow graph payload.",
        )

    @staticmethod
    def _coerce_graph_payload(
        raw_graph: Union[str, Dict[str, Any], PersistedWorkflowGraph],
    ) -> PersistedWorkflowGraph:
        """Normalize a string, mapping, or model payload into a graph model."""
        if isinstance(raw_graph, PersistedWorkflowGraph):
            return raw_graph
        if isinstance(raw_graph, str):
            raw_graph = WorkflowGraphCompiler._extract_json_object(raw_graph)
        if not isinstance(raw_graph, dict):
            raise ValueError("Unsupported workflow graph payload type.")
        return PersistedWorkflowGraph(**raw_graph)

    @staticmethod
    def compile_graph(
        raw_graph: Union[str, Dict[str, Any], PersistedWorkflowGraph],
    ) -> PersistedWorkflowGraph:
        """Validate *raw_graph* and return the parsed workflow graph model.

        Raises:
            ValueError: wrapping any extraction/validation failure.
        """
        try:
            return WorkflowGraphCompiler._coerce_graph_payload(raw_graph)
        except Exception as exc:
            raise ValueError(f"Workflow graph validation failed: {exc}") from exc

    @staticmethod
    def compile_to_macro(
        raw_graph: Union[str, Dict[str, Any], PersistedWorkflowGraph],
        return_final_state_only: bool = True,
        retry_policy: Optional[Union[RetryPolicy, Dict[str, Any]]] = None,
        execution_limits: Optional[Union[ExecutionLimits, Dict[str, Any]]] = None,
    ) -> BatchedMacroRequest:
        """Compile a workflow graph into a ``BatchedMacroRequest``.

        ``retry_policy`` and ``execution_limits`` may be supplied either as
        model instances or as plain dicts; dicts are validated into models.

        Raises:
            ValueError: wrapping any validation or conversion failure.
        """
        graph = WorkflowGraphCompiler.compile_graph(raw_graph)
        try:
            retry_model: Optional[RetryPolicy] = None
            if isinstance(retry_policy, RetryPolicy):
                retry_model = retry_policy
            elif retry_policy is not None:
                retry_model = RetryPolicy(**retry_policy)

            limits_model: Optional[ExecutionLimits] = None
            if isinstance(execution_limits, ExecutionLimits):
                limits_model = execution_limits
            elif execution_limits is not None:
                limits_model = ExecutionLimits(**execution_limits)

            return graph.to_batched_macro_request(
                return_final_state_only=return_final_state_only,
                retry_policy=retry_model,
                execution_limits=limits_model,
            )
        except Exception as exc:
            raise ValueError(f"Workflow graph compile failed: {exc}") from exc
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
# Public surface of the provider subpackage.
from .anthropic_provider import AnthropicProvider
from .base import CompletionRequest, CompletionResponse, LLMProvider, ProviderMessage
from .factory import create_provider
from .google_provider import GoogleProvider
from .openai_provider import OpenAIProvider

__all__ = [
    "ProviderMessage",
    "CompletionRequest",
    "CompletionResponse",
    "LLMProvider",
    "OpenAIProvider",
    "AnthropicProvider",
    "GoogleProvider",
    "create_provider",
]
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
from typing import Dict, List
|
|
2
|
+
|
|
3
|
+
import requests
|
|
4
|
+
|
|
5
|
+
from .base import CompletionRequest, CompletionResponse, LLMProvider
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class AnthropicProvider(LLMProvider):
    """Adapter for Anthropic `/v1/messages` APIs."""

    def __init__(self, endpoint: str, api_key: str, timeout_seconds: int):
        self.endpoint = endpoint
        self.api_key = api_key
        self.timeout_seconds = timeout_seconds

    def _headers(self) -> Dict[str, str]:
        """HTTP headers required by the Anthropic messages endpoint."""
        return {
            "x-api-key": self.api_key,
            "anthropic-version": "2023-06-01",
            "content-type": "application/json",
        }

    def _to_payload(self, request: CompletionRequest) -> Dict[str, object]:
        """Translate a provider-neutral request into an Anthropic payload.

        System messages are folded into the top-level ``system`` field; every
        other role is mapped onto the user/assistant turn list.
        """
        system_chunks: List[str] = []
        turns = []
        for message in request.messages:
            if message.role == "system":
                system_chunks.append(message.content)
                continue
            mapped_role = "assistant" if message.role == "assistant" else "user"
            turns.append({"role": mapped_role, "content": message.content})

        payload: Dict[str, object] = {
            "model": request.model,
            "messages": turns,
            "temperature": request.temperature,
            "max_tokens": int(request.metadata.get("max_tokens", 1024)),
        }
        if system_chunks:
            payload["system"] = "\n\n".join(system_chunks)
        if request.tools:
            payload["tools"] = request.tools
        return payload

    def _extract_text(self, raw_json: Dict[str, object]) -> str:
        """Concatenate the text segments of an Anthropic response body."""
        content = raw_json.get("content", [])
        if not isinstance(content, list):
            return ""
        return "".join(
            str(item.get("text", ""))
            for item in content
            if isinstance(item, dict) and item.get("type") == "text"
        )

    def _request(self, request: CompletionRequest) -> CompletionResponse:
        """POST the payload and wrap the parsed JSON in a CompletionResponse."""
        http_response = requests.post(
            self.endpoint,
            json=self._to_payload(request),
            headers=self._headers(),
            timeout=self.timeout_seconds,
        )
        http_response.raise_for_status()
        body = http_response.json()
        return CompletionResponse(text=self._extract_text(body), raw_response=body)

    def complete(self, request: CompletionRequest) -> CompletionResponse:
        """Single-shot completion."""
        return self._request(request)

    def complete_with_tools(self, request: CompletionRequest) -> CompletionResponse:
        """Completion with tool definitions attached (same endpoint)."""
        return self._request(request)

    def stream(self, request: CompletionRequest):
        """Streaming is not supported by this adapter yet."""
        raise NotImplementedError("Streaming not implemented for AnthropicProvider yet.")
|