agenthelm-sdk 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agenthelm_sdk-0.1.0/LICENSE +21 -0
- agenthelm_sdk-0.1.0/PKG-INFO +105 -0
- agenthelm_sdk-0.1.0/README.md +93 -0
- agenthelm_sdk-0.1.0/agenthelm/__init__.py +9 -0
- agenthelm_sdk-0.1.0/agenthelm/client.py +631 -0
- agenthelm_sdk-0.1.0/agenthelm/queue.py +32 -0
- agenthelm_sdk-0.1.0/agenthelm_sdk.egg-info/PKG-INFO +105 -0
- agenthelm_sdk-0.1.0/agenthelm_sdk.egg-info/SOURCES.txt +12 -0
- agenthelm_sdk-0.1.0/agenthelm_sdk.egg-info/dependency_links.txt +1 -0
- agenthelm_sdk-0.1.0/agenthelm_sdk.egg-info/requires.txt +1 -0
- agenthelm_sdk-0.1.0/agenthelm_sdk.egg-info/top_level.txt +1 -0
- agenthelm_sdk-0.1.0/pyproject.toml +15 -0
- agenthelm_sdk-0.1.0/setup.cfg +4 -0
- agenthelm_sdk-0.1.0/tests/test_client.py +58 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2024 AgentHelm
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: agenthelm-sdk
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Python SDK for AgentHelm — AI Agent Control Plane
|
|
5
|
+
Project-URL: Homepage, https://agenthelm.vercel.app
|
|
6
|
+
Project-URL: Repository, https://github.com/jayasukuv11-beep/agenthelm
|
|
7
|
+
Requires-Python: >=3.8
|
|
8
|
+
Description-Content-Type: text/markdown
|
|
9
|
+
License-File: LICENSE
|
|
10
|
+
Requires-Dist: requests>=2.28.0
|
|
11
|
+
Dynamic: license-file
|
|
12
|
+
|
|
13
|
+
# agenthelm-sdk
|
|
14
|
+
|
|
15
|
+
Monitor your AI agents from AgentHelm.
|
|
16
|
+
Add one line. See everything.
|
|
17
|
+
|
|
18
|
+
## Install
|
|
19
|
+
|
|
20
|
+
pip install agenthelm-sdk
|
|
21
|
+
|
|
22
|
+
## Quick Start
|
|
23
|
+
|
|
24
|
+
import agenthelm
|
|
25
|
+
|
|
26
|
+
# Connect your agent (one line):
|
|
27
|
+
dock = agenthelm.connect(
|
|
28
|
+
"ahe_live_xxxxx",
|
|
29
|
+
name="My Agent"
|
|
30
|
+
)
|
|
31
|
+
|
|
32
|
+
# Send logs:
|
|
33
|
+
dock.log("Agent started")
|
|
34
|
+
dock.log("Warning: rate limit", level="warning")
|
|
35
|
+
dock.log("Task complete", level="success")
|
|
36
|
+
|
|
37
|
+
# Track token usage:
|
|
38
|
+
dock.track_tokens(used=1500, model="gemini-flash")
|
|
39
|
+
dock.track_tokens(
|
|
40
|
+
used=2000,
|
|
41
|
+
model="gpt-4",
|
|
42
|
+
cost_per_1k=0.03
|
|
43
|
+
)
|
|
44
|
+
|
|
45
|
+
# Send structured output:
|
|
46
|
+
dock.output({
|
|
47
|
+
"leads_found": 12,
|
|
48
|
+
"hot_leads": 5,
|
|
49
|
+
"report_sent": True
|
|
50
|
+
})
|
|
51
|
+
|
|
52
|
+
# Report errors:
|
|
53
|
+
try:
|
|
54
|
+
risky_operation()
|
|
55
|
+
except Exception as e:
|
|
56
|
+
dock.error("Operation failed", exception=e)
|
|
57
|
+
|
|
58
|
+
# Handle dashboard commands:
|
|
59
|
+
@dock.on_command("stop")
|
|
60
|
+
def handle_stop(payload):
|
|
61
|
+
dock.log("Stopping agent...")
|
|
62
|
+
dock.stop()
|
|
63
|
+
|
|
64
|
+
@dock.on_command("run_now")
|
|
65
|
+
def handle_run(payload):
|
|
66
|
+
# run_main_task()
|
|
67
|
+
dock.reply("Task started!")
|
|
68
|
+
|
|
69
|
+
# Chat with your agent:
|
|
70
|
+
@dock.on_chat
|
|
71
|
+
def handle_chat(message: str):
|
|
72
|
+
if "status" in message.lower():
|
|
73
|
+
dock.reply("Running fine!")
|
|
74
|
+
else:
|
|
75
|
+
dock.reply(f"Received: {message}")
|
|
76
|
+
|
|
77
|
+
# Keep agent alive:
|
|
78
|
+
dock.listen()
|
|
79
|
+
|
|
80
|
+
## Context Manager
|
|
81
|
+
|
|
82
|
+
with agenthelm.connect("ahe_live_xxxxx") as dock:
|
|
83
|
+
dock.log("Working...")
|
|
84
|
+
# do_work()
|
|
85
|
+
dock.output({"done": True})
|
|
86
|
+
# Agent auto-stops when block exits.
|
|
87
|
+
|
|
88
|
+
## Configuration
|
|
89
|
+
|
|
90
|
+
dock = agenthelm.connect(
|
|
91
|
+
key="ahe_live_xxxxx",
|
|
92
|
+
name="My Agent",
|
|
93
|
+
agent_type="python",
|
|
94
|
+
version="2.1.0",
|
|
95
|
+
verbose=True,
|
|
96
|
+
ping_interval=30,
|
|
97
|
+
command_poll_interval=5,
|
|
98
|
+
timeout=5
|
|
99
|
+
)
|
|
100
|
+
|
|
101
|
+
## Links
|
|
102
|
+
|
|
103
|
+
Dashboard: https://agenthelm.dev
|
|
104
|
+
Docs: https://agenthelm.dev/docs
|
|
105
|
+
Support: support@agenthelm.dev
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
# agenthelm-sdk
|
|
2
|
+
|
|
3
|
+
Monitor your AI agents from AgentHelm.
|
|
4
|
+
Add one line. See everything.
|
|
5
|
+
|
|
6
|
+
## Install
|
|
7
|
+
|
|
8
|
+
pip install agenthelm-sdk
|
|
9
|
+
|
|
10
|
+
## Quick Start
|
|
11
|
+
|
|
12
|
+
import agenthelm
|
|
13
|
+
|
|
14
|
+
# Connect your agent (one line):
|
|
15
|
+
dock = agenthelm.connect(
|
|
16
|
+
"ahe_live_xxxxx",
|
|
17
|
+
name="My Agent"
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
# Send logs:
|
|
21
|
+
dock.log("Agent started")
|
|
22
|
+
dock.log("Warning: rate limit", level="warning")
|
|
23
|
+
dock.log("Task complete", level="success")
|
|
24
|
+
|
|
25
|
+
# Track token usage:
|
|
26
|
+
dock.track_tokens(used=1500, model="gemini-flash")
|
|
27
|
+
dock.track_tokens(
|
|
28
|
+
used=2000,
|
|
29
|
+
model="gpt-4",
|
|
30
|
+
cost_per_1k=0.03
|
|
31
|
+
)
|
|
32
|
+
|
|
33
|
+
# Send structured output:
|
|
34
|
+
dock.output({
|
|
35
|
+
"leads_found": 12,
|
|
36
|
+
"hot_leads": 5,
|
|
37
|
+
"report_sent": True
|
|
38
|
+
})
|
|
39
|
+
|
|
40
|
+
# Report errors:
|
|
41
|
+
try:
|
|
42
|
+
risky_operation()
|
|
43
|
+
except Exception as e:
|
|
44
|
+
dock.error("Operation failed", exception=e)
|
|
45
|
+
|
|
46
|
+
# Handle dashboard commands:
|
|
47
|
+
@dock.on_command("stop")
|
|
48
|
+
def handle_stop(payload):
|
|
49
|
+
dock.log("Stopping agent...")
|
|
50
|
+
dock.stop()
|
|
51
|
+
|
|
52
|
+
@dock.on_command("run_now")
|
|
53
|
+
def handle_run(payload):
|
|
54
|
+
# run_main_task()
|
|
55
|
+
dock.reply("Task started!")
|
|
56
|
+
|
|
57
|
+
# Chat with your agent:
|
|
58
|
+
@dock.on_chat
|
|
59
|
+
def handle_chat(message: str):
|
|
60
|
+
if "status" in message.lower():
|
|
61
|
+
dock.reply("Running fine!")
|
|
62
|
+
else:
|
|
63
|
+
dock.reply(f"Received: {message}")
|
|
64
|
+
|
|
65
|
+
# Keep agent alive:
|
|
66
|
+
dock.listen()
|
|
67
|
+
|
|
68
|
+
## Context Manager
|
|
69
|
+
|
|
70
|
+
with agenthelm.connect("ahe_live_xxxxx") as dock:
|
|
71
|
+
dock.log("Working...")
|
|
72
|
+
# do_work()
|
|
73
|
+
dock.output({"done": True})
|
|
74
|
+
# Agent auto-stops when block exits.
|
|
75
|
+
|
|
76
|
+
## Configuration
|
|
77
|
+
|
|
78
|
+
dock = agenthelm.connect(
|
|
79
|
+
key="ahe_live_xxxxx",
|
|
80
|
+
name="My Agent",
|
|
81
|
+
agent_type="python",
|
|
82
|
+
version="2.1.0",
|
|
83
|
+
verbose=True,
|
|
84
|
+
ping_interval=30,
|
|
85
|
+
command_poll_interval=5,
|
|
86
|
+
timeout=5
|
|
87
|
+
)
|
|
88
|
+
|
|
89
|
+
## Links
|
|
90
|
+
|
|
91
|
+
Dashboard: https://agenthelm.dev
|
|
92
|
+
Docs: https://agenthelm.dev/docs
|
|
93
|
+
Support: support@agenthelm.dev
|
|
@@ -0,0 +1,631 @@
|
|
|
1
|
+
"""
|
|
2
|
+
AgentHelm Python SDK Client
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import os
|
|
6
|
+
import time
|
|
7
|
+
import json
|
|
8
|
+
import threading
|
|
9
|
+
import traceback
|
|
10
|
+
from datetime import datetime, timezone
|
|
11
|
+
from typing import Optional, Callable, Any, Dict
|
|
12
|
+
|
|
13
|
+
import requests
|
|
14
|
+
|
|
15
|
+
from .queue import OfflineQueue
|
|
16
|
+
|
|
17
|
+
# Default API URL — can be overridden for self-hosted
|
|
18
|
+
DEFAULT_BASE_URL = "https://agenthelm.vercel.app"
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class Agent:
|
|
22
|
+
"""
|
|
23
|
+
AgentHelm SDK — Connect any Python agent to AgentHelm.
|
|
24
|
+
|
|
25
|
+
Usage:
|
|
26
|
+
import agenthelm
|
|
27
|
+
dock = agenthelm.connect("ahe_live_xxxxx", name="My Agent")
|
|
28
|
+
dock.log("Agent started")
|
|
29
|
+
dock.track_tokens(used=1500, model="gemini-flash")
|
|
30
|
+
"""
|
|
31
|
+
|
|
32
|
+
def __init__(
|
|
33
|
+
self,
|
|
34
|
+
key: str,
|
|
35
|
+
name: str = "Python Agent",
|
|
36
|
+
agent_type: str = "python",
|
|
37
|
+
version: str = "1.0.0",
|
|
38
|
+
base_url: str = DEFAULT_BASE_URL,
|
|
39
|
+
auto_ping: bool = True,
|
|
40
|
+
ping_interval: int = 30,
|
|
41
|
+
command_poll_interval: int = 5,
|
|
42
|
+
verbose: bool = True,
|
|
43
|
+
timeout: int = 10
|
|
44
|
+
):
|
|
45
|
+
"""
|
|
46
|
+
Initialize AgentHelm connection.
|
|
47
|
+
|
|
48
|
+
Args:
|
|
49
|
+
key: Your AgentHelm connect key (ahe_live_xxxxx)
|
|
50
|
+
name: Display name for this agent in dashboard
|
|
51
|
+
agent_type: Type of agent ("python", "node", "other")
|
|
52
|
+
version: Your agent's version string
|
|
53
|
+
base_url: API base URL (change for self-hosted)
|
|
54
|
+
auto_ping: Automatically send heartbeat pings
|
|
55
|
+
ping_interval: Seconds between heartbeat pings
|
|
56
|
+
command_poll_interval: Seconds between command polls
|
|
57
|
+
verbose: Print connection status to console
|
|
58
|
+
timeout: HTTP request timeout in seconds
|
|
59
|
+
"""
|
|
60
|
+
if not key or not key.startswith("ahe_live_"):
|
|
61
|
+
raise ValueError(
|
|
62
|
+
"Invalid AgentHelm key. "
|
|
63
|
+
"Keys must start with 'ahe_live_'. "
|
|
64
|
+
"Get your key at agenthelm.dev/dashboard/settings"
|
|
65
|
+
)
|
|
66
|
+
|
|
67
|
+
self._key = key
|
|
68
|
+
self._name = name
|
|
69
|
+
self._agent_type = agent_type
|
|
70
|
+
self._version = version
|
|
71
|
+
self._base_url = base_url.rstrip("/")
|
|
72
|
+
self._verbose = verbose
|
|
73
|
+
self._timeout = 10
|
|
74
|
+
self._ping_interval = ping_interval
|
|
75
|
+
self._command_poll_interval = command_poll_interval
|
|
76
|
+
|
|
77
|
+
# State
|
|
78
|
+
self._agent_id: Optional[str] = None
|
|
79
|
+
self._running = True
|
|
80
|
+
self._connected = False
|
|
81
|
+
self._tokens_today = 0
|
|
82
|
+
self._tokens_session = 0
|
|
83
|
+
self._ping_timer: Optional[threading.Timer] = None
|
|
84
|
+
|
|
85
|
+
# Command + chat handlers
|
|
86
|
+
self._command_handlers: Dict[str, Callable] = {}
|
|
87
|
+
self._chat_handler: Optional[Callable] = None
|
|
88
|
+
|
|
89
|
+
# Offline queue
|
|
90
|
+
self._queue = OfflineQueue(maxsize=1000)
|
|
91
|
+
|
|
92
|
+
# Register agent on startup
|
|
93
|
+
self._register()
|
|
94
|
+
|
|
95
|
+
# Start background threads (all daemon=True)
|
|
96
|
+
if auto_ping:
|
|
97
|
+
self._start_ping_timer()
|
|
98
|
+
|
|
99
|
+
threading.Thread(
|
|
100
|
+
target=self._command_loop,
|
|
101
|
+
daemon=True,
|
|
102
|
+
name="agenthelm-commands"
|
|
103
|
+
).start()
|
|
104
|
+
|
|
105
|
+
threading.Thread(
|
|
106
|
+
target=self._flush_loop,
|
|
107
|
+
daemon=True,
|
|
108
|
+
name="agenthelm-flush"
|
|
109
|
+
).start()
|
|
110
|
+
|
|
111
|
+
# ─── PUBLIC METHODS ───────────────────────────────────
|
|
112
|
+
|
|
113
|
+
def log(
|
|
114
|
+
self,
|
|
115
|
+
message: str,
|
|
116
|
+
level: str = "info",
|
|
117
|
+
data: Optional[dict] = None
|
|
118
|
+
) -> None:
|
|
119
|
+
"""
|
|
120
|
+
Send a log message to AgentHelm dashboard.
|
|
121
|
+
|
|
122
|
+
Args:
|
|
123
|
+
message: The log message to display
|
|
124
|
+
level: One of "info", "warning", "error", "success"
|
|
125
|
+
data: Optional structured data (JSON-serializable dict)
|
|
126
|
+
|
|
127
|
+
Example:
|
|
128
|
+
dock.log("Processing started")
|
|
129
|
+
dock.log("Warning: rate limit approaching", level="warning")
|
|
130
|
+
dock.log("Error occurred", level="error")
|
|
131
|
+
"""
|
|
132
|
+
try:
|
|
133
|
+
payload = {
|
|
134
|
+
"key": self._key,
|
|
135
|
+
"agent_id": self._agent_id,
|
|
136
|
+
"type": "log",
|
|
137
|
+
"level": level,
|
|
138
|
+
"message": str(message),
|
|
139
|
+
"data": data,
|
|
140
|
+
"timestamp": self._now()
|
|
141
|
+
}
|
|
142
|
+
self._send("/api/sdk/log", payload)
|
|
143
|
+
except Exception as e:
|
|
144
|
+
if self._verbose:
|
|
145
|
+
print(f"[AgentHelm] ⚠️ Failed to log: {e}")
|
|
146
|
+
|
|
147
|
+
def output(
|
|
148
|
+
self,
|
|
149
|
+
data: dict,
|
|
150
|
+
label: str = "output"
|
|
151
|
+
) -> None:
|
|
152
|
+
"""
|
|
153
|
+
Send structured output/results to dashboard.
|
|
154
|
+
|
|
155
|
+
Args:
|
|
156
|
+
data: Dictionary of results (must be JSON-serializable)
|
|
157
|
+
label: Optional label for this output
|
|
158
|
+
|
|
159
|
+
Example:
|
|
160
|
+
dock.output({"leads_found": 12, "hot_leads": 5})
|
|
161
|
+
dock.output({"orders": 3, "revenue": 1499}, label="daily_summary")
|
|
162
|
+
"""
|
|
163
|
+
try:
|
|
164
|
+
payload = {
|
|
165
|
+
"key": self._key,
|
|
166
|
+
"agent_id": self._agent_id,
|
|
167
|
+
"type": "output",
|
|
168
|
+
"level": "success",
|
|
169
|
+
"message": f"[{label}] {json.dumps(data, default=str)}",
|
|
170
|
+
"data": data,
|
|
171
|
+
"label": label,
|
|
172
|
+
"timestamp": self._now()
|
|
173
|
+
}
|
|
174
|
+
self._send("/api/sdk/output", payload)
|
|
175
|
+
except Exception as e:
|
|
176
|
+
if self._verbose:
|
|
177
|
+
print(f"[AgentHelm] ⚠️ Failed to output: {e}")
|
|
178
|
+
|
|
179
|
+
def warn(
|
|
180
|
+
self,
|
|
181
|
+
message: str,
|
|
182
|
+
data: Optional[dict] = None
|
|
183
|
+
) -> None:
|
|
184
|
+
"""Report a warning to AgentHelm dashboard."""
|
|
185
|
+
try:
|
|
186
|
+
payload = {
|
|
187
|
+
"key": self._key,
|
|
188
|
+
"agent_id": self._agent_id,
|
|
189
|
+
"type": "log",
|
|
190
|
+
"level": "warn",
|
|
191
|
+
"message": str(message),
|
|
192
|
+
"data": data,
|
|
193
|
+
"timestamp": self._now()
|
|
194
|
+
}
|
|
195
|
+
self._send("/api/sdk/log", payload)
|
|
196
|
+
except Exception as e:
|
|
197
|
+
if self._verbose:
|
|
198
|
+
print(f"[AgentHelm] ⚠️ Failed to warn: {e}")
|
|
199
|
+
|
|
200
|
+
    def error(
        self,
        message: str,
        exception: Optional[Exception] = None,
        include_traceback: bool = True
    ) -> None:
        """
        Report an error to the AgentHelm dashboard as a level="error" log.

        NOTE(review): an earlier docstring claimed this also updates the
        agent status to 'error'; no such status update exists in this
        method — the status may be derived server-side, confirm.

        Args:
            message: Human-readable error description
            exception: Optional exception object; its type and message are
                attached, and (when include_traceback) the current
                traceback via traceback.format_exc() — meaningful only
                when called inside an `except` block
            include_traceback: Whether to include the full traceback
                (only applies when `exception` is given)

        Example:
            try:
                risky_operation()
            except Exception as e:
                dock.error("Operation failed", exception=e)
        """
        try:
            error_data = {"message": message}

            if exception:
                error_data["exception_type"] = type(exception).__name__
                error_data["exception_message"] = str(exception)

                if include_traceback:
                    error_data["traceback"] = traceback.format_exc()

            payload = {
                "key": self._key,
                "agent_id": self._agent_id,
                "type": "log",
                "level": "error",
                "message": message,
                "data": error_data,
                "timestamp": self._now()
            }
            self._send("/api/sdk/log", payload)
        except Exception as e:
            # Reporting must never crash the host agent.
            if self._verbose:
                print(f"[AgentHelm] ⚠️ Failed to error: {e}")
|
|
244
|
+
|
|
245
|
+
def track_tokens(
|
|
246
|
+
self,
|
|
247
|
+
used: int,
|
|
248
|
+
model: str,
|
|
249
|
+
cost_per_1k: float = 0.0,
|
|
250
|
+
prompt_tokens: Optional[int] = None,
|
|
251
|
+
completion_tokens: Optional[int] = None
|
|
252
|
+
) -> None:
|
|
253
|
+
"""
|
|
254
|
+
Track token usage for credits dashboard.
|
|
255
|
+
|
|
256
|
+
Args:
|
|
257
|
+
used: Total tokens used in this call
|
|
258
|
+
model: Model name (e.g., "gemini-flash", "gpt-4", "claude-sonnet")
|
|
259
|
+
cost_per_1k: Cost in USD per 1000 tokens (optional)
|
|
260
|
+
prompt_tokens: Input tokens (optional breakdown)
|
|
261
|
+
completion_tokens: Output tokens (optional breakdown)
|
|
262
|
+
|
|
263
|
+
Example:
|
|
264
|
+
dock.track_tokens(used=1500, model="gemini-flash")
|
|
265
|
+
dock.track_tokens(
|
|
266
|
+
used=2000, model="gpt-4",
|
|
267
|
+
cost_per_1k=0.03,
|
|
268
|
+
prompt_tokens=1200,
|
|
269
|
+
completion_tokens=800
|
|
270
|
+
)
|
|
271
|
+
"""
|
|
272
|
+
cost_usd = round((used / 1000) * cost_per_1k, 8)
|
|
273
|
+
self._tokens_today += used
|
|
274
|
+
self._tokens_session += used
|
|
275
|
+
|
|
276
|
+
payload = {
|
|
277
|
+
"key": self._key,
|
|
278
|
+
"agent_id": self._agent_id,
|
|
279
|
+
"type": "tokens",
|
|
280
|
+
"level": "info",
|
|
281
|
+
"message": f"Token usage: {used:,} tokens ({model})",
|
|
282
|
+
"tokens_used": used,
|
|
283
|
+
"model": model,
|
|
284
|
+
"cost_usd": cost_usd,
|
|
285
|
+
"data": {
|
|
286
|
+
"prompt_tokens": prompt_tokens,
|
|
287
|
+
"completion_tokens": completion_tokens,
|
|
288
|
+
"total_tokens": used,
|
|
289
|
+
"model": model,
|
|
290
|
+
"cost_usd": cost_usd
|
|
291
|
+
},
|
|
292
|
+
"timestamp": self._now()
|
|
293
|
+
}
|
|
294
|
+
self._send("/api/sdk/log", payload)
|
|
295
|
+
|
|
296
|
+
def reply(self, message: str) -> None:
|
|
297
|
+
"""
|
|
298
|
+
Send a reply back to a dashboard/Telegram chat message.
|
|
299
|
+
Called inside @dock.on_chat handlers.
|
|
300
|
+
|
|
301
|
+
Args:
|
|
302
|
+
message: The reply message to send
|
|
303
|
+
|
|
304
|
+
Example:
|
|
305
|
+
@dock.on_chat
|
|
306
|
+
def handle(msg):
|
|
307
|
+
dock.reply(f"I received: {msg}")
|
|
308
|
+
"""
|
|
309
|
+
payload = {
|
|
310
|
+
"key": self._key,
|
|
311
|
+
"agent_id": self._agent_id,
|
|
312
|
+
"type": "chat_reply",
|
|
313
|
+
"level": "info",
|
|
314
|
+
"message": message,
|
|
315
|
+
"timestamp": self._now()
|
|
316
|
+
}
|
|
317
|
+
self._send("/api/sdk/log", payload)
|
|
318
|
+
|
|
319
|
+
def stop(self) -> None:
|
|
320
|
+
"""
|
|
321
|
+
Gracefully stop the agent and notify dashboard.
|
|
322
|
+
|
|
323
|
+
Example:
|
|
324
|
+
@dock.on_command("stop")
|
|
325
|
+
def handle_stop(payload):
|
|
326
|
+
dock.log("Stopping gracefully...")
|
|
327
|
+
dock.stop()
|
|
328
|
+
"""
|
|
329
|
+
try:
|
|
330
|
+
self._running = False
|
|
331
|
+
if hasattr(self, '_ping_timer') and self._ping_timer:
|
|
332
|
+
self._ping_timer.cancel()
|
|
333
|
+
|
|
334
|
+
self._send("/api/sdk/ping", {
|
|
335
|
+
"key": self._key,
|
|
336
|
+
"agent_id": self._agent_id,
|
|
337
|
+
"status": "stopped",
|
|
338
|
+
"timestamp": self._now()
|
|
339
|
+
})
|
|
340
|
+
if self._verbose:
|
|
341
|
+
print(f"[AgentHelm] ⏹ {self._name} stopped")
|
|
342
|
+
except Exception as e:
|
|
343
|
+
if self._verbose:
|
|
344
|
+
print(f"[AgentHelm] ⚠️ Failed to stop: {e}")
|
|
345
|
+
|
|
346
|
+
def listen(self) -> None:
|
|
347
|
+
"""
|
|
348
|
+
Block the main thread and keep the agent running.
|
|
349
|
+
Handles KeyboardInterrupt (Ctrl+C) gracefully.
|
|
350
|
+
|
|
351
|
+
Example:
|
|
352
|
+
# At the end of your script:
|
|
353
|
+
dock.listen()
|
|
354
|
+
"""
|
|
355
|
+
if self._verbose:
|
|
356
|
+
print(f"[AgentHelm] 👂 {self._name} listening for commands...")
|
|
357
|
+
try:
|
|
358
|
+
while self._running:
|
|
359
|
+
time.sleep(1)
|
|
360
|
+
except KeyboardInterrupt:
|
|
361
|
+
if self._verbose:
|
|
362
|
+
print(f"\n[AgentHelm] 🛑 Shutdown requested")
|
|
363
|
+
self.stop()
|
|
364
|
+
|
|
365
|
+
# ─── DECORATORS ───────────────────────────────────────
|
|
366
|
+
|
|
367
|
+
def on_command(self, command_type: str):
|
|
368
|
+
"""
|
|
369
|
+
Decorator to handle commands from dashboard/Telegram.
|
|
370
|
+
|
|
371
|
+
Args:
|
|
372
|
+
command_type: Command name to listen for
|
|
373
|
+
("stop", "start", "restart", or custom)
|
|
374
|
+
|
|
375
|
+
Example:
|
|
376
|
+
@dock.on_command("stop")
|
|
377
|
+
def handle_stop(payload):
|
|
378
|
+
cleanup()
|
|
379
|
+
dock.stop()
|
|
380
|
+
|
|
381
|
+
@dock.on_command("run_report")
|
|
382
|
+
def handle_run(payload):
|
|
383
|
+
run_report()
|
|
384
|
+
dock.reply("Report generated!")
|
|
385
|
+
"""
|
|
386
|
+
def decorator(func: Callable) -> Callable:
|
|
387
|
+
self._command_handlers[command_type] = func
|
|
388
|
+
return func
|
|
389
|
+
return decorator
|
|
390
|
+
|
|
391
|
+
def on_chat(self, func: Callable) -> Callable:
|
|
392
|
+
"""
|
|
393
|
+
Decorator to handle chat messages from dashboard/Telegram.
|
|
394
|
+
Use dock.reply() to respond.
|
|
395
|
+
|
|
396
|
+
Example:
|
|
397
|
+
@dock.on_chat
|
|
398
|
+
def handle_chat(message: str):
|
|
399
|
+
if "status" in message.lower():
|
|
400
|
+
dock.reply("All good!")
|
|
401
|
+
else:
|
|
402
|
+
dock.reply(f"Got: {message}")
|
|
403
|
+
"""
|
|
404
|
+
self._chat_handler = func
|
|
405
|
+
return func
|
|
406
|
+
|
|
407
|
+
# ─── PROPERTIES ───────────────────────────────────────
|
|
408
|
+
|
|
409
|
+
    @property
    def tokens_today(self) -> int:
        """Tokens accumulated by track_tokens() in this process.

        NOTE(review): despite the name, no daily reset is visible in this
        file — the counter only grows for the process lifetime (identical
        behavior to tokens_session). Confirm intended semantics.
        """
        return self._tokens_today
|
|
413
|
+
|
|
414
|
+
    @property
    def tokens_session(self) -> int:
        """Total tokens tracked by track_tokens() since this process started."""
        return self._tokens_session
|
|
418
|
+
|
|
419
|
+
    @property
    def agent_id(self) -> Optional[str]:
        """Agent id assigned by the AgentHelm backend; None until a
        registration or heartbeat ping succeeds."""
        return self._agent_id
|
|
423
|
+
|
|
424
|
+
    @property
    def is_connected(self) -> bool:
        """True once initial registration returned HTTP 200.

        NOTE(review): never reset on later network failures, so this
        reflects startup success, not current reachability.
        """
        return self._connected
|
|
428
|
+
|
|
429
|
+
    @property
    def name(self) -> str:
        """Agent display name, as passed to the constructor."""
        return self._name
|
|
433
|
+
|
|
434
|
+
# ─── PRIVATE METHODS ──────────────────────────────────
|
|
435
|
+
|
|
436
|
+
    def _register(self) -> None:
        """Register this agent with AgentHelm on startup.

        POSTs the agent metadata to /api/sdk/ping. On HTTP 200 the
        backend-assigned agent_id is stored and the agent is marked
        connected. Every failure path is non-fatal: the SDK tolerates
        starting offline (the heartbeat loop can late-bind agent_id).
        """
        try:
            response = requests.post(
                f"{self._base_url}/api/sdk/ping",
                json={
                    "key": self._key,
                    "name": self._name,
                    "agent_type": self._agent_type,
                    "version": self._version,
                    "status": "running",
                    "started_at": self._now()
                },
                timeout=10  # NOTE(review): hard-coded; ignores self._timeout
            )

            if response.status_code == 200:
                data = response.json()
                self._agent_id = data.get("agent_id")
                self._connected = True
                if self._verbose:
                    # Shorten the id for console display
                    agent_short = (
                        self._agent_id[:8] + "..."
                        if self._agent_id else "unknown"
                    )
                    print(
                        f"[AgentHelm] ✅ Connected: "
                        f"{self._name} ({agent_short})"
                    )
            elif response.status_code == 401:
                if self._verbose:
                    print("[AgentHelm] ❌ Invalid connect key.")
            else:
                if self._verbose:
                    print(f"[AgentHelm] ⚠️ HTTP {response.status_code}")

        except requests.exceptions.ConnectionError:
            # No network at startup: run in offline mode; pings will retry.
            if self._verbose:
                print("[AgentHelm] ⚠️ Offline mode — will retry connection...")
        except Exception as e:
            if self._verbose:
                print(f"[AgentHelm] ⚠️ Connection failed: {e}")
|
|
478
|
+
|
|
479
|
+
    def _send(self, endpoint: str, payload: dict) -> bool:
        """
        POST `payload` as JSON to `{base_url}{endpoint}`.

        Returns:
            True on HTTP 200; False otherwise. A non-200 response returns
            False but is NOT queued for retry — only raised exceptions
            (network errors, timeouts) are parked in the offline queue for
            the flush loop.
        """
        try:
            response = requests.post(
                f"{self._base_url}{endpoint}",
                json=payload,
                timeout=self._timeout
            )
            return response.status_code == 200
        except Exception:
            # Queue for retry when back online.
            # NOTE(review): this < 1000 guard duplicates the deque maxlen
            # bound inside OfflineQueue and hard-codes the size; without
            # the guard the deque would evict its oldest entry instead of
            # dropping the newest — confirm which drop policy is intended.
            if self._queue.size() < 1000:
                self._queue.push(endpoint, payload)
            return False
|
|
496
|
+
|
|
497
|
+
def _start_ping_timer(self) -> None:
|
|
498
|
+
"""Start the heartbeat timer."""
|
|
499
|
+
if self._running:
|
|
500
|
+
self._ping_timer = threading.Timer(self._ping_interval, self._ping_loop)
|
|
501
|
+
self._ping_timer.daemon = True
|
|
502
|
+
self._ping_timer.start()
|
|
503
|
+
|
|
504
|
+
    def _ping_loop(self) -> None:
        """Send one heartbeat ping, then re-arm the timer.

        Runs on a threading.Timer thread every _ping_interval seconds.
        Failures are silently ignored — the heartbeat must never kill the
        agent — and the loop keeps rescheduling while _running is True.
        """
        if not self._running:
            return
        try:
            response = requests.post(
                f"{self._base_url}/api/sdk/ping",
                json={
                    "key": self._key,
                    "agent_id": self._agent_id,
                    "status": "running",
                    "timestamp": self._now()
                },
                timeout=10  # NOTE(review): hard-coded; ignores self._timeout
            )
            if response.status_code == 200:
                data = response.json()
                # Late-bind the agent_id if initial registration ran offline.
                if not self._agent_id and data.get("agent_id"):
                    self._agent_id = data.get("agent_id")
        except Exception:
            pass
        # Re-arm for the next heartbeat (one-shot Timer, so re-arm each time).
        self._start_ping_timer()
|
|
526
|
+
|
|
527
|
+
    def _command_loop(self) -> None:
        """Background daemon loop: poll the backend for pending commands.

        GETs /api/sdk/command every _command_poll_interval seconds and
        dispatches each returned command through _handle_command. Network
        errors are swallowed so the poll loop never dies.
        """
        while self._running:
            try:
                response = requests.get(
                    f"{self._base_url}/api/sdk/command",
                    params={
                        "key": self._key,
                        "agent_id": self._agent_id
                    },
                    timeout=self._timeout
                )

                if response.status_code == 200:
                    data = response.json()
                    commands = data.get("commands", [])

                    for cmd in commands:
                        self._handle_command(cmd)

            except Exception:
                # Keep polling even when the backend is unreachable.
                pass

            time.sleep(self._command_poll_interval)
|
|
551
|
+
|
|
552
|
+
    def _handle_command(self, cmd: dict) -> None:
        """Dispatch one command dict to its registered handler.

        "chat" commands go to the @on_chat handler (passed the message
        string); any other command_type goes to its @on_command handler
        (passed the payload dict). Handlers run on daemon threads so a
        slow handler cannot stall the poll loop. Commands with no
        registered handler are silently ignored.
        """
        command_type = cmd.get("command_type", "")
        payload = cmd.get("payload", {})

        try:
            if command_type == "chat":
                # Route to chat handler
                if self._chat_handler:
                    message = payload.get("message", "")
                    threading.Thread(
                        target=self._chat_handler,
                        args=(message,),
                        daemon=True
                    ).start()

            elif command_type in self._command_handlers:
                # Route to registered command handler
                handler = self._command_handlers[command_type]
                threading.Thread(
                    target=handler,
                    args=(payload,),
                    daemon=True
                ).start()

        except Exception as e:
            # NOTE(review): this catches thread-creation errors only;
            # exceptions raised inside a handler die on the daemon thread.
            print(f"[AgentHelm] ❌ Command handler error: {e}")
|
|
579
|
+
|
|
580
|
+
def _flush_loop(self) -> None:
|
|
581
|
+
"""Retry failed requests from offline queue."""
|
|
582
|
+
while self._running:
|
|
583
|
+
if not self._queue.is_empty():
|
|
584
|
+
item = self._queue.pop()
|
|
585
|
+
if item:
|
|
586
|
+
endpoint, payload = item
|
|
587
|
+
self._send(endpoint, payload)
|
|
588
|
+
time.sleep(10)
|
|
589
|
+
|
|
590
|
+
@staticmethod
|
|
591
|
+
def _now() -> str:
|
|
592
|
+
"""Return current UTC timestamp as ISO string."""
|
|
593
|
+
return datetime.now(timezone.utc).isoformat()
|
|
594
|
+
|
|
595
|
+
def __repr__(self) -> str:
|
|
596
|
+
return (
|
|
597
|
+
f"AgentHelm(name='{self._name}', "
|
|
598
|
+
f"connected={self._connected}, "
|
|
599
|
+
f"agent_id='{self._agent_id}')"
|
|
600
|
+
)
|
|
601
|
+
|
|
602
|
+
    def __enter__(self):
        """Enter context manager: returns self unchanged (the connection
        was already established in __init__)."""
        return self
|
|
605
|
+
|
|
606
|
+
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Exit context manager: stop the agent.

        Returns None (falsy), so exceptions from the with-block propagate.
        """
        self.stop()
|
|
609
|
+
|
|
610
|
+
|
|
611
|
+
def connect(
    key: str,
    name: str = "Python Agent",
    **kwargs
) -> Agent:
    """
    One-line shortcut to connect an agent to AgentHelm.

    Args:
        key: Your AgentHelm connect key (ahe_live_xxxxx)
        name: Display name for this agent
        **kwargs: Additional options forwarded to Agent()

    Returns:
        Connected Agent instance

    Example:
        import agenthelm
        dock = agenthelm.connect("ahe_live_xxxxx", name="My Agent")
    """
    agent = Agent(key=key, name=name, **kwargs)
    return agent
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
import threading
from collections import deque
from typing import Any, Optional, Tuple
|
|
4
|
+
|
|
5
|
+
class OfflineQueue:
    """
    Thread-safe FIFO queue for storing failed API requests so they can be
    retried once connectivity is restored.

    Backed by a bounded deque (default max 1000 items) to prevent memory
    issues; when full, the oldest entry is evicted on push.
    """

    def __init__(self, maxsize: int = 1000):
        """Create an empty queue holding at most `maxsize` requests."""
        self._queue: deque = deque(maxlen=maxsize)
        self._lock = threading.Lock()

    def push(self, endpoint: str, payload: dict) -> None:
        """Append an (endpoint, payload) pair; evicts the oldest when full."""
        with self._lock:
            self._queue.append((endpoint, payload))

    def pop(self) -> Optional[Tuple[str, dict]]:
        """Remove and return the oldest pair, or None when empty.

        BUG FIX: the return annotation previously used the PEP 604
        `Tuple[str, dict] | None` union syntax, which is evaluated at
        function-definition time and raises TypeError on Python 3.8/3.9 —
        versions the package declares support for (Requires-Python >=3.8).
        Optional[...] works on all supported versions.
        """
        with self._lock:
            if self._queue:
                return self._queue.popleft()
            return None

    def size(self) -> int:
        """Current number of queued requests."""
        with self._lock:
            return len(self._queue)

    def is_empty(self) -> bool:
        """True when no requests are queued."""
        with self._lock:
            return len(self._queue) == 0
|
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: agenthelm-sdk
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Python SDK for AgentHelm — AI Agent Control Plane
|
|
5
|
+
Project-URL: Homepage, https://agenthelm.vercel.app
|
|
6
|
+
Project-URL: Repository, https://github.com/jayasukuv11-beep/agenthelm
|
|
7
|
+
Requires-Python: >=3.8
|
|
8
|
+
Description-Content-Type: text/markdown
|
|
9
|
+
License-File: LICENSE
|
|
10
|
+
Requires-Dist: requests>=2.28.0
|
|
11
|
+
Dynamic: license-file
|
|
12
|
+
|
|
13
|
+
# agenthelm-sdk
|
|
14
|
+
|
|
15
|
+
Monitor your AI agents from AgentHelm.
|
|
16
|
+
Add one line. See everything.
|
|
17
|
+
|
|
18
|
+
## Install
|
|
19
|
+
|
|
20
|
+
pip install agenthelm-sdk
|
|
21
|
+
|
|
22
|
+
## Quick Start
|
|
23
|
+
|
|
24
|
+
import agenthelm
|
|
25
|
+
|
|
26
|
+
# Connect your agent (one line):
|
|
27
|
+
dock = agenthelm.connect(
|
|
28
|
+
"ahe_live_xxxxx",
|
|
29
|
+
name="My Agent"
|
|
30
|
+
)
|
|
31
|
+
|
|
32
|
+
# Send logs:
|
|
33
|
+
dock.log("Agent started")
|
|
34
|
+
dock.log("Warning: rate limit", level="warning")
|
|
35
|
+
dock.log("Task complete", level="success")
|
|
36
|
+
|
|
37
|
+
# Track token usage:
|
|
38
|
+
dock.track_tokens(used=1500, model="gemini-flash")
|
|
39
|
+
dock.track_tokens(
|
|
40
|
+
used=2000,
|
|
41
|
+
model="gpt-4",
|
|
42
|
+
cost_per_1k=0.03
|
|
43
|
+
)
|
|
44
|
+
|
|
45
|
+
# Send structured output:
|
|
46
|
+
dock.output({
|
|
47
|
+
"leads_found": 12,
|
|
48
|
+
"hot_leads": 5,
|
|
49
|
+
"report_sent": True
|
|
50
|
+
})
|
|
51
|
+
|
|
52
|
+
# Report errors:
|
|
53
|
+
try:
|
|
54
|
+
risky_operation()
|
|
55
|
+
except Exception as e:
|
|
56
|
+
dock.error("Operation failed", exception=e)
|
|
57
|
+
|
|
58
|
+
# Handle dashboard commands:
|
|
59
|
+
@dock.on_command("stop")
|
|
60
|
+
def handle_stop(payload):
|
|
61
|
+
dock.log("Stopping agent...")
|
|
62
|
+
dock.stop()
|
|
63
|
+
|
|
64
|
+
@dock.on_command("run_now")
|
|
65
|
+
def handle_run(payload):
|
|
66
|
+
# run_main_task()
|
|
67
|
+
dock.reply("Task started!")
|
|
68
|
+
|
|
69
|
+
# Chat with your agent:
|
|
70
|
+
@dock.on_chat
|
|
71
|
+
def handle_chat(message: str):
|
|
72
|
+
if "status" in message.lower():
|
|
73
|
+
dock.reply("Running fine!")
|
|
74
|
+
else:
|
|
75
|
+
dock.reply(f"Received: {message}")
|
|
76
|
+
|
|
77
|
+
# Keep agent alive:
|
|
78
|
+
dock.listen()
|
|
79
|
+
|
|
80
|
+
## Context Manager
|
|
81
|
+
|
|
82
|
+
with agenthelm.connect("ahe_live_xxxxx") as dock:
|
|
83
|
+
dock.log("Working...")
|
|
84
|
+
# do_work()
|
|
85
|
+
dock.output({"done": True})
|
|
86
|
+
# Agent auto-stops when block exits.
|
|
87
|
+
|
|
88
|
+
## Configuration
|
|
89
|
+
|
|
90
|
+
dock = agenthelm.connect(
|
|
91
|
+
key="ahe_live_xxxxx",
|
|
92
|
+
name="My Agent",
|
|
93
|
+
agent_type="python",
|
|
94
|
+
version="2.1.0",
|
|
95
|
+
verbose=True,
|
|
96
|
+
ping_interval=30,
|
|
97
|
+
command_poll_interval=5,
|
|
98
|
+
timeout=5
|
|
99
|
+
)
|
|
100
|
+
|
|
101
|
+
## Links
|
|
102
|
+
|
|
103
|
+
Dashboard: https://agenthelm.dev
|
|
104
|
+
Docs: https://agenthelm.dev/docs
|
|
105
|
+
Support: support@agenthelm.dev
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
LICENSE
|
|
2
|
+
README.md
|
|
3
|
+
pyproject.toml
|
|
4
|
+
agenthelm/__init__.py
|
|
5
|
+
agenthelm/client.py
|
|
6
|
+
agenthelm/queue.py
|
|
7
|
+
agenthelm_sdk.egg-info/PKG-INFO
|
|
8
|
+
agenthelm_sdk.egg-info/SOURCES.txt
|
|
9
|
+
agenthelm_sdk.egg-info/dependency_links.txt
|
|
10
|
+
agenthelm_sdk.egg-info/requires.txt
|
|
11
|
+
agenthelm_sdk.egg-info/top_level.txt
|
|
12
|
+
tests/test_client.py
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
requests>=2.28.0
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
agenthelm
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=61.0", "wheel"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "agenthelm-sdk"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "Python SDK for AgentHelm — AI Agent Control Plane"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
requires-python = ">=3.8"
|
|
11
|
+
dependencies = ["requests>=2.28.0"]
|
|
12
|
+
|
|
13
|
+
[project.urls]
|
|
14
|
+
Homepage = "https://agenthelm.vercel.app"
|
|
15
|
+
Repository = "https://github.com/jayasukuv11-beep/agenthelm"
|
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
import pytest
|
|
2
|
+
from unittest.mock import patch, MagicMock
|
|
3
|
+
from agenthelm import AgentHelm, connect
|
|
4
|
+
|
|
5
|
+
class TestAgentHelmInit:
    """Constructor and connect() shortcut behavior of the AgentHelm client."""

    @staticmethod
    def _stub_registration(mock_post, agent_id):
        # Simulate a successful registration round-trip so the client
        # never touches the network during construction.
        response = mock_post.return_value
        response.status_code = 200
        response.json.return_value = {"agent_id": agent_id}

    def test_invalid_key_raises_error(self):
        # A malformed API key is rejected up front with ValueError.
        with pytest.raises(ValueError):
            AgentHelm(key="invalid_key", name="Test")

    def test_valid_key_accepted(self):
        # NOTE(review): key format presumably requires the ahe_ prefix —
        # matches the ahe_live_ fixtures used throughout these tests.
        with patch("requests.post") as mock_post:
            self._stub_registration(mock_post, "test-uuid-1234")
            dock = AgentHelm(
                key="ahe_live_test123", name="Test Agent", auto_ping=False
            )
            assert dock.name == "Test Agent"

    def test_connect_shortcut(self):
        # connect() is a thin factory: it must hand back an AgentHelm.
        with patch("requests.post") as mock_post:
            self._stub_registration(mock_post, "test-uuid")
            dock = connect("ahe_live_test123", name="Test", auto_ping=False)
            assert isinstance(dock, AgentHelm)
|
36
|
+
class TestLogging:
    """Log delivery and token accounting on a connected client."""

    def setup_method(self):
        # Build a client against a stubbed registration endpoint so no
        # network traffic happens while the fixture is created.
        with patch("requests.post") as stub:
            stub.return_value.status_code = 200
            stub.return_value.json.return_value = {"agent_id": "test-id"}
            self.dock = AgentHelm("ahe_live_test", auto_ping=False)

    def test_log_sends_request(self):
        # A single log() call should produce exactly one HTTP POST.
        with patch("requests.post") as stub:
            stub.return_value.status_code = 200
            self.dock.log("Test message")
            stub.assert_called_once()

    def test_track_tokens_accumulates(self):
        # Two tracked batches must add up in the per-session counter.
        with patch("requests.post"):
            for used in (500, 300):
                self.dock.track_tokens(used, "test-model")
            assert self.dock.tokens_session == 800