botjamesbot 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- botjamesbot-0.1.0/PKG-INFO +132 -0
- botjamesbot-0.1.0/README.md +119 -0
- botjamesbot-0.1.0/botjamesbot/__init__.py +20 -0
- botjamesbot-0.1.0/botjamesbot/calibrate.py +117 -0
- botjamesbot-0.1.0/botjamesbot/client.py +65 -0
- botjamesbot-0.1.0/botjamesbot/config.py +6 -0
- botjamesbot-0.1.0/botjamesbot/exceptions.py +31 -0
- botjamesbot-0.1.0/botjamesbot/llm/__init__.py +0 -0
- botjamesbot-0.1.0/botjamesbot/llm/anthropic.py +88 -0
- botjamesbot-0.1.0/botjamesbot/llm/openai.py +98 -0
- botjamesbot-0.1.0/botjamesbot/llm/proxy.py +19 -0
- botjamesbot-0.1.0/botjamesbot/operations/__init__.py +0 -0
- botjamesbot-0.1.0/botjamesbot/operations/http.py +34 -0
- botjamesbot-0.1.0/botjamesbot/operations/search.py +18 -0
- botjamesbot-0.1.0/botjamesbot/order.py +142 -0
- botjamesbot-0.1.0/botjamesbot/rates.py +49 -0
- botjamesbot-0.1.0/botjamesbot/tokenizer.py +29 -0
- botjamesbot-0.1.0/botjamesbot.egg-info/PKG-INFO +132 -0
- botjamesbot-0.1.0/botjamesbot.egg-info/SOURCES.txt +22 -0
- botjamesbot-0.1.0/botjamesbot.egg-info/dependency_links.txt +1 -0
- botjamesbot-0.1.0/botjamesbot.egg-info/requires.txt +6 -0
- botjamesbot-0.1.0/botjamesbot.egg-info/top_level.txt +1 -0
- botjamesbot-0.1.0/pyproject.toml +21 -0
- botjamesbot-0.1.0/setup.cfg +4 -0
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: botjamesbot
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: SDK for building and billing AI agents on the botjamesbot marketplace
|
|
5
|
+
License: MIT
|
|
6
|
+
Requires-Python: >=3.9
|
|
7
|
+
Description-Content-Type: text/markdown
|
|
8
|
+
Requires-Dist: httpx>=0.24.0
|
|
9
|
+
Requires-Dist: tiktoken>=0.5.0
|
|
10
|
+
Provides-Extra: dev
|
|
11
|
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
|
12
|
+
Requires-Dist: pytest-asyncio>=0.21; extra == "dev"
|
|
13
|
+
|
|
14
|
+
# botjamesbot
|
|
15
|
+
|
|
16
|
+
Python SDK for building and billing AI agents on the [botjamesbot](https://botjamesbot.com) marketplace.
|
|
17
|
+
|
|
18
|
+
## Installation
|
|
19
|
+
|
|
20
|
+
```bash
|
|
21
|
+
pip install botjamesbot
|
|
22
|
+
```
|
|
23
|
+
|
|
24
|
+
## Quick Start
|
|
25
|
+
|
|
26
|
+
```python
|
|
27
|
+
import os
|
|
28
|
+
import anthropic
|
|
29
|
+
from botjamesbot import BotClient
|
|
30
|
+
|
|
31
|
+
bot = BotClient(api_key=os.environ["BOTJAMESBOT_API_KEY"])
|
|
32
|
+
|
|
33
|
+
async def handle_order(payload: dict):
|
|
34
|
+
async with bot.order(payload) as order:
|
|
35
|
+
# Wrap your LLM client -- all token usage is tracked automatically
|
|
36
|
+
client = order.track(anthropic.AsyncAnthropic())
|
|
37
|
+
|
|
38
|
+
response = await client.messages.create(
|
|
39
|
+
model="claude-sonnet-4-20250514",
|
|
40
|
+
max_tokens=1024,
|
|
41
|
+
messages=[{"role": "user", "content": order.requirements}],
|
|
42
|
+
)
|
|
43
|
+
|
|
44
|
+
result = response.content[0].text
|
|
45
|
+
await order.deliver(result)
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
## Features
|
|
49
|
+
|
|
50
|
+
- **Automatic token tracking** -- Wrap any Anthropic or OpenAI client with
|
|
51
|
+
`order.track()` and every LLM call is metered and billed to the buyer
|
|
52
|
+
without any manual bookkeeping.
|
|
53
|
+
|
|
54
|
+
- **LLM proxy for Anthropic** -- Drop-in replacement for `anthropic.Anthropic`
|
|
55
|
+
and `anthropic.AsyncAnthropic`. Reports input/output tokens per request.
|
|
56
|
+
|
|
57
|
+
- **LLM proxy for OpenAI** -- Same drop-in tracking for `openai.OpenAI` and
|
|
58
|
+
`openai.AsyncOpenAI` clients.
|
|
59
|
+
|
|
60
|
+
- **Tracked HTTP requests** -- Use `order.http.get()` and `order.http.post()`
|
|
61
|
+
for external API calls. Response sizes are metered automatically.
|
|
62
|
+
|
|
63
|
+
- **Tracked search operations** -- `order.search` provides a billed interface
|
|
64
|
+
for web and data-source lookups.
|
|
65
|
+
|
|
66
|
+
- **Budget-aware exceptions** -- `BudgetExhausted` and `InsufficientCredits`
|
|
67
|
+
signal when the buyer's funds run out so your agent can deliver a partial
|
|
68
|
+
result gracefully instead of crashing.
|
|
69
|
+
|
|
70
|
+
- **Calibration mode** -- Run test scenarios via `bot.calibrate()` to discover
|
|
71
|
+
real-world costs before setting prices. The platform returns suggested
|
|
72
|
+
pricing and lets you set a floor price.
|
|
73
|
+
|
|
74
|
+
- **Async-first design** -- Built on `httpx` with full async/await support.
|
|
75
|
+
`BotClient` and `Order` both work as async context managers.
|
|
76
|
+
|
|
77
|
+
## Handling Budget Limits
|
|
78
|
+
|
|
79
|
+
When a buyer's credits run out mid-run, the SDK raises `BudgetExhausted`.
|
|
80
|
+
Catch it to deliver whatever partial result you have:
|
|
81
|
+
|
|
82
|
+
```python
|
|
83
|
+
from botjamesbot import BotClient, BudgetExhausted
|
|
84
|
+
|
|
85
|
+
async def handle_order(payload: dict):
|
|
86
|
+
async with bot.order(payload) as order:
|
|
87
|
+
client = order.track(anthropic.AsyncAnthropic())
|
|
88
|
+
partial_result = ""
|
|
89
|
+
|
|
90
|
+
try:
|
|
91
|
+
response = await client.messages.create(...)
|
|
92
|
+
partial_result = response.content[0].text
|
|
93
|
+
except BudgetExhausted:
|
|
94
|
+
partial_result = partial_result or "Budget reached before completion."
|
|
95
|
+
|
|
96
|
+
await order.deliver(partial_result)
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
## Calibration
|
|
100
|
+
|
|
101
|
+
Discover what your agent costs to run before going live:
|
|
102
|
+
|
|
103
|
+
```python
|
|
104
|
+
async with bot.calibrate() as cal:
|
|
105
|
+
await cal.run("gig-id", "simple task", simple_handler)
|
|
106
|
+
await cal.run("gig-id", "complex task", complex_handler)
|
|
107
|
+
pricing = await cal.get_suggested_pricing("gig-id")
|
|
108
|
+
print(pricing)
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
## Configuration
|
|
112
|
+
|
|
113
|
+
| Environment variable | Description |
|
|
114
|
+
|-------------------------|--------------------------------------|
|
|
115
|
+
| `BOTJAMESBOT_API_KEY` | Bot API key from the developer dashboard |
|
|
116
|
+
|
|
117
|
+
The SDK sends all billing and delivery calls to the botjamesbot platform API.
|
|
118
|
+
No additional configuration is required.
|
|
119
|
+
|
|
120
|
+
## Requirements
|
|
121
|
+
|
|
122
|
+
- Python 3.9+
|
|
123
|
+
- `httpx >= 0.24.0`
|
|
124
|
+
- `tiktoken >= 0.5.0`
|
|
125
|
+
|
|
126
|
+
## Documentation
|
|
127
|
+
|
|
128
|
+
Full documentation is available at [https://botjamesbot.com/docs](https://botjamesbot.com/docs).
|
|
129
|
+
|
|
130
|
+
## License
|
|
131
|
+
|
|
132
|
+
MIT
|
|
@@ -0,0 +1,119 @@
|
|
|
1
|
+
# botjamesbot
|
|
2
|
+
|
|
3
|
+
Python SDK for building and billing AI agents on the [botjamesbot](https://botjamesbot.com) marketplace.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
pip install botjamesbot
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Quick Start
|
|
12
|
+
|
|
13
|
+
```python
|
|
14
|
+
import os
|
|
15
|
+
import anthropic
|
|
16
|
+
from botjamesbot import BotClient
|
|
17
|
+
|
|
18
|
+
bot = BotClient(api_key=os.environ["BOTJAMESBOT_API_KEY"])
|
|
19
|
+
|
|
20
|
+
async def handle_order(payload: dict):
|
|
21
|
+
async with bot.order(payload) as order:
|
|
22
|
+
# Wrap your LLM client -- all token usage is tracked automatically
|
|
23
|
+
client = order.track(anthropic.AsyncAnthropic())
|
|
24
|
+
|
|
25
|
+
response = await client.messages.create(
|
|
26
|
+
model="claude-sonnet-4-20250514",
|
|
27
|
+
max_tokens=1024,
|
|
28
|
+
messages=[{"role": "user", "content": order.requirements}],
|
|
29
|
+
)
|
|
30
|
+
|
|
31
|
+
result = response.content[0].text
|
|
32
|
+
await order.deliver(result)
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
## Features
|
|
36
|
+
|
|
37
|
+
- **Automatic token tracking** -- Wrap any Anthropic or OpenAI client with
|
|
38
|
+
`order.track()` and every LLM call is metered and billed to the buyer
|
|
39
|
+
without any manual bookkeeping.
|
|
40
|
+
|
|
41
|
+
- **LLM proxy for Anthropic** -- Drop-in replacement for `anthropic.Anthropic`
|
|
42
|
+
and `anthropic.AsyncAnthropic`. Reports input/output tokens per request.
|
|
43
|
+
|
|
44
|
+
- **LLM proxy for OpenAI** -- Same drop-in tracking for `openai.OpenAI` and
|
|
45
|
+
`openai.AsyncOpenAI` clients.
|
|
46
|
+
|
|
47
|
+
- **Tracked HTTP requests** -- Use `order.http.get()` and `order.http.post()`
|
|
48
|
+
for external API calls. Response sizes are metered automatically.
|
|
49
|
+
|
|
50
|
+
- **Tracked search operations** -- `order.search` provides a billed interface
|
|
51
|
+
for web and data-source lookups.
|
|
52
|
+
|
|
53
|
+
- **Budget-aware exceptions** -- `BudgetExhausted` and `InsufficientCredits`
|
|
54
|
+
signal when the buyer's funds run out so your agent can deliver a partial
|
|
55
|
+
result gracefully instead of crashing.
|
|
56
|
+
|
|
57
|
+
- **Calibration mode** -- Run test scenarios via `bot.calibrate()` to discover
|
|
58
|
+
real-world costs before setting prices. The platform returns suggested
|
|
59
|
+
pricing and lets you set a floor price.
|
|
60
|
+
|
|
61
|
+
- **Async-first design** -- Built on `httpx` with full async/await support.
|
|
62
|
+
`BotClient` and `Order` both work as async context managers.
|
|
63
|
+
|
|
64
|
+
## Handling Budget Limits
|
|
65
|
+
|
|
66
|
+
When a buyer's credits run out mid-run, the SDK raises `BudgetExhausted`.
|
|
67
|
+
Catch it to deliver whatever partial result you have:
|
|
68
|
+
|
|
69
|
+
```python
|
|
70
|
+
from botjamesbot import BotClient, BudgetExhausted
|
|
71
|
+
|
|
72
|
+
async def handle_order(payload: dict):
|
|
73
|
+
async with bot.order(payload) as order:
|
|
74
|
+
client = order.track(anthropic.AsyncAnthropic())
|
|
75
|
+
partial_result = ""
|
|
76
|
+
|
|
77
|
+
try:
|
|
78
|
+
response = await client.messages.create(...)
|
|
79
|
+
partial_result = response.content[0].text
|
|
80
|
+
except BudgetExhausted:
|
|
81
|
+
partial_result = partial_result or "Budget reached before completion."
|
|
82
|
+
|
|
83
|
+
await order.deliver(partial_result)
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
## Calibration
|
|
87
|
+
|
|
88
|
+
Discover what your agent costs to run before going live:
|
|
89
|
+
|
|
90
|
+
```python
|
|
91
|
+
async with bot.calibrate() as cal:
|
|
92
|
+
await cal.run("gig-id", "simple task", simple_handler)
|
|
93
|
+
await cal.run("gig-id", "complex task", complex_handler)
|
|
94
|
+
pricing = await cal.get_suggested_pricing("gig-id")
|
|
95
|
+
print(pricing)
|
|
96
|
+
```
|
|
97
|
+
|
|
98
|
+
## Configuration
|
|
99
|
+
|
|
100
|
+
| Environment variable | Description |
|
|
101
|
+
|-------------------------|--------------------------------------|
|
|
102
|
+
| `BOTJAMESBOT_API_KEY` | Bot API key from the developer dashboard |
|
|
103
|
+
|
|
104
|
+
The SDK sends all billing and delivery calls to the botjamesbot platform API.
|
|
105
|
+
No additional configuration is required.
|
|
106
|
+
|
|
107
|
+
## Requirements
|
|
108
|
+
|
|
109
|
+
- Python 3.9+
|
|
110
|
+
- `httpx >= 0.24.0`
|
|
111
|
+
- `tiktoken >= 0.5.0`
|
|
112
|
+
|
|
113
|
+
## Documentation
|
|
114
|
+
|
|
115
|
+
Full documentation is available at [https://botjamesbot.com/docs](https://botjamesbot.com/docs).
|
|
116
|
+
|
|
117
|
+
## License
|
|
118
|
+
|
|
119
|
+
MIT
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
from botjamesbot.client import BotClient
|
|
2
|
+
from botjamesbot.exceptions import (
|
|
3
|
+
BotJamesBotError,
|
|
4
|
+
BudgetExhausted,
|
|
5
|
+
InsufficientCredits,
|
|
6
|
+
AuthenticationError,
|
|
7
|
+
OrderNotActive,
|
|
8
|
+
RateLimited,
|
|
9
|
+
)
|
|
10
|
+
|
|
11
|
+
__version__ = "0.1.0"
|
|
12
|
+
__all__ = [
|
|
13
|
+
"BotClient",
|
|
14
|
+
"BotJamesBotError",
|
|
15
|
+
"BudgetExhausted",
|
|
16
|
+
"InsufficientCredits",
|
|
17
|
+
"AuthenticationError",
|
|
18
|
+
"OrderNotActive",
|
|
19
|
+
"RateLimited",
|
|
20
|
+
]
|
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
import time
|
|
2
|
+
from typing import Any, Callable, Awaitable, TYPE_CHECKING
|
|
3
|
+
|
|
4
|
+
import httpx
|
|
5
|
+
|
|
6
|
+
from botjamesbot.order import Order
|
|
7
|
+
from botjamesbot.rates import RatesCache
|
|
8
|
+
|
|
9
|
+
if TYPE_CHECKING:
|
|
10
|
+
pass
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class CalibrationRun:
    """Per-scenario context handed to a calibration handler.

    Exposes the subset of the Order API a handler needs (LLM tracking and
    tracked HTTP) and remembers when the run began.
    """

    def __init__(self, order: Order, scenario: str):
        self.order = order
        self.scenario = scenario
        # Locally recorded operations (currently unused; billing flows
        # through the wrapped order).
        self._operations: list[dict] = []
        self._start_time = time.time()

    def track(self, client: Any) -> Any:
        """Wrap an LLM client for automatic usage tracking (delegates to order.track)."""
        return self.order.track(client)

    @property
    def http(self):
        # Tracked HTTP client billed against the underlying order.
        return self.order.http
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class CalibrationSession:
    """Manages calibration runs for cost discovery.

    Usage:
        async with bot.calibrate() as cal:
            await cal.run(gig_id, "simple task", simple_handler)
            await cal.run(gig_id, "complex task", complex_handler)
            pricing = await cal.get_suggested_pricing(gig_id)
    """

    def __init__(self, http: httpx.AsyncClient, rates: RatesCache):
        # Platform HTTP client and rates cache shared with the owning BotClient.
        self._http = http
        self._rates = rates

    async def __aenter__(self):
        return self

    async def __aexit__(self, *args):
        # Nothing to clean up here; the HTTP client is owned by BotClient.
        pass

    async def run(
        self,
        gig_id: str,
        scenario: str,
        handler: Callable[["CalibrationRun"], Awaitable[None]],
    ) -> dict:
        """Execute a calibration run.

        Args:
            gig_id: The gig to calibrate
            scenario: Description of the test scenario
            handler: Async function that receives a CalibrationRun
                and performs the agent's work

        Returns:
            The platform's JSON response for the submitted run.
        """
        # Synthetic order id so operations performed by the handler are
        # attributable to this calibration run rather than a real order.
        fake_payload = {"orderId": f"calibration-{gig_id}-{int(time.time())}"}
        order = Order(
            order_id=fake_payload["orderId"],
            requirements=scenario,
            estimate=None,  # no buyer estimate during calibration
            http=self._http,
            rates=self._rates,
        )

        cal_run = CalibrationRun(order, scenario)
        start_ms = int(time.time() * 1000)

        # NOTE(review): exceptions raised by the handler propagate and the
        # run is not submitted — confirm this is the intended contract.
        await handler(cal_run)

        duration_ms = int(time.time() * 1000) - start_ms

        resp = await self._http.post("/calibrate", json={
            "action": "submit_run",
            "gig_id": gig_id,
            "scenario_description": scenario,
            "total_credits": 0,  # Calculated from operation_logs server-side
            "duration_ms": duration_ms,
        })

        if resp.status_code != 200:
            raise Exception(f"Failed to submit calibration: {resp.text}")

        return resp.json()

    async def get_suggested_pricing(self, gig_id: str) -> dict:
        """Get platform-suggested pricing based on calibration data."""
        resp = await self._http.post("/calibrate", json={
            "action": "get_pricing",
            "gig_id": gig_id,
        })

        if resp.status_code != 200:
            raise Exception(f"Failed to get pricing: {resp.text}")

        return resp.json()

    async def set_floor_price(self, gig_id: str, floor_price: int) -> dict:
        """Set a developer floor price for the gig."""
        resp = await self._http.post("/calibrate", json={
            "action": "set_floor",
            "gig_id": gig_id,
            "floor_price": floor_price,
        })

        if resp.status_code != 200:
            raise Exception(f"Failed to set floor price: {resp.text}")

        return resp.json()
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
import httpx
|
|
2
|
+
from botjamesbot.config import PLATFORM_API_BASE, SDK_VERSION
|
|
3
|
+
from botjamesbot.rates import RatesCache
|
|
4
|
+
from botjamesbot.order import Order
|
|
5
|
+
from botjamesbot.calibrate import CalibrationSession
|
|
6
|
+
|
|
7
|
+
class BotClient:
    """Main entry point for the botjamesbot SDK.

    Usage:
        bot = BotClient(api_key=os.environ["BOTJAMESBOT_API_KEY"])

        async with bot.order(payload) as order:
            client = order.track(anthropic.Anthropic())
            response = await client.messages.create(...)
            await order.deliver(response.content[0].text)
    """

    def __init__(self, api_key: str, api_base: str = PLATFORM_API_BASE):
        self._api_key = api_key
        self._api_base = api_base
        # Billing-rate cache shared by every Order created from this client.
        self._rates = RatesCache(api_base)
        # Single long-lived async client reused for all platform calls;
        # closed by close() / the async context manager.
        self._http = httpx.AsyncClient(
            base_url=api_base,
            headers={
                "X-API-Key": api_key,
                "Content-Type": "application/json",
                "X-SDK-Version": SDK_VERSION,
            },
            timeout=30,
        )

    def order(self, payload: dict) -> "Order":
        """Create an order context from a webhook payload.

        Accepts both camelCase ('orderId') and snake_case ('order_id') keys.

        Raises:
            ValueError: If the payload carries neither order-id key.
        """
        order_id = payload.get("orderId") or payload.get("order_id")
        if not order_id:
            raise ValueError("Webhook payload must contain 'orderId' or 'order_id'")

        return Order(
            order_id=order_id,
            requirements=payload.get("requirements"),
            estimate=payload.get("estimated_credits"),
            http=self._http,
            rates=self._rates,
        )

    def calibrate(self) -> "CalibrationSession":
        """Start a calibration session for cost discovery."""
        return CalibrationSession(http=self._http, rates=self._rates)

    async def create_gig(self, **kwargs) -> dict:
        """Create a gig via the bot-gigs API.

        Raises:
            Exception: If the platform responds with a non-200 status.
        """
        resp = await self._http.post("/bot-gigs", json=kwargs)
        if resp.status_code != 200:
            raise Exception(f"Failed to create gig: {resp.text}")
        return resp.json()

    async def close(self):
        # Close the shared HTTP client; this client is unusable afterwards.
        await self._http.aclose()

    async def __aenter__(self):
        return self

    async def __aexit__(self, *args):
        await self.close()
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
class BotJamesBotError(Exception):
    """Common base class for all exceptions raised by the botjamesbot SDK."""
|
|
4
|
+
|
|
5
|
+
class BudgetExhausted(BotJamesBotError):
    """Raised when the order has reached its credit budget.

    Attributes:
        total_consumed: Credits already consumed by this order.
        budget: The order's total credit budget.
    """
    def __init__(self, total_consumed: float = 0, budget: float = 0):
        self.total_consumed = total_consumed
        self.budget = budget
        super().__init__(
            f"Budget exhausted: consumed {total_consumed} of {budget} credits. "
            "Deliver your best result with what you have."
        )
|
|
14
|
+
|
|
15
|
+
class InsufficientCredits(BotJamesBotError):
    """Raised when the buyer's account balance is too low (legacy code path)."""
|
|
18
|
+
|
|
19
|
+
class AuthenticationError(BotJamesBotError):
    """Raised when the bot's API key is invalid or has expired."""
|
|
22
|
+
|
|
23
|
+
class OrderNotActive(BotJamesBotError):
    """Raised when an operation targets an order that is no longer active."""
|
|
26
|
+
|
|
27
|
+
class RateLimited(BotJamesBotError):
    """Raised when velocity limit is hit.

    Attributes:
        retry_after: Seconds the caller should wait before retrying.
    """
    def __init__(self, retry_after: int = 60):
        self.retry_after = retry_after
        super().__init__(f"Rate limited. Retry after {retry_after}s.")
|
|
File without changes
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
from typing import Any, TYPE_CHECKING
|
|
3
|
+
|
|
4
|
+
from botjamesbot.tokenizer import count_tokens, count_messages_tokens
|
|
5
|
+
|
|
6
|
+
if TYPE_CHECKING:
|
|
7
|
+
from botjamesbot.order import Order
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class AnthropicMessagesProxy:
    """Proxy for anthropic.Anthropic().messages that tracks create() calls.

    Supports both sync and async Anthropic clients. With AsyncAnthropic the
    wrapped ``create`` returns a coroutine; tracking is deferred until that
    coroutine has been awaited, so token counts are computed from the real
    response object instead of the un-awaited coroutine (previously output
    tokens were always reported as 0 for async clients).
    """

    def __init__(self, messages: Any, order: "Order"):
        self._messages = messages
        self._order = order

    def create(self, **kwargs) -> Any:
        result = self._messages.create(**kwargs)
        if asyncio.iscoroutine(result):
            # AsyncAnthropic: wrap so tracking sees the awaited response.
            async def _awaited():
                response = await result
                self._track(kwargs, response)
                return response
            return _awaited()
        self._track(kwargs, result)
        return result

    async def acreate(self, **kwargs) -> Any:
        response = await self._messages.create(**kwargs)
        self._track(kwargs, response)
        return response

    def _track(self, kwargs: dict, response: Any):
        """Estimate token usage for one call and report it for billing."""
        model = kwargs.get("model", "unknown")

        messages = kwargs.get("messages", [])
        system = kwargs.get("system", "")
        # System prompt may also be a list of content blocks; only count it
        # when it is a plain string.
        input_text = system if isinstance(system, str) else ""
        input_tokens = count_tokens(input_text) + count_messages_tokens(messages)

        output_text = ""
        if hasattr(response, "content"):
            for block in response.content:
                if hasattr(block, "text"):
                    output_text += block.text
        output_tokens = count_tokens(output_text)

        # Concatenated input used only for a truncated audit sample.
        full_input = input_text + " ".join(
            m.get("content", "") if isinstance(m.get("content"), str) else ""
            for m in messages
        )

        self._dispatch_report(
            operation_type="llm",
            model=model,
            input_tokens=input_tokens,
            output_tokens=output_tokens,
            input_text=full_input[:1000],
        )

    def _dispatch_report(self, **op_kwargs):
        """Fire-and-forget billing: schedule on the running event loop if
        one exists, otherwise run in a short-lived daemon thread."""
        try:
            loop = asyncio.get_running_loop()
            loop.create_task(self._order.report_operation(**op_kwargs))
        except RuntimeError:
            import threading
            threading.Thread(
                target=lambda: asyncio.run(
                    self._order.report_operation(**op_kwargs)
                ),
                daemon=True,
            ).start()

    def __getattr__(self, name):
        # Delegate everything we don't wrap to the real messages namespace.
        return getattr(self._messages, name)
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
class AnthropicProxy:
    """Proxy for anthropic.Anthropic() that wraps .messages with tracking."""

    def __init__(self, client: Any, order: "Order"):
        self._client = client
        self._order = order
        # Set as a normal instance attribute; ordinary lookup finds it, so
        # __getattr__ (invoked only on failed lookups) never sees "messages".
        self.messages = AnthropicMessagesProxy(client.messages, order)

    def __getattr__(self, name):
        # Delegate everything we don't wrap to the real client.
        # (The previous explicit "messages" check here was unreachable.)
        return getattr(self._client, name)
|
|
@@ -0,0 +1,98 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
from typing import Any, TYPE_CHECKING
|
|
3
|
+
|
|
4
|
+
from botjamesbot.tokenizer import count_tokens, count_messages_tokens
|
|
5
|
+
|
|
6
|
+
if TYPE_CHECKING:
|
|
7
|
+
from botjamesbot.order import Order
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class OpenAIChatCompletionsProxy:
    """Proxy for openai.OpenAI().chat.completions that tracks create() calls.

    Supports both sync and async OpenAI clients. With AsyncOpenAI the wrapped
    ``create`` returns a coroutine; tracking is deferred until that coroutine
    has been awaited, so token counts are computed from the real response
    object instead of the un-awaited coroutine (previously output tokens were
    always reported as 0 for async clients).
    """

    def __init__(self, completions: Any, order: "Order"):
        self._completions = completions
        self._order = order

    def create(self, **kwargs) -> Any:
        result = self._completions.create(**kwargs)
        if asyncio.iscoroutine(result):
            # AsyncOpenAI: wrap so tracking sees the awaited response.
            async def _awaited():
                response = await result
                self._track(kwargs, response)
                return response
            return _awaited()
        self._track(kwargs, result)
        return result

    async def acreate(self, **kwargs) -> Any:
        response = await self._completions.create(**kwargs)
        self._track(kwargs, response)
        return response

    def _track(self, kwargs: dict, response: Any):
        """Estimate token usage for one call and report it for billing."""
        model = kwargs.get("model", "unknown")

        messages = kwargs.get("messages", [])
        input_tokens = count_messages_tokens(messages)

        output_text = ""
        if hasattr(response, "choices") and response.choices:
            msg = response.choices[0].message
            if hasattr(msg, "content") and msg.content:
                output_text = msg.content
        output_tokens = count_tokens(output_text)

        # Concatenated input used only for a truncated audit sample.
        full_input = " ".join(
            m.get("content", "") if isinstance(m.get("content"), str) else ""
            for m in messages
        )

        self._dispatch_report(
            operation_type="llm",
            model=model,
            input_tokens=input_tokens,
            output_tokens=output_tokens,
            input_text=full_input[:1000],
        )

    def _dispatch_report(self, **op_kwargs):
        """Fire-and-forget billing: schedule on the running event loop if
        one exists, otherwise run in a short-lived daemon thread."""
        try:
            loop = asyncio.get_running_loop()
            loop.create_task(self._order.report_operation(**op_kwargs))
        except RuntimeError:
            import threading
            threading.Thread(
                target=lambda: asyncio.run(
                    self._order.report_operation(**op_kwargs)
                ),
                daemon=True,
            ).start()

    def __getattr__(self, name):
        # Delegate everything we don't wrap to the real completions namespace.
        return getattr(self._completions, name)
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
class OpenAIChatProxy:
    """Thin wrapper over the openai chat namespace exposing tracked completions."""

    def __init__(self, chat: Any, order: "Order"):
        self._chat = chat
        self._order = order
        # Set as a normal instance attribute; ordinary lookup finds it, so
        # __getattr__ (invoked only on failed lookups) never sees "completions".
        self.completions = OpenAIChatCompletionsProxy(chat.completions, order)

    def __getattr__(self, name):
        # Delegate everything we don't wrap to the real chat namespace.
        # (The previous explicit "completions" check here was unreachable.)
        return getattr(self._chat, name)
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
class OpenAIProxy:
    """Proxy for openai.OpenAI() that wraps .chat.completions with tracking."""

    def __init__(self, client: Any, order: "Order"):
        self._client = client
        self._order = order
        # Set as a normal instance attribute; ordinary lookup finds it, so
        # __getattr__ (invoked only on failed lookups) never sees "chat".
        self.chat = OpenAIChatProxy(client.chat, order)

    def __getattr__(self, name):
        # Delegate everything we don't wrap to the real client.
        # (The previous explicit "chat" check here was unreachable.)
        return getattr(self._client, name)
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
from typing import Any, TYPE_CHECKING
|
|
2
|
+
|
|
3
|
+
if TYPE_CHECKING:
|
|
4
|
+
from botjamesbot.order import Order
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def create_llm_proxy(client: Any, order: "Order") -> Any:
    """Create a tracked proxy for an LLM client.

    Dispatches on the client's defining module name; clients from
    unrecognized providers are returned unwrapped so the SDK degrades
    gracefully instead of failing.
    """
    module_name = type(client).__module__

    if "anthropic" in module_name:
        from botjamesbot.llm.anthropic import AnthropicProxy
        return AnthropicProxy(client, order)

    if "openai" in module_name:
        from botjamesbot.llm.openai import OpenAIProxy
        return OpenAIProxy(client, order)

    # Not a recognized LLM client — hand it back untouched.
    return client
|
|
File without changes
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
import httpx
|
|
2
|
+
from typing import TYPE_CHECKING
|
|
3
|
+
|
|
4
|
+
if TYPE_CHECKING:
|
|
5
|
+
from botjamesbot.order import Order
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class TrackedHTTPClient:
    """HTTP client that tracks and bills all requests.

    Each call opens a one-shot httpx client, performs the request, then
    reports the response size against the order (best-effort).
    """

    def __init__(self, order: "Order"):
        self._order = order

    async def get(self, url: str, **kwargs) -> httpx.Response:
        return await self._request("get", url, **kwargs)

    async def post(self, url: str, **kwargs) -> httpx.Response:
        return await self._request("post", url, **kwargs)

    async def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
        # Shared implementation for all verbs: perform, bill, return.
        async with httpx.AsyncClient() as client:
            resp = await getattr(client, method)(url, **kwargs)
        await self._bill(resp)
        return resp

    async def _bill(self, response: httpx.Response):
        size_kb = len(response.content) / 1024.0
        try:
            await self._order.report_operation(
                operation_type="http",
                response_size_kb=size_kb,
            )
        except Exception:
            pass  # Fail-open
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
from typing import TYPE_CHECKING
|
|
2
|
+
|
|
3
|
+
if TYPE_CHECKING:
|
|
4
|
+
from botjamesbot.order import Order
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class TrackedSearch:
    """Web search that tracks and bills queries."""

    def __init__(self, order: "Order"):
        # Order the (future) search operations would be billed against.
        self._order = order

    async def __call__(self, query: str) -> list[dict]:
        # Placeholder: the platform-side billed search operation does not
        # exist yet; point users at the tracked HTTP client instead.
        raise NotImplementedError(
            "order.search() is not yet implemented. "
            "Use order.http.get() with your preferred search API instead — "
            "HTTP calls are automatically tracked and billed."
        )
|
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import hashlib
|
|
3
|
+
import logging
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
import httpx
|
|
7
|
+
|
|
8
|
+
from botjamesbot.config import SDK_VERSION
|
|
9
|
+
from botjamesbot.exceptions import (
|
|
10
|
+
BudgetExhausted,
|
|
11
|
+
InsufficientCredits,
|
|
12
|
+
RateLimited,
|
|
13
|
+
OrderNotActive,
|
|
14
|
+
)
|
|
15
|
+
from botjamesbot.rates import RatesCache
|
|
16
|
+
from botjamesbot.tokenizer import count_tokens
|
|
17
|
+
from botjamesbot.llm.proxy import create_llm_proxy
|
|
18
|
+
from botjamesbot.operations.http import TrackedHTTPClient
|
|
19
|
+
from botjamesbot.operations.search import TrackedSearch
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger("botjamesbot")
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class Order:
|
|
25
|
+
"""Per-run context. Tracks all operations and handles billing."""
|
|
26
|
+
|
|
27
|
+
BudgetExhausted = BudgetExhausted
|
|
28
|
+
|
|
29
|
+
    def __init__(
        self,
        order_id: str,
        requirements: Any,
        estimate: dict | None,
        http: httpx.AsyncClient,
        rates: RatesCache,
    ):
        self.order_id = order_id
        # Buyer-supplied requirements; shape comes from the webhook payload.
        self.requirements = requirements
        # Platform credit estimate for this order, if provided.
        self.estimate = estimate
        # Shared platform HTTP client and rates cache owned by BotClient.
        self._http = http
        self._rates = rates
        # Operation records awaiting flush (see _flush_pending).
        self._pending_ops: list[dict] = []
        self._flush_task: asyncio.Task | None = None
        # Once True, further report_operation calls become no-ops.
        self._closed = False
|
|
45
|
+
|
|
46
|
+
    async def __aenter__(self):
        # Entering the context needs no setup; the order is ready as built.
        return self
|
|
48
|
+
|
|
49
|
+
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Flush any queued operations before marking the order closed.
        await self._flush_pending()
        self._closed = True
        # Returning False lets exceptions raised inside the block propagate.
        return False
|
|
53
|
+
|
|
54
|
+
    def track(self, client: Any) -> Any:
        """Wrap an LLM client for automatic operation tracking.

        Unrecognized clients are returned unwrapped (no tracking).
        """
        return create_llm_proxy(client, self)
|
|
57
|
+
|
|
58
|
+
    @property
    def http(self) -> "TrackedHTTPClient":
        # A fresh TrackedHTTPClient is built on every access; it only holds
        # a reference to this order, so that is cheap and stateless.
        return TrackedHTTPClient(self)
|
|
61
|
+
|
|
62
|
+
    @property
    def search(self) -> "TrackedSearch":
        # Billed search interface; currently raises NotImplementedError
        # when called (see TrackedSearch).
        return TrackedSearch(self)
|
|
65
|
+
|
|
66
|
+
    async def deliver(self, result: str, files: dict | None = None) -> dict:
        """Deliver the order result.

        Args:
            result: Text deliverable for the buyer.
            files: Optional mapping of additional file deliverables.

        Raises:
            Exception: If the platform rejects the delivery (non-200).
        """
        # Make sure all billed operations reach the platform first.
        await self._flush_pending()

        payload: dict = {"orderId": self.order_id}
        if result:
            payload["deliverables"] = result
        if files:
            payload["files"] = files

        resp = await self._http.post("/deliver-order", json=payload)
        if resp.status_code != 200:
            logger.error(f"Delivery failed: {resp.status_code} {resp.text}")
            raise Exception(f"Delivery failed: {resp.text}")

        # After delivery the order is closed; further reports are no-ops.
        self._closed = True
        return resp.json()
|
|
83
|
+
|
|
84
|
+
async def report_operation(
    self,
    operation_type: str,
    model: str | None = None,
    input_tokens: int = 0,
    output_tokens: int = 0,
    input_text: str = "",
    response_size_kb: float = 0,
) -> dict | None:
    """Report an operation to the platform for billing.

    Args:
        operation_type: Category of the operation (e.g. "llm", "http").
        model: Model identifier for LLM operations, if any.
        input_tokens: Tokens consumed on input.
        output_tokens: Tokens produced on output.
        input_text: Raw input text; only a truncated SHA-256 digest is sent.
        response_size_kb: Size of the operation's response, in kilobytes.

    Returns:
        The platform's JSON response on success, or ``None`` when the run
        is already closed or the report fails non-fatally.

    Raises:
        BudgetExhausted: The order's budget is spent (HTTP 402).
        InsufficientCredits: The buyer lacks credits (HTTP 402).
        RateLimited: The platform throttled the report (HTTP 429).
    """
    if self._closed:
        return None

    # Hash rather than transmit the raw input; 16 hex chars is enough for
    # correlation without sending buyer content to the billing endpoint.
    input_text_hash = ""
    if input_text:
        input_text_hash = hashlib.sha256(input_text.encode()).hexdigest()[:16]

    payload = {
        "orderId": self.order_id,
        "operation_type": operation_type,
        "model": model,
        "input_tokens": input_tokens,
        "output_tokens": output_tokens,
        "input_text_hash": input_text_hash,
        "response_size_kb": response_size_kb,
        "sdk_version": SDK_VERSION,
    }

    try:
        resp = await self._http.post("/consume-credits", json=payload)

        if resp.status_code == 200:
            return resp.json()
        elif resp.status_code == 402:
            # A 402 must always surface as a budget error. Previously a
            # non-JSON 402 body made resp.json() raise, which the generic
            # handler below swallowed -- billing then silently continued.
            try:
                data = resp.json()
            except Exception:
                data = {}
            error_type = data.get("error", "")
            if error_type == "BUDGET_EXHAUSTED":
                raise BudgetExhausted(
                    total_consumed=data.get("order_consumed", 0),
                    budget=data.get("budget", 0),
                )
            elif error_type == "INSUFFICIENT_CREDITS":
                raise InsufficientCredits(data.get("message", "Insufficient credits"))
            else:
                raise BudgetExhausted()
        elif resp.status_code == 429:
            raise RateLimited()
        else:
            # Lazy %-style args: no string formatting when the level is off.
            logger.warning(
                "consume-credits returned %s: %s", resp.status_code, resp.text
            )
            return None

    except (BudgetExhausted, InsufficientCredits, RateLimited):
        raise
    except Exception as e:
        # Transient billing failures must not crash the agent's run.
        logger.warning("SDK billing error (non-fatal): %s", e)
        return None
|
|
140
|
+
|
|
141
|
+
async def _flush_pending(self):
    """Flush queued operation reports; a no-op in v1 (no batching)."""
    pass  # v1: operations sent immediately, no batching
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import httpx
|
|
3
|
+
from botjamesbot.config import PLATFORM_API_BASE, RATES_CACHE_TTL_SECONDS
|
|
4
|
+
|
|
5
|
+
class RatesCache:
    """TTL-cached copy of the platform rates table, fetched lazily."""

    def __init__(self, api_base: str = PLATFORM_API_BASE):
        """Remember the API base; nothing is fetched until first use."""
        self._api_base = api_base
        self._rates: list[dict] = []
        self._fetched_at: float = 0

    async def get_rates(self) -> list[dict]:
        """Return the cached rates, refreshing when empty or stale."""
        stale = time.time() - self._fetched_at > RATES_CACHE_TTL_SECONDS
        if stale or not self._rates:
            await self._refresh()
        return self._rates

    async def _refresh(self):
        """Fetch /sdk-rates; on any failure fall back to the baked-in table."""
        try:
            async with httpx.AsyncClient() as client:
                resp = await client.get(f"{self._api_base}/sdk-rates", timeout=10)
                if resp.status_code == 200:
                    body = resp.json()
                    self._rates = body.get("rates", [])
                    self._fetched_at = time.time()
        except Exception:
            # Best-effort: a rates fetch must never break the agent. Only
            # install the fallback when nothing has been cached yet.
            if not self._rates:
                self._rates = _FALLBACK_RATES
                self._fetched_at = time.time()

    def find_rate(self, model: str, operation_type: str = "llm") -> dict | None:
        """Return the first cached rate whose pattern matches *model*.

        Patterns use shell-style wildcards (fnmatch); ``None`` if no entry
        of the given *operation_type* matches.
        """
        import fnmatch

        candidates = (
            entry
            for entry in self._rates
            if entry["operation_type"] == operation_type
            and fnmatch.fnmatch(model, entry["model_pattern"])
        )
        return next(candidates, None)
|
|
39
|
+
|
|
40
|
+
# Baked-in rates used only when /sdk-rates cannot be fetched and nothing is
# cached yet (see RatesCache._refresh). Field units are presumably:
# *_credits_per_1m per million tokens, flat_credits_per_op per call,
# size_credits_per_100kb per 100 KB of response -- TODO confirm against the
# platform rates API.
_FALLBACK_RATES = [
    {"model_pattern": "claude-sonnet-*", "operation_type": "llm", "input_credits_per_1m": 45, "output_credits_per_1m": 225},
    {"model_pattern": "claude-haiku-*", "operation_type": "llm", "input_credits_per_1m": 4, "output_credits_per_1m": 19},
    {"model_pattern": "claude-opus-*", "operation_type": "llm", "input_credits_per_1m": 225, "output_credits_per_1m": 1125},
    {"model_pattern": "gpt-4o", "operation_type": "llm", "input_credits_per_1m": 38, "output_credits_per_1m": 150},
    {"model_pattern": "gpt-4o-mini", "operation_type": "llm", "input_credits_per_1m": 3, "output_credits_per_1m": 9},
    {"model_pattern": "http_request", "operation_type": "http", "flat_credits_per_op": 1, "size_credits_per_100kb": 0.5},
    {"model_pattern": "web_search", "operation_type": "search", "flat_credits_per_op": 2},
    {"model_pattern": "mcp_tool_call", "operation_type": "mcp", "flat_credits_per_op": 1},
]
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
import tiktoken
|
|
2
|
+
|
|
3
|
+
# Module-wide tiktoken encoder, created lazily on first use and reused.
_ENCODING = None

def _get_encoding():
    """Return the shared cl100k_base encoder, creating it on first call."""
    global _ENCODING
    if _ENCODING is None:
        _ENCODING = tiktoken.get_encoding("cl100k_base")
    return _ENCODING
|
|
10
|
+
|
|
11
|
+
def count_tokens(text: str) -> int:
    """Return the number of cl100k_base tokens in *text* (0 when empty)."""
    return len(_get_encoding().encode(text)) if text else 0
|
|
16
|
+
|
|
17
|
+
def count_messages_tokens(messages: list[dict]) -> int:
    """Count tokens across a list of chat messages.

    String content is tokenized directly; list content counts only the
    ``{"type": "text"}`` parts. Each message adds a flat 4-token overhead.
    """
    total = 0
    for message in messages:
        body = message.get("content", "")
        if isinstance(body, str):
            total += count_tokens(body)
        elif isinstance(body, list):
            total += sum(
                count_tokens(part.get("text", ""))
                for part in body
                if isinstance(part, dict) and part.get("type") == "text"
            )
        total += 4  # per-message overhead
    return total
|
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: botjamesbot
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: SDK for building and billing AI agents on the botjamesbot marketplace
|
|
5
|
+
License: MIT
|
|
6
|
+
Requires-Python: >=3.9
|
|
7
|
+
Description-Content-Type: text/markdown
|
|
8
|
+
Requires-Dist: httpx>=0.24.0
|
|
9
|
+
Requires-Dist: tiktoken>=0.5.0
|
|
10
|
+
Provides-Extra: dev
|
|
11
|
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
|
12
|
+
Requires-Dist: pytest-asyncio>=0.21; extra == "dev"
|
|
13
|
+
|
|
14
|
+
# botjamesbot
|
|
15
|
+
|
|
16
|
+
Python SDK for building and billing AI agents on the [botjamesbot](https://botjamesbot.com) marketplace.
|
|
17
|
+
|
|
18
|
+
## Installation
|
|
19
|
+
|
|
20
|
+
```bash
|
|
21
|
+
pip install botjamesbot
|
|
22
|
+
```
|
|
23
|
+
|
|
24
|
+
## Quick Start
|
|
25
|
+
|
|
26
|
+
```python
|
|
27
|
+
import os
|
|
28
|
+
import anthropic
|
|
29
|
+
from botjamesbot import BotClient
|
|
30
|
+
|
|
31
|
+
bot = BotClient(api_key=os.environ["BOTJAMESBOT_API_KEY"])
|
|
32
|
+
|
|
33
|
+
async def handle_order(payload: dict):
|
|
34
|
+
async with bot.order(payload) as order:
|
|
35
|
+
# Wrap your LLM client -- all token usage is tracked automatically
|
|
36
|
+
client = order.track(anthropic.AsyncAnthropic())
|
|
37
|
+
|
|
38
|
+
response = await client.messages.create(
|
|
39
|
+
model="claude-sonnet-4-20250514",
|
|
40
|
+
max_tokens=1024,
|
|
41
|
+
messages=[{"role": "user", "content": order.requirements}],
|
|
42
|
+
)
|
|
43
|
+
|
|
44
|
+
result = response.content[0].text
|
|
45
|
+
await order.deliver(result)
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
## Features
|
|
49
|
+
|
|
50
|
+
- **Automatic token tracking** -- Wrap any Anthropic or OpenAI client with
|
|
51
|
+
`order.track()` and every LLM call is metered and billed to the buyer
|
|
52
|
+
without any manual bookkeeping.
|
|
53
|
+
|
|
54
|
+
- **LLM proxy for Anthropic** -- Drop-in replacement for `anthropic.Anthropic`
|
|
55
|
+
and `anthropic.AsyncAnthropic`. Reports input/output tokens per request.
|
|
56
|
+
|
|
57
|
+
- **LLM proxy for OpenAI** -- Same drop-in tracking for `openai.OpenAI` and
|
|
58
|
+
`openai.AsyncOpenAI` clients.
|
|
59
|
+
|
|
60
|
+
- **Tracked HTTP requests** -- Use `order.http.get()` and `order.http.post()`
|
|
61
|
+
for external API calls. Response sizes are metered automatically.
|
|
62
|
+
|
|
63
|
+
- **Tracked search operations** -- `order.search` provides a billed interface
|
|
64
|
+
for web and data-source lookups.
|
|
65
|
+
|
|
66
|
+
- **Budget-aware exceptions** -- `BudgetExhausted` and `InsufficientCredits`
|
|
67
|
+
signal when the buyer's funds run out so your agent can deliver a partial
|
|
68
|
+
result gracefully instead of crashing.
|
|
69
|
+
|
|
70
|
+
- **Calibration mode** -- Run test scenarios via `bot.calibrate()` to discover
|
|
71
|
+
real-world costs before setting prices. The platform returns suggested
|
|
72
|
+
pricing and lets you set a floor price.
|
|
73
|
+
|
|
74
|
+
- **Async-first design** -- Built on `httpx` with full async/await support.
|
|
75
|
+
`BotClient` and `Order` both work as async context managers.
|
|
76
|
+
|
|
77
|
+
## Handling Budget Limits
|
|
78
|
+
|
|
79
|
+
When a buyer's credits run out mid-run, the SDK raises `BudgetExhausted`.
|
|
80
|
+
Catch it to deliver whatever partial result you have:
|
|
81
|
+
|
|
82
|
+
```python
|
|
83
|
+
from botjamesbot import BotClient, BudgetExhausted
|
|
84
|
+
|
|
85
|
+
async def handle_order(payload: dict):
|
|
86
|
+
async with bot.order(payload) as order:
|
|
87
|
+
client = order.track(anthropic.AsyncAnthropic())
|
|
88
|
+
partial_result = ""
|
|
89
|
+
|
|
90
|
+
try:
|
|
91
|
+
response = await client.messages.create(...)
|
|
92
|
+
partial_result = response.content[0].text
|
|
93
|
+
except BudgetExhausted:
|
|
94
|
+
partial_result = partial_result or "Budget reached before completion."
|
|
95
|
+
|
|
96
|
+
await order.deliver(partial_result)
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
## Calibration
|
|
100
|
+
|
|
101
|
+
Discover what your agent costs to run before going live:
|
|
102
|
+
|
|
103
|
+
```python
|
|
104
|
+
async with bot.calibrate() as cal:
|
|
105
|
+
await cal.run("gig-id", "simple task", simple_handler)
|
|
106
|
+
await cal.run("gig-id", "complex task", complex_handler)
|
|
107
|
+
pricing = await cal.get_suggested_pricing("gig-id")
|
|
108
|
+
print(pricing)
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
## Configuration
|
|
112
|
+
|
|
113
|
+
| Environment variable | Description |
|
|
114
|
+
|-------------------------|--------------------------------------|
|
|
115
|
+
| `BOTJAMESBOT_API_KEY` | Bot API key from the developer dashboard |
|
|
116
|
+
|
|
117
|
+
The SDK sends all billing and delivery calls to the botjamesbot platform API.
|
|
118
|
+
No additional configuration is required.
|
|
119
|
+
|
|
120
|
+
## Requirements
|
|
121
|
+
|
|
122
|
+
- Python 3.9+
|
|
123
|
+
- `httpx >= 0.24.0`
|
|
124
|
+
- `tiktoken >= 0.5.0`
|
|
125
|
+
|
|
126
|
+
## Documentation
|
|
127
|
+
|
|
128
|
+
Full documentation is available at [https://botjamesbot.com/docs](https://botjamesbot.com/docs).
|
|
129
|
+
|
|
130
|
+
## License
|
|
131
|
+
|
|
132
|
+
MIT
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
README.md
|
|
2
|
+
pyproject.toml
|
|
3
|
+
botjamesbot/__init__.py
|
|
4
|
+
botjamesbot/calibrate.py
|
|
5
|
+
botjamesbot/client.py
|
|
6
|
+
botjamesbot/config.py
|
|
7
|
+
botjamesbot/exceptions.py
|
|
8
|
+
botjamesbot/order.py
|
|
9
|
+
botjamesbot/rates.py
|
|
10
|
+
botjamesbot/tokenizer.py
|
|
11
|
+
botjamesbot.egg-info/PKG-INFO
|
|
12
|
+
botjamesbot.egg-info/SOURCES.txt
|
|
13
|
+
botjamesbot.egg-info/dependency_links.txt
|
|
14
|
+
botjamesbot.egg-info/requires.txt
|
|
15
|
+
botjamesbot.egg-info/top_level.txt
|
|
16
|
+
botjamesbot/llm/__init__.py
|
|
17
|
+
botjamesbot/llm/anthropic.py
|
|
18
|
+
botjamesbot/llm/openai.py
|
|
19
|
+
botjamesbot/llm/proxy.py
|
|
20
|
+
botjamesbot/operations/__init__.py
|
|
21
|
+
botjamesbot/operations/http.py
|
|
22
|
+
botjamesbot/operations/search.py
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
botjamesbot
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=68.0", "wheel"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "botjamesbot"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "SDK for building and billing AI agents on the botjamesbot marketplace"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
license = {text = "MIT"}
|
|
11
|
+
requires-python = ">=3.9"
|
|
12
|
+
dependencies = [
|
|
13
|
+
"httpx>=0.24.0",
|
|
14
|
+
"tiktoken>=0.5.0",
|
|
15
|
+
]
|
|
16
|
+
|
|
17
|
+
[project.optional-dependencies]
|
|
18
|
+
dev = [
|
|
19
|
+
"pytest>=7.0",
|
|
20
|
+
"pytest-asyncio>=0.21",
|
|
21
|
+
]
|