devrel-origin 0.2.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- devrel_origin/__init__.py +15 -0
- devrel_origin/cli/__init__.py +92 -0
- devrel_origin/cli/_common.py +243 -0
- devrel_origin/cli/analytics.py +28 -0
- devrel_origin/cli/argus.py +497 -0
- devrel_origin/cli/auth.py +227 -0
- devrel_origin/cli/config.py +108 -0
- devrel_origin/cli/content.py +259 -0
- devrel_origin/cli/cost.py +108 -0
- devrel_origin/cli/cro.py +298 -0
- devrel_origin/cli/deliverables.py +65 -0
- devrel_origin/cli/docs.py +91 -0
- devrel_origin/cli/doctor.py +178 -0
- devrel_origin/cli/experiment.py +29 -0
- devrel_origin/cli/growth.py +97 -0
- devrel_origin/cli/init.py +472 -0
- devrel_origin/cli/intel.py +27 -0
- devrel_origin/cli/kb.py +96 -0
- devrel_origin/cli/listen.py +31 -0
- devrel_origin/cli/marketing.py +66 -0
- devrel_origin/cli/migrate.py +45 -0
- devrel_origin/cli/run.py +46 -0
- devrel_origin/cli/sales.py +57 -0
- devrel_origin/cli/schedule.py +62 -0
- devrel_origin/cli/synthesize.py +28 -0
- devrel_origin/cli/triage.py +29 -0
- devrel_origin/cli/video.py +35 -0
- devrel_origin/core/__init__.py +58 -0
- devrel_origin/core/agent_config.py +75 -0
- devrel_origin/core/argus.py +964 -0
- devrel_origin/core/atlas.py +1450 -0
- devrel_origin/core/base.py +372 -0
- devrel_origin/core/cyra.py +563 -0
- devrel_origin/core/dex.py +708 -0
- devrel_origin/core/echo.py +614 -0
- devrel_origin/core/growth/__init__.py +27 -0
- devrel_origin/core/growth/recommendations.py +219 -0
- devrel_origin/core/growth/target_kinds.py +51 -0
- devrel_origin/core/iris.py +513 -0
- devrel_origin/core/kai.py +1367 -0
- devrel_origin/core/llm.py +542 -0
- devrel_origin/core/llm_backends.py +274 -0
- devrel_origin/core/mox.py +514 -0
- devrel_origin/core/nova.py +349 -0
- devrel_origin/core/pax.py +1205 -0
- devrel_origin/core/rex.py +532 -0
- devrel_origin/core/sage.py +486 -0
- devrel_origin/core/sentinel.py +385 -0
- devrel_origin/core/types.py +98 -0
- devrel_origin/core/video/__init__.py +22 -0
- devrel_origin/core/video/assembler.py +131 -0
- devrel_origin/core/video/browser_recorder.py +118 -0
- devrel_origin/core/video/desktop_recorder.py +254 -0
- devrel_origin/core/video/overlay_renderer.py +143 -0
- devrel_origin/core/video/script_parser.py +147 -0
- devrel_origin/core/video/tts_engine.py +82 -0
- devrel_origin/core/vox.py +268 -0
- devrel_origin/core/watchdog.py +321 -0
- devrel_origin/project/__init__.py +1 -0
- devrel_origin/project/config.py +75 -0
- devrel_origin/project/cost_sink.py +61 -0
- devrel_origin/project/init.py +104 -0
- devrel_origin/project/paths.py +75 -0
- devrel_origin/project/state.py +241 -0
- devrel_origin/project/templates/__init__.py +4 -0
- devrel_origin/project/templates/config.toml +24 -0
- devrel_origin/project/templates/devrel.gitignore +10 -0
- devrel_origin/project/templates/slop-blocklist.md +45 -0
- devrel_origin/project/templates/style.md +24 -0
- devrel_origin/project/templates/voice.md +29 -0
- devrel_origin/quality/__init__.py +66 -0
- devrel_origin/quality/editorial.py +357 -0
- devrel_origin/quality/persona.py +84 -0
- devrel_origin/quality/readability.py +148 -0
- devrel_origin/quality/slop.py +167 -0
- devrel_origin/quality/style.py +110 -0
- devrel_origin/quality/voice.py +15 -0
- devrel_origin/tools/__init__.py +9 -0
- devrel_origin/tools/analytics.py +304 -0
- devrel_origin/tools/api_client.py +393 -0
- devrel_origin/tools/apollo_client.py +305 -0
- devrel_origin/tools/code_validator.py +428 -0
- devrel_origin/tools/github_tools.py +297 -0
- devrel_origin/tools/instantly_client.py +412 -0
- devrel_origin/tools/kb_harvester.py +340 -0
- devrel_origin/tools/mcp_server.py +578 -0
- devrel_origin/tools/notifications.py +245 -0
- devrel_origin/tools/run_report.py +193 -0
- devrel_origin/tools/scheduler.py +231 -0
- devrel_origin/tools/search_tools.py +321 -0
- devrel_origin/tools/self_improve.py +168 -0
- devrel_origin/tools/sheets.py +236 -0
- devrel_origin-0.2.14.dist-info/METADATA +354 -0
- devrel_origin-0.2.14.dist-info/RECORD +98 -0
- devrel_origin-0.2.14.dist-info/WHEEL +5 -0
- devrel_origin-0.2.14.dist-info/entry_points.txt +2 -0
- devrel_origin-0.2.14.dist-info/licenses/LICENSE +21 -0
- devrel_origin-0.2.14.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,274 @@
|
|
|
1
|
+
"""LLM provider backends for multi-provider support.
|
|
2
|
+
|
|
3
|
+
LLMClient delegates the actual chat call to a backend so different providers
|
|
4
|
+
(Anthropic direct, OpenRouter, future ones) can be swapped without rewriting
|
|
5
|
+
the cost-tracking, budget-gating, agent-attribution layers in core/llm.py.
|
|
6
|
+
|
|
7
|
+
A backend's responsibility is narrow: take a system prompt + user prompt +
|
|
8
|
+
generation params + a model id, return the response text plus token usage in
|
|
9
|
+
a normalized shape. Caching, retry, and rate limiting can be done inside the
|
|
10
|
+
backend if the provider supports it; the client layer doesn't care.
|
|
11
|
+
|
|
12
|
+
Each backend exposes:
|
|
13
|
+
- `name`: short id used in logs and config (`"anthropic"`, `"openrouter"`)
|
|
14
|
+
- `default_model`: backend-default when the caller doesn't override
|
|
15
|
+
- `cheap_model`: budget-downgrade target (used by BudgetGate)
|
|
16
|
+
- `resolve_alias(alias)`: translates `"haiku"`/`"sonnet"`/`"opus"` shorthand
|
|
17
|
+
to a real model id (backend-specific; OpenRouter uses dot notation without
|
|
18
|
+
date suffix like `anthropic/claude-haiku-4.5`, native Anthropic wants the
|
|
19
|
+
bare dated id like `claude-haiku-4-5-20251001`)
|
|
20
|
+
- async `chat(...)`: the actual call, returns a `BackendResponse`
|
|
21
|
+
- async `aclose()`: release any underlying clients (httpx pools etc.)
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
from __future__ import annotations
|
|
25
|
+
|
|
26
|
+
import logging
|
|
27
|
+
import os
|
|
28
|
+
from abc import ABC, abstractmethod
|
|
29
|
+
from dataclasses import dataclass, field
|
|
30
|
+
from typing import Any
|
|
31
|
+
|
|
32
|
+
import httpx
|
|
33
|
+
from anthropic import AsyncAnthropic
|
|
34
|
+
|
|
35
|
+
logger = logging.getLogger(__name__)
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
@dataclass(frozen=True)
class BackendResponse:
    """Provider-agnostic result of one chat completion.

    Every backend maps its provider's response into this shape so the
    cost-tracking layer in core/llm.py never sees provider-specific types.
    """

    # Generated text from the model.
    text: str
    # Model id that actually served the request (the provider may downgrade).
    model: str
    input_tokens: int
    output_tokens: int
    # Anthropic prompt-caching counters; stay 0 for providers without caching.
    cache_creation_input_tokens: int = 0
    cache_read_input_tokens: int = 0
    # Provider-specific extras (request id, upstream provider name, ...).
    raw_meta: dict[str, Any] = field(default_factory=dict)
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class LLMBackend(ABC):
    """Provider adapter interface: one concrete subclass per LLM provider."""

    # Short provider id used in config and log lines.
    name: str = "abstract"
    # Model used when the caller supplies no override.
    default_model: str = ""
    # Cheaper model the budget gate may downgrade to.
    cheap_model: str = ""

    @abstractmethod
    def resolve_alias(self, alias: str) -> str:
        """Translate shorthand ('haiku' / 'sonnet' / 'opus') or an explicit
        model id into this backend's identifier.  Ids the backend already
        understands pass through untouched; the client layer sends whatever
        this returns straight to `chat`."""

    @abstractmethod
    async def chat(
        self,
        *,
        model: str,
        system_prompt: str,
        user_prompt: str,
        temperature: float,
        max_tokens: int,
    ) -> BackendResponse:
        """Perform one chat completion and return the normalized response."""

    async def aclose(self) -> None:  # noqa: B027 - intentional opt-in hook
        """Release provider resources (httpx pools etc.); default is a no-op."""
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
# --- Anthropic --------------------------------------------------------------

# Native Anthropic model ids. OpenRouter exposes the same Claude models under
# `anthropic/<id>` paths.
ANTHROPIC_DEFAULT_MODEL = "claude-sonnet-4-5-20250929"
ANTHROPIC_MODELS: dict[str, str] = {
    # Fixed: the dated Opus 4 id is `claude-opus-4-20250514`. The previous
    # value `claude-opus-4-0-20250514` fused the undated alias
    # (`claude-opus-4-0`) with a date suffix and is rejected by the API.
    "opus": "claude-opus-4-20250514",
    "sonnet": ANTHROPIC_DEFAULT_MODEL,
    "haiku": "claude-haiku-4-5-20251001",
}
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
class AnthropicBackend(LLMBackend):
    """Default backend: talks to the Anthropic API through the official SDK."""

    name = "anthropic"
    default_model = ANTHROPIC_DEFAULT_MODEL
    cheap_model = ANTHROPIC_MODELS["haiku"]

    def __init__(self, api_key: str = ""):
        # An empty key becomes 'dummy' so the SDK still constructs (tests mock
        # out messages.create); a real request with it would simply 401.
        self._client = AsyncAnthropic(api_key=api_key or "dummy")

    def resolve_alias(self, alias: str) -> str:
        return ANTHROPIC_MODELS.get(alias, alias)

    async def chat(
        self,
        *,
        model: str,
        system_prompt: str,
        user_prompt: str,
        temperature: float,
        max_tokens: int,
    ) -> BackendResponse:
        response = await self._client.messages.create(
            model=model,
            max_tokens=max_tokens,
            temperature=temperature,
            system=system_prompt,
            messages=[{"role": "user", "content": user_prompt}],
        )
        usage = response.usage
        return BackendResponse(
            text=response.content[0].text,
            # The provider may answer under a different id than requested.
            model=getattr(response, "model", model),
            input_tokens=usage.input_tokens,
            output_tokens=usage.output_tokens,
            # Cache counters are absent on older SDK versions; coerce to 0.
            cache_creation_input_tokens=getattr(usage, "cache_creation_input_tokens", 0) or 0,
            cache_read_input_tokens=getattr(usage, "cache_read_input_tokens", 0) or 0,
        )

    async def aclose(self) -> None:
        # The SDK owns an internal httpx client; best-effort close via the SDK.
        try:
            await self._client.close()
        except Exception:
            pass
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
# --- OpenRouter -------------------------------------------------------------

# OpenRouter is OpenAI-compatible. We POST to /chat/completions with model ids
# in the form `<provider>/<model>` (e.g. `anthropic/claude-sonnet-4.5`,
# `openai/gpt-4o-mini`). OpenRouter uses dot notation for Anthropic versions
# and does NOT accept Anthropic's dated suffixes (`-20250929`); a 400 Bad
# Request is the symptom of using the dated id here. Pricing is per-model;
# response usage is OpenAI-shape (prompt_tokens / completion_tokens).
OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1"
OPENROUTER_DEFAULT_MODEL = "anthropic/claude-sonnet-4.5"
OPENROUTER_CHEAP_MODEL = "anthropic/claude-haiku-4.5"
# Shorthand aliases mirroring ANTHROPIC_MODELS, mapped to OpenRouter paths.
OPENROUTER_ALIASES: dict[str, str] = {
    "opus": "anthropic/claude-opus-4",
    "sonnet": OPENROUTER_DEFAULT_MODEL,
    "haiku": OPENROUTER_CHEAP_MODEL,
}
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
class OpenRouterBackend(LLMBackend):
    """Backend for OpenRouter's OpenAI-compatible HTTP API.

    Uses the httpx core dependency directly — no extra SDK. The key comes
    from the api_key argument or OPENROUTER_API_KEY; OPENROUTER_REFERER and
    OPENROUTER_TITLE optionally feed OpenRouter's leaderboard attribution.
    """

    name = "openrouter"
    default_model = OPENROUTER_DEFAULT_MODEL
    cheap_model = OPENROUTER_CHEAP_MODEL

    def __init__(
        self,
        api_key: str = "",
        *,
        referer: str | None = None,
        title: str | None = None,
        timeout: float = 120.0,
    ):
        env = os.environ
        self._api_key = api_key or env.get("OPENROUTER_API_KEY", "")
        self._referer = referer or env.get(
            "OPENROUTER_REFERER", "https://github.com/dovzhikova/devrel-origin"
        )
        self._title = title or env.get("OPENROUTER_TITLE", "devrel-origin")
        headers = {
            "Authorization": f"Bearer {self._api_key}",
            "HTTP-Referer": self._referer,
            "X-Title": self._title,
            "Content-Type": "application/json",
        }
        self._client = httpx.AsyncClient(
            base_url=OPENROUTER_BASE_URL,
            timeout=timeout,
            headers=headers,
        )

    def resolve_alias(self, alias: str) -> str:
        # Accept a shorthand (haiku/sonnet/opus), a bare Anthropic id
        # (claude-...), or an already provider-qualified OpenRouter path
        # (anthropic/..., openai/gpt-4o); unknown strings pass through.
        mapped = OPENROUTER_ALIASES.get(alias)
        if mapped is not None:
            return mapped
        if alias.startswith("claude-") or alias in ANTHROPIC_MODELS.values():
            return f"anthropic/{alias}"
        return alias

    async def chat(
        self,
        *,
        model: str,
        system_prompt: str,
        user_prompt: str,
        temperature: float,
        max_tokens: int,
    ) -> BackendResponse:
        body = {
            "model": model,
            "messages": [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            "temperature": temperature,
            "max_tokens": max_tokens,
        }
        resp = await self._client.post("/chat/completions", json=body)
        resp.raise_for_status()
        data = resp.json()
        usage = data.get("usage") or {}
        first_choice = (data.get("choices") or [{}])[0]
        message = first_choice.get("message") or {}
        # OpenAI-shape usage carries no Anthropic-style cache-creation count.
        # Some upstream providers do report cached prompt tokens under
        # `prompt_tokens_details.cached_tokens`; surface them when present.
        details = usage.get("prompt_tokens_details") or {}
        return BackendResponse(
            text=message.get("content") or "",
            model=data.get("model") or model,
            input_tokens=int(usage.get("prompt_tokens") or 0),
            output_tokens=int(usage.get("completion_tokens") or 0),
            cache_read_input_tokens=int(details.get("cached_tokens") or 0),
            raw_meta={"id": data.get("id"), "provider": data.get("provider")},
        )

    async def aclose(self) -> None:
        await self._client.aclose()
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
# --- Factory ----------------------------------------------------------------


def make_backend(
    provider: str | None = None,
    *,
    anthropic_api_key: str = "",
    openrouter_api_key: str = "",
) -> LLMBackend:
    """Build a backend by name, with env-var auto-detection as fallback.

    Resolution order:
    1. Explicit `provider` arg ('anthropic' | 'openrouter')
    2. OPENROUTER_API_KEY set and ANTHROPIC_API_KEY unset -> openrouter
    3. Default -> anthropic (preserves pre-multi-provider behavior)
    """
    builders = {
        "openrouter": lambda: OpenRouterBackend(api_key=openrouter_api_key),
        "anthropic": lambda: AnthropicBackend(api_key=anthropic_api_key),
    }
    builder = builders.get(provider)
    if builder is not None:
        return builder()

    openrouter_key_present = bool(openrouter_api_key or os.environ.get("OPENROUTER_API_KEY"))
    anthropic_key_present = bool(anthropic_api_key or os.environ.get("ANTHROPIC_API_KEY"))
    if openrouter_key_present and not anthropic_key_present:
        return OpenRouterBackend(api_key=openrouter_api_key)
    return AnthropicBackend(api_key=anthropic_api_key)
|