freeride-gateway 0.3.0a1 (tar.gz)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. freeride_gateway-0.3.0a1/.gitignore +31 -0
  2. freeride_gateway-0.3.0a1/PKG-INFO +203 -0
  3. freeride_gateway-0.3.0a1/README.md +172 -0
  4. freeride_gateway-0.3.0a1/freeride/__init__.py +1 -0
  5. freeride_gateway-0.3.0a1/freeride/binders/__init__.py +18 -0
  6. freeride_gateway-0.3.0a1/freeride/binders/aider.py +84 -0
  7. freeride_gateway-0.3.0a1/freeride/binders/continue_.py +120 -0
  8. freeride_gateway-0.3.0a1/freeride/binders/hermes.py +137 -0
  9. freeride_gateway-0.3.0a1/freeride/binders/openclaw.py +109 -0
  10. freeride_gateway-0.3.0a1/freeride/cli/__init__.py +0 -0
  11. freeride_gateway-0.3.0a1/freeride/cli/cmd_bind.py +40 -0
  12. freeride_gateway-0.3.0a1/freeride/cli/cmd_serve.py +79 -0
  13. freeride_gateway-0.3.0a1/freeride/cli/cmd_telemetry.py +41 -0
  14. freeride_gateway-0.3.0a1/freeride/cli/main.py +115 -0
  15. freeride_gateway-0.3.0a1/freeride/cli/watcher.py +177 -0
  16. freeride_gateway-0.3.0a1/freeride/core/__init__.py +0 -0
  17. freeride_gateway-0.3.0a1/freeride/core/cache.py +46 -0
  18. freeride_gateway-0.3.0a1/freeride/core/chat_schema.py +189 -0
  19. freeride_gateway-0.3.0a1/freeride/core/cooldown.py +116 -0
  20. freeride_gateway-0.3.0a1/freeride/core/errors.py +53 -0
  21. freeride_gateway-0.3.0a1/freeride/core/provider.py +134 -0
  22. freeride_gateway-0.3.0a1/freeride/core/state.py +53 -0
  23. freeride_gateway-0.3.0a1/freeride/core/telemetry.py +196 -0
  24. freeride_gateway-0.3.0a1/freeride/core/types.py +49 -0
  25. freeride_gateway-0.3.0a1/freeride/providers/__init__.py +0 -0
  26. freeride_gateway-0.3.0a1/freeride/providers/nim_model_metadata.py +57 -0
  27. freeride_gateway-0.3.0a1/freeride/providers/nvidia_nim.py +315 -0
  28. freeride_gateway-0.3.0a1/freeride/providers/openrouter.py +357 -0
  29. freeride_gateway-0.3.0a1/freeride/server/__init__.py +0 -0
  30. freeride_gateway-0.3.0a1/freeride/server/app.py +130 -0
  31. freeride_gateway-0.3.0a1/freeride/server/routes/__init__.py +0 -0
  32. freeride_gateway-0.3.0a1/freeride/server/routes/chat.py +214 -0
  33. freeride_gateway-0.3.0a1/freeride/server/routes/models.py +112 -0
  34. freeride_gateway-0.3.0a1/freeride/v2compat/__init__.py +14 -0
  35. freeride_gateway-0.3.0a1/freeride/v2compat/commands.py +465 -0
  36. freeride_gateway-0.3.0a1/freeride/v2compat/models.py +216 -0
  37. freeride_gateway-0.3.0a1/freeride/v2compat/openclaw.py +187 -0
  38. freeride_gateway-0.3.0a1/knowledge/CONSUMERS.md +190 -0
  39. freeride_gateway-0.3.0a1/knowledge/DAYTONA.md +122 -0
  40. freeride_gateway-0.3.0a1/knowledge/EXECUTION_PLAN.md +1056 -0
  41. freeride_gateway-0.3.0a1/knowledge/HERMES.md +89 -0
  42. freeride_gateway-0.3.0a1/knowledge/PLAN_GATEWAY.md +354 -0
  43. freeride_gateway-0.3.0a1/knowledge/providers/SURVEY.md +68 -0
  44. freeride_gateway-0.3.0a1/knowledge/providers/nvidia_nim.md +248 -0
  45. freeride_gateway-0.3.0a1/pyproject.toml +68 -0
  46. freeride_gateway-0.3.0a1/tests/__init__.py +0 -0
  47. freeride_gateway-0.3.0a1/tests/conformance/__init__.py +0 -0
  48. freeride_gateway-0.3.0a1/tests/conformance/test_provider_conformance.py +133 -0
  49. freeride_gateway-0.3.0a1/tests/e2e/__init__.py +0 -0
  50. freeride_gateway-0.3.0a1/tests/e2e/conftest.py +117 -0
  51. freeride_gateway-0.3.0a1/tests/e2e/test_aider.py +68 -0
  52. freeride_gateway-0.3.0a1/tests/e2e/test_hermes.py +66 -0
  53. freeride_gateway-0.3.0a1/tests/e2e/test_openai_python.py +61 -0
  54. freeride_gateway-0.3.0a1/tests/e2e/test_openclaw.py +85 -0
  55. freeride_gateway-0.3.0a1/tests/fixtures/__init__.py +0 -0
  56. freeride_gateway-0.3.0a1/tests/fixtures/noop_provider.py +97 -0
  57. freeride_gateway-0.3.0a1/tests/providers/__init__.py +0 -0
  58. freeride_gateway-0.3.0a1/tests/providers/test_openrouter.py +322 -0
  59. freeride_gateway-0.3.0a1/tests/test_binders.py +224 -0
  60. freeride_gateway-0.3.0a1/tests/test_cooldown.py +101 -0
  61. freeride_gateway-0.3.0a1/tests/test_core_types.py +303 -0
  62. freeride_gateway-0.3.0a1/tests/test_state.py +97 -0
  63. freeride_gateway-0.3.0a1/tests/test_telemetry.py +206 -0
  64. freeride_gateway-0.3.0a1/tests/v2compat/__init__.py +0 -0
  65. freeride_gateway-0.3.0a1/tests/v2compat/test_models.py +156 -0
  66. freeride_gateway-0.3.0a1/tests/v2compat/test_openclaw_writer.py +190 -0
@@ -0,0 +1,31 @@
+ # Python
+ __pycache__/
+ *.py[cod]
+ *$py.class
+ *.egg-info/
+ *.egg
+ build/
+ dist/
+
+ # Virtualenvs
+ .venv/
+ venv/
+ env/
+
+ # Editor / OS
+ .vscode/
+ .idea/
+ .DS_Store
+
+ # pytest
+ .pytest_cache/
+ .coverage
+ htmlcov/
+
+ # ruff / mypy
+ .ruff_cache/
+ .mypy_cache/
+
+ # FreeRide local state (must NEVER be committed)
+ .freeride/
+ freeride.egg-info/
@@ -0,0 +1,203 @@
+ Metadata-Version: 2.4
+ Name: freeride-gateway
+ Version: 0.3.0a1
+ Summary: Free-AI gateway: OpenAI-compatible local proxy that orchestrates free-tier inference across multiple providers
+ Project-URL: Homepage, https://github.com/Shaivpidadi/FreeRideV3
+ Project-URL: Repository, https://github.com/Shaivpidadi/FreeRideV3
+ Author: Shaishav Pidadi
+ License-Expression: MIT
+ Keywords: ai,gateway,llm,openai,openrouter,proxy
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Requires-Python: >=3.10
+ Requires-Dist: fastapi>=0.115
+ Requires-Dist: httpx>=0.27
+ Requires-Dist: pydantic>=2.7
+ Requires-Dist: uvicorn[standard]>=0.30
+ Provides-Extra: dev
+ Requires-Dist: pytest-asyncio>=0.23; extra == 'dev'
+ Requires-Dist: pytest-httpx>=0.30; extra == 'dev'
+ Requires-Dist: pytest-timeout>=2.3; extra == 'dev'
+ Requires-Dist: pytest>=8; extra == 'dev'
+ Requires-Dist: ruff>=0.6; extra == 'dev'
+ Provides-Extra: e2e
+ Requires-Dist: openai>=2; extra == 'e2e'
+ Description-Content-Type: text/markdown
+
+ # FreeRide
+
+ **Free AI for everyone.** A local OpenAI-compatible gateway that orchestrates free-tier inference across multiple providers — OpenRouter, NVIDIA NIM, and more — and routes around outages and rate limits transparently.
+
+ ```
+ [any agent] ──HTTP──> [FreeRide on localhost] ──HTTPS──> OpenRouter
+                                                     └──> NVIDIA NIM
+                                                     └──> (more providers)
+ ```
+
+ Point any OpenAI-compatible client at `http://localhost:11343/v1` with API key `any` and you get free AI. When one provider rate-limits or fails, FreeRide invisibly fails over to the next. Streaming, tool calls, vision, and structured outputs all pass through.
+
+ ## Why
+
+ You can already get free models from OpenRouter, NIM, Groq, etc. — but each has different rate limits, different free-model detection rules, and different rate-limit semantics. Hitting one's daily cap means your agent stalls until tomorrow. FreeRide unifies them behind one OpenAI-shaped endpoint and rotates across providers and keys so your agent never sees a 429.
+
+ Crucially:
+
+ - **Local-first.** The gateway runs on your machine. Your prompts and completions never touch any FreeRide-operated server.
+ - **Free-only on principle.** No paid fallback paths. No upsells.
+ - **BYO keys.** You bring your own free-tier keys for each provider; FreeRide just routes.
+ - **Telemetry off by default.** Optional, audit-friendly aggregate beacon (token counts, no content) — opt-in only via `freeride telemetry on`.
+
+ ## Install
+
+ ```bash
+ pip install freeride-gateway        # latest stable (after 0.3.0 final)
+ pip install --pre freeride-gateway  # alpha / pre-release (current)
+ ```
+
+ The PyPI distribution is named `freeride-gateway`; the CLI binary it installs is `freeride`. Python ≥ 3.10.
+
+ For local development, clone and `pip install -e .` from the repo root.
+
+ ## Quick start
+
+ ### 1. Get free API keys
+
+ | Provider | Sign-up | Required env var |
+ |---|---|---|
+ | OpenRouter | https://openrouter.ai/keys | `OPENROUTER_API_KEY` |
+ | NVIDIA NIM | https://build.nvidia.com/explore/discover | `NVIDIA_API_KEY` |
+
+ You only need one to get started; every additional key or provider improves failover.
+
+ ### 2. Start the gateway
+
+ ```bash
+ export OPENROUTER_API_KEY="sk-or-v1-..."
+ export NVIDIA_API_KEY="nvapi-..."   # optional
+
+ freeride serve
+ # freeride gateway listening on http://127.0.0.1:11343
+ # providers: openrouter, nvidia_nim
+ # point any OpenAI-compatible agent at:
+ #   OPENAI_API_BASE=http://127.0.0.1:11343/v1
+ #   OPENAI_API_KEY=any
+ ```
+
+ ### 3. Point your agent at it
+
+ The fastest way is via a built-in binder:
+
+ ```bash
+ freeride bind aider     # writes ~/.aider.conf.yml
+ freeride bind continue  # writes ~/.continue/config.yaml
+ freeride bind hermes    # writes ~/.hermes/cli-config.yaml
+ freeride bind openclaw  # writes ~/.openclaw/openclaw.json
+ ```
+
+ Or point any OpenAI-shaped client manually:
+
+ ```bash
+ export OPENAI_API_BASE=http://localhost:11343/v1
+ export OPENAI_API_KEY=any
+ ```
+
+ That's it. Your agent now uses free AI with cross-provider failover.
+
+ ### 4. (Optional) Multi-key rotation
+
+ ```bash
+ # JSON-array form to register multiple keys per provider
+ export OPENROUTER_API_KEY='["sk-or-v1-key1", "sk-or-v1-key2"]'
+ ```
+
+ When one key hits 429, FreeRide marks it as cooling and switches to the next key on the following request. Cooldowns persist across restarts.
+
+ ## How it works
+
+ ### Cross-provider failover
+
+ When you call `chat/completions`, FreeRide tries providers in registration order. For each provider it walks the available (non-cooling) keys; on `RATE_LIMIT` or `AUTH` it marks the key cooling and tries the next. On `MODEL_NOT_FOUND` it advances to the next provider. Once a provider produces a successful response (or a streaming response's first chunk), FreeRide commits and returns it to the client.
+
+ The client never sees the failures — the response includes a `_freeride_provider` field (or `X-FreeRide-Provider` header on streaming responses) so you can audit which provider actually served any given request.
+
+ ### Streaming
+
+ Streaming uses **buffer-first-chunk failover**: FreeRide holds the first SSE event from upstream until it confirms the stream started successfully. If upstream errors before producing the first chunk, FreeRide tries the next (provider, key) tuple. Once the first chunk has shipped to the client, mid-stream errors propagate as a truncated stream (rare in practice; documented limitation).
+
+ ### Telemetry
+
+ Off by default. When you opt in (`freeride telemetry on`), FreeRide POSTs an aggregate beacon hourly with:
+
+ ```json
+ {
+   "installation_id": "uuid-v4",
+   "version": "0.3.0",
+   "os": "darwin",
+   "tokens_served": 412034,
+   "request_count": 187,
+   "providers_active": ["openrouter", "nvidia_nim"],
+   "uptime_hours": 8
+ }
+ ```
+
+ **Never sent**: prompts, completions, model IDs, API keys, hostnames. Run `freeride telemetry` (no args) to inspect the exact payload before deciding.
+
+ ## Commands
+
+ | Command | What it does |
+ |---|---|
+ | `freeride serve` | Start the gateway on `localhost:11343` |
+ | `freeride bind <agent>` | Write the gateway URL into the agent's config (atomic; preserves unrelated keys) |
+ | `freeride telemetry [on\|off]` | Manage opt-in beacon (default OFF) |
+ | `freeride list` | List available free models, ranked (v2 behavior) |
+ | `freeride status` | Show current OpenClaw config + cache age (v2 behavior) |
+ | `freeride auto` | Auto-configure best free model for OpenClaw (v2 behavior) |
+ | `freeride rotate` | Live-test current primary; swap if it fails (v2 behavior) |
+ | `freeride-watcher` | Background daemon that probes and rotates on failure (v2 behavior) |
+
+ The v2 commands keep working for existing OpenClaw users; the new commands (`serve`, `bind`, `telemetry`) are the v3 surface.
+
+ ## Supported providers
+
+ - **OpenRouter** ✅ — chat, streaming, tools, vision, structured outputs
+ - **NVIDIA NIM** ✅ — chat, streaming (curated free-model allowlist; `NVIDIA_NIM_FREE_MODELS_OVERRIDE` env var to expand)
+ - *Groq*, *Cloudflare Workers AI*, *HuggingFace Inference Providers*: the Provider Protocol fits all three (see `knowledge/providers/SURVEY.md`); plugin implementations welcome.
+
+ ## Supported agents
+
+ | Agent | `freeride bind <agent>` | Hot reload |
+ |---|---|---|
+ | OpenClaw | ✅ | restart needed |
+ | Aider | ✅ (`--scope home/cwd/git`) | restart needed |
+ | Continue | ✅ | yes |
+ | Hermes (NousResearch/hermes-agent) | ✅ | restart needed |
+ | OpenCode | planned; not yet shipped | — |
+
+ Or any other OpenAI-compatible client via `OPENAI_API_BASE` + `OPENAI_API_KEY=any`.
+
+ ## Project documents
+
+ - [`knowledge/PLAN_GATEWAY.md`](knowledge/PLAN_GATEWAY.md) — design plan, decisions, telemetry spec
+ - [`knowledge/EXECUTION_PLAN.md`](knowledge/EXECUTION_PLAN.md) — phased execution playbook (90+ tasks)
+ - [`knowledge/providers/`](knowledge/providers/) — per-provider technical references
+ - [`knowledge/CONSUMERS.md`](knowledge/CONSUMERS.md) — per-agent bind reference
+ - [`knowledge/HERMES.md`](knowledge/HERMES.md) — Hermes identification + bind plan
+
+ ## Contributing
+
+ The Provider Protocol is `freeride.core.provider.Provider` with `api_version = 1`. To add a new provider:
+
+ 1. Implement the Protocol in `freeride/providers/<name>.py`
+ 2. Register your class in `tests/conformance/test_provider_conformance.py`'s `CONFORMANT_PROVIDERS` list
+ 3. Add `freeride/providers/<name>_model_metadata.py` if the catalog endpoint doesn't expose context length / capabilities
+
+ The conformance suite covers the load-bearing invariants automatically.
+
+ ## License
+
+ MIT.
@@ -0,0 +1,172 @@
+ # FreeRide
+
+ **Free AI for everyone.** A local OpenAI-compatible gateway that orchestrates free-tier inference across multiple providers — OpenRouter, NVIDIA NIM, and more — and routes around outages and rate limits transparently.
+
+ ```
+ [any agent] ──HTTP──> [FreeRide on localhost] ──HTTPS──> OpenRouter
+                                                     └──> NVIDIA NIM
+                                                     └──> (more providers)
+ ```
+
+ Point any OpenAI-compatible client at `http://localhost:11343/v1` with API key `any` and you get free AI. When one provider rate-limits or fails, FreeRide invisibly fails over to the next. Streaming, tool calls, vision, and structured outputs all pass through.
+
+ ## Why
+
+ You can already get free models from OpenRouter, NIM, Groq, etc. — but each has different rate limits, different free-model detection rules, and different rate-limit semantics. Hitting one's daily cap means your agent stalls until tomorrow. FreeRide unifies them behind one OpenAI-shaped endpoint and rotates across providers and keys so your agent never sees a 429.
+
+ Crucially:
+
+ - **Local-first.** The gateway runs on your machine. Your prompts and completions never touch any FreeRide-operated server.
+ - **Free-only on principle.** No paid fallback paths. No upsells.
+ - **BYO keys.** You bring your own free-tier keys for each provider; FreeRide just routes.
+ - **Telemetry off by default.** Optional, audit-friendly aggregate beacon (token counts, no content) — opt-in only via `freeride telemetry on`.
+
+ ## Install
+
+ ```bash
+ pip install freeride-gateway        # latest stable (after 0.3.0 final)
+ pip install --pre freeride-gateway  # alpha / pre-release (current)
+ ```
+
+ The PyPI distribution is named `freeride-gateway`; the CLI binary it installs is `freeride`. Python ≥ 3.10.
+
+ For local development, clone and `pip install -e .` from the repo root.
+
+ ## Quick start
+
+ ### 1. Get free API keys
+
+ | Provider | Sign-up | Required env var |
+ |---|---|---|
+ | OpenRouter | https://openrouter.ai/keys | `OPENROUTER_API_KEY` |
+ | NVIDIA NIM | https://build.nvidia.com/explore/discover | `NVIDIA_API_KEY` |
+
+ You only need one to get started; every additional key or provider improves failover.
+
+ ### 2. Start the gateway
+
+ ```bash
+ export OPENROUTER_API_KEY="sk-or-v1-..."
+ export NVIDIA_API_KEY="nvapi-..."   # optional
+
+ freeride serve
+ # freeride gateway listening on http://127.0.0.1:11343
+ # providers: openrouter, nvidia_nim
+ # point any OpenAI-compatible agent at:
+ #   OPENAI_API_BASE=http://127.0.0.1:11343/v1
+ #   OPENAI_API_KEY=any
+ ```
+
+ ### 3. Point your agent at it
+
+ The fastest way is via a built-in binder:
+
+ ```bash
+ freeride bind aider     # writes ~/.aider.conf.yml
+ freeride bind continue  # writes ~/.continue/config.yaml
+ freeride bind hermes    # writes ~/.hermes/cli-config.yaml
+ freeride bind openclaw  # writes ~/.openclaw/openclaw.json
+ ```
+
+ Or point any OpenAI-shaped client manually:
+
+ ```bash
+ export OPENAI_API_BASE=http://localhost:11343/v1
+ export OPENAI_API_KEY=any
+ ```
+
+ That's it. Your agent now uses free AI with cross-provider failover.
+
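+ For example, from Python (a minimal sketch using the `openai` client that the e2e tests exercise; `model="free"` mirrors the alias the Continue binder writes, and any ID the gateway lists via `/v1/models` should also work):
+
+ ```python
+ from openai import OpenAI
+
+ # "any" works because the gateway does not validate the key itself.
+ client = OpenAI(base_url="http://localhost:11343/v1", api_key="any")
+
+ resp = client.chat.completions.create(
+     model="free",
+     messages=[{"role": "user", "content": "Say hello in five words."}],
+ )
+ print(resp.choices[0].message.content)
+ ```
+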
+ ### 4. (Optional) Multi-key rotation
+
+ ```bash
+ # JSON-array form to register multiple keys per provider
+ export OPENROUTER_API_KEY='["sk-or-v1-key1", "sk-or-v1-key2"]'
+ ```
+
+ When one key hits 429, FreeRide marks it as cooling and switches to the next key on the following request. Cooldowns persist across restarts.
+
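+ The persistence idea, in miniature (an illustrative sketch, not the actual `freeride/core/cooldown.py` implementation; the state path and storing raw keys are simplifications for brevity):
+
+ ```python
+ import json
+ import time
+ from pathlib import Path
+
+ STATE = Path.home() / ".freeride" / "cooldowns.json"  # hypothetical location
+
+ def _load() -> dict[str, float]:
+     try:
+         return json.loads(STATE.read_text())
+     except (FileNotFoundError, json.JSONDecodeError):
+         return {}
+
+ def mark_cooling(key: str, seconds: float = 3600.0) -> None:
+     # Record when the key becomes usable again; the file survives restarts.
+     state = _load()
+     state[key] = time.time() + seconds
+     STATE.parent.mkdir(parents=True, exist_ok=True)
+     STATE.write_text(json.dumps(state))
+
+ def available(keys: list[str]) -> list[str]:
+     # Keys whose cooldown has expired (or was never set).
+     state = _load()
+     now = time.time()
+     return [k for k in keys if state.get(k, 0.0) <= now]
+ ```
+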
+ ## How it works
+
+ ### Cross-provider failover
+
+ When you call `chat/completions`, FreeRide tries providers in registration order. For each provider it walks the available (non-cooling) keys; on `RATE_LIMIT` or `AUTH` it marks the key cooling and tries the next. On `MODEL_NOT_FOUND` it advances to the next provider. Once a provider produces a successful response (or a streaming response's first chunk), FreeRide commits and returns it to the client.
+
+ The client never sees the failures — the response includes a `_freeride_provider` field (or `X-FreeRide-Provider` header on streaming responses) so you can audit which provider actually served any given request.
+
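+ Condensed to a Python-shaped sketch, that policy looks roughly like this (names and error classes are illustrative; the real routing presumably lives in `freeride/server/routes/chat.py` and `freeride/core/errors.py`):
+
+ ```python
+ class RateLimitError(Exception): ...
+ class AuthError(Exception): ...
+ class ModelNotFoundError(Exception): ...
+
+ def complete(providers, request):
+     for provider in providers:                      # registration order
+         try:
+             for key in provider.available_keys():   # skips cooling keys
+                 try:
+                     return provider.chat(request, key)   # success: commit
+                 except (RateLimitError, AuthError):
+                     provider.mark_cooling(key)           # try the next key
+         except ModelNotFoundError:
+             continue                                     # next provider
+     raise RuntimeError("all providers exhausted")
+ ```
+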
+ ### Streaming
+
+ Streaming uses **buffer-first-chunk failover**: FreeRide holds the first SSE event from upstream until it confirms the stream started successfully. If upstream errors before producing the first chunk, FreeRide tries the next (provider, key) tuple. Once the first chunk has shipped to the client, mid-stream errors propagate as a truncated stream (rare in practice; documented limitation).
+
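+ A minimal sketch of the buffering idea (illustrative, not the shipped implementation; `attempts` is assumed to be a sequence of callables that each open one upstream stream):
+
+ ```python
+ async def stream_with_failover(attempts):
+     for open_stream in attempts:
+         try:
+             stream = open_stream()
+             first = await anext(stream)  # commit only once a chunk arrives
+         except Exception:
+             continue                     # failed pre-first-chunk: next tuple
+
+         async def relay(first=first, stream=stream):
+             yield first                  # ship the buffered chunk
+             async for chunk in stream:   # then pass the rest through
+                 yield chunk
+
+         return relay()
+     raise RuntimeError("no (provider, key) tuple produced a first chunk")
+ ```
+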
+ ### Telemetry
+
+ Off by default. When you opt in (`freeride telemetry on`), FreeRide POSTs an aggregate beacon hourly with:
+
+ ```json
+ {
+   "installation_id": "uuid-v4",
+   "version": "0.3.0",
+   "os": "darwin",
+   "tokens_served": 412034,
+   "request_count": 187,
+   "providers_active": ["openrouter", "nvidia_nim"],
+   "uptime_hours": 8
+ }
+ ```
+
+ **Never sent**: prompts, completions, model IDs, API keys, hostnames. Run `freeride telemetry` (no args) to inspect the exact payload before deciding.
+
+ ## Commands
+
+ | Command | What it does |
+ |---|---|
+ | `freeride serve` | Start the gateway on `localhost:11343` |
+ | `freeride bind <agent>` | Write the gateway URL into the agent's config (atomic; preserves unrelated keys) |
+ | `freeride telemetry [on\|off]` | Manage opt-in beacon (default OFF) |
+ | `freeride list` | List available free models, ranked (v2 behavior) |
+ | `freeride status` | Show current OpenClaw config + cache age (v2 behavior) |
+ | `freeride auto` | Auto-configure best free model for OpenClaw (v2 behavior) |
+ | `freeride rotate` | Live-test current primary; swap if it fails (v2 behavior) |
+ | `freeride-watcher` | Background daemon that probes and rotates on failure (v2 behavior) |
+
+ The v2 commands keep working for existing OpenClaw users; the new commands (`serve`, `bind`, `telemetry`) are the v3 surface.
+
+ ## Supported providers
+
+ - **OpenRouter** ✅ — chat, streaming, tools, vision, structured outputs
+ - **NVIDIA NIM** ✅ — chat, streaming (curated free-model allowlist; `NVIDIA_NIM_FREE_MODELS_OVERRIDE` env var to expand)
+ - *Groq*, *Cloudflare Workers AI*, *HuggingFace Inference Providers*: the Provider Protocol fits all three (see `knowledge/providers/SURVEY.md`); plugin implementations welcome.
+
+ ## Supported agents
+
+ | Agent | `freeride bind <agent>` | Hot reload |
+ |---|---|---|
+ | OpenClaw | ✅ | restart needed |
+ | Aider | ✅ (`--scope home/cwd/git`) | restart needed |
+ | Continue | ✅ | yes |
+ | Hermes (NousResearch/hermes-agent) | ✅ | restart needed |
+ | OpenCode | planned; not yet shipped | — |
+
+ Or any other OpenAI-compatible client via `OPENAI_API_BASE` + `OPENAI_API_KEY=any`.
+
+ ## Project documents
+
+ - [`knowledge/PLAN_GATEWAY.md`](knowledge/PLAN_GATEWAY.md) — design plan, decisions, telemetry spec
+ - [`knowledge/EXECUTION_PLAN.md`](knowledge/EXECUTION_PLAN.md) — phased execution playbook (90+ tasks)
+ - [`knowledge/providers/`](knowledge/providers/) — per-provider technical references
+ - [`knowledge/CONSUMERS.md`](knowledge/CONSUMERS.md) — per-agent bind reference
+ - [`knowledge/HERMES.md`](knowledge/HERMES.md) — Hermes identification + bind plan
+
+ ## Contributing
+
+ The Provider Protocol is `freeride.core.provider.Provider` with `api_version = 1`. To add a new provider:
+
+ 1. Implement the Protocol in `freeride/providers/<name>.py`
+ 2. Register your class in `tests/conformance/test_provider_conformance.py`'s `CONFORMANT_PROVIDERS` list
+ 3. Add `freeride/providers/<name>_model_metadata.py` if the catalog endpoint doesn't expose context length / capabilities
+
+ The conformance suite covers the load-bearing invariants automatically.
+
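+ Step 2 looks roughly like this (an illustrative sketch only: `AcmeProvider` is hypothetical, and apart from `api_version = 1` the Protocol's members are not spelled out in this README, so consult `freeride.core.provider` for the real interface):
+
+ ```python
+ # freeride/providers/acme.py: hypothetical skeleton
+ class AcmeProvider:
+     api_version = 1  # the Protocol pin named above
+     # ...implement the methods required by freeride.core.provider.Provider...
+
+ # tests/conformance/test_provider_conformance.py: registration step
+ from freeride.providers.acme import AcmeProvider
+
+ CONFORMANT_PROVIDERS = [
+     # ...existing providers...
+     AcmeProvider,
+ ]
+ ```
+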
+ ## License
+
+ MIT.
@@ -0,0 +1 @@
+ __version__ = "0.3.0a1"
@@ -0,0 +1,18 @@
+ """Per-agent binder helpers — write one URL into one config file.
+
+ These are deliberately *not* a generic Consumer plugin abstraction (see
+ PLAN_GATEWAY.md §6). Each binder is a small, ad-hoc adapter that knows
+ exactly how to point its specific agent at the FreeRide gateway. The
+ common pattern is:
+
+ * Locate the agent's config file (env-var override > default path)
+ * Read it, preserving all unrelated keys
+ * Set the gateway URL + api_key="any"
+ * Atomically write it back
+
+ The contract is one function per agent, with the signature::
+
+     def bind(gateway_url: str, *, api_key: str = "any") -> str
+
+ Each binder returns a one-line status string that the CLI prints.
+ """
@@ -0,0 +1,84 @@
+ """``freeride bind aider`` — point Aider at the gateway.
+
+ Per ``knowledge/CONSUMERS.md``: Aider's config search order is
+ git-root → cwd → home, so we default to the home-scoped
+ ``~/.aider.conf.yml``. The user can pass a ``scope`` to force one of
+ the other locations.
+
+ Aider requires a restart after config changes (no hot reload).
+ """
+
+ from __future__ import annotations
+
+ from pathlib import Path
+ from typing import Literal
+
+ from freeride.core.state import atomic_write
+
+
+ Scope = Literal["home", "cwd", "git"]
+
+
+ def _aider_config_path(scope: Scope) -> Path:
+     if scope == "home":
+         return Path.home() / ".aider.conf.yml"
+     if scope == "cwd":
+         return Path.cwd() / ".aider.conf.yml"
+     # git: walk up from cwd looking for a .git directory
+     cur = Path.cwd().resolve()
+     while True:
+         if (cur / ".git").exists():
+             return cur / ".aider.conf.yml"
+         if cur.parent == cur:
+             # No git root found; fall back to cwd.
+             return Path.cwd() / ".aider.conf.yml"
+         cur = cur.parent
+
+
+ def _read_yaml_lines(path: Path) -> list[str]:
+     """Read existing aider config preserving all other lines.
+
+     We avoid pulling in PyYAML by doing line-based edits — Aider's
+     config file is a flat key:value YAML, no nested structures. This
+     keeps the binder dep-free.
+     """
+     if not path.exists():
+         return []
+     return path.read_text().splitlines()
+
+
+ def _set_or_append(lines: list[str], key: str, value: str) -> list[str]:
+     """Replace the first matching ``key:`` line, or append at end."""
+     out: list[str] = []
+     seen = False
+     for line in lines:
+         if not seen and line.lstrip().startswith(f"{key}:"):
+             out.append(f"{key}: {value}")
+             seen = True
+         else:
+             out.append(line)
+     if not seen:
+         out.append(f"{key}: {value}")
+     return out
+
+
+ def bind(
+     gateway_url: str,
+     *,
+     api_key: str = "any",
+     scope: Scope = "home",
+     config_path: Path | None = None,
+ ) -> str:
+     path = config_path or _aider_config_path(scope)
+     lines = _read_yaml_lines(path)
+     lines = _set_or_append(lines, "openai-api-base", gateway_url)
+     lines = _set_or_append(lines, "openai-api-key", api_key)
+
+     atomic_write(path, "\n".join(lines) + "\n")
+     return (
+         f"Aider config at {path} updated.\n"
+         f"  openai-api-base: {gateway_url}\n"
+         f"  openai-api-key: {api_key}\n"
+         f"  Aider has no hot-reload — restart aider for changes to take effect."
+     )
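A quick worked example of `_set_or_append`'s replace-or-append behavior (hypothetical input lines):

```python
lines = ["model: gpt-4", "openai-api-base: https://old.example/v1"]
lines = _set_or_append(lines, "openai-api-base", "http://127.0.0.1:11343/v1")
lines = _set_or_append(lines, "openai-api-key", "any")
# lines == ["model: gpt-4",
#           "openai-api-base: http://127.0.0.1:11343/v1",
#           "openai-api-key: any"]
```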
@@ -0,0 +1,120 @@
+ """``freeride bind continue`` — append a model entry to Continue's config.
+
+ Per ``knowledge/CONSUMERS.md``: current Continue uses ``~/.continue/config.yaml``
+ (legacy ``config.json`` may exist on older installs). The provider type
+ must be ``openai`` (NOT ``openai-compatible``). Continue hot-reloads on
+ next prompt — no restart needed.
+
+ We support both YAML and JSON paths. If both exist we update YAML
+ (the current canonical format). If neither exists we create the YAML.
+ """
+
+ from __future__ import annotations
+
+ import json
+ from pathlib import Path
+
+ from freeride.core.state import atomic_write
+
+
+ _DEFAULT_DIR = Path.home() / ".continue"
+ _YAML_NAME = "config.yaml"
+ _JSON_NAME = "config.json"
+
+ _FREERIDE_TITLE = "freeride"
+
+
+ def _yaml_block(gateway_url: str, api_key: str) -> str:
+     """Return a single ``models:`` entry block as YAML — appended to whatever
+     list already exists. Avoids requiring PyYAML for the common path.
+     """
+     return (
+         f"  - title: {_FREERIDE_TITLE}\n"
+         f"    provider: openai\n"
+         f"    model: free\n"
+         f"    apiBase: {gateway_url}\n"
+         f"    apiKey: {api_key}\n"
+         f"    roles: [chat, edit, autocomplete]\n"
+     )
+
+
+ def _bind_yaml(path: Path, gateway_url: str, api_key: str) -> None:
+     if not path.exists():
+         atomic_write(path, "models:\n" + _yaml_block(gateway_url, api_key))
+         return
+
+     text = path.read_text()
+     # Strip any prior freeride entry (idempotent re-runs): drop the
+     # "- title: freeride" line plus the 5 indented lines that follow it.
+     if f"title: {_FREERIDE_TITLE}" in text:
+         lines = text.splitlines()
+         out: list[str] = []
+         skip = 0
+         for line in lines:
+             if skip > 0:
+                 skip -= 1
+                 continue
+             if line.strip() == f"- title: {_FREERIDE_TITLE}":
+                 skip = 5  # the 5 indented lines after the - title line
+                 continue
+             out.append(line)
+         text = "\n".join(out)
+         if not text.endswith("\n"):
+             text += "\n"
+
+     if "models:" not in text:
+         text = (text.rstrip() + "\n" if text else "") + "models:\n"
+
+     # Append the freeride block at the end of the file; this assumes the
+     # ``models:`` list is the last (or only) top-level section.
+     new_text = text.rstrip() + "\n" + _yaml_block(gateway_url, api_key)
+     atomic_write(path, new_text)
+
+
+ def _bind_json(path: Path, gateway_url: str, api_key: str) -> None:
+     try:
+         config = json.loads(path.read_text()) if path.exists() else {}
+     except json.JSONDecodeError:
+         config = {}
+
+     # Drop any prior freeride entry, then append a fresh one.
+     models = [m for m in config.get("models", []) if m.get("title") != _FREERIDE_TITLE]
+     models.append(
+         {
+             "title": _FREERIDE_TITLE,
+             "provider": "openai",
+             "model": "free",
+             "apiBase": gateway_url,
+             "apiKey": api_key,
+             "roles": ["chat", "edit", "autocomplete"],
+         }
+     )
+     config["models"] = models
+     atomic_write(path, json.dumps(config, indent=2) + "\n")
+
+
+ def bind(
+     gateway_url: str,
+     *,
+     api_key: str = "any",
+     config_dir: Path | None = None,
+ ) -> str:
+     base = config_dir or _DEFAULT_DIR
+     base.mkdir(parents=True, exist_ok=True)
+     yaml_path = base / _YAML_NAME
+     json_path = base / _JSON_NAME
+
+     if yaml_path.exists() or not json_path.exists():
+         # Default modern path: YAML
+         _bind_yaml(yaml_path, gateway_url, api_key)
+         chosen = yaml_path
+     else:
+         _bind_json(json_path, gateway_url, api_key)
+         chosen = json_path
+
+     return (
+         f"Continue config at {chosen} updated.\n"
+         f"  Added model: freeride (provider=openai, apiBase={gateway_url})\n"
+         f"  Roles: chat, edit, autocomplete\n"
+         f"  Continue hot-reloads — no restart needed."
+     )
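Usage sketch for this binder (same contract as the others; with no existing `~/.continue` config it creates `config.yaml` from scratch):

```python
from freeride.binders import continue_

# Appends (or refreshes) the `freeride` model entry; Continue hot-reloads
# it on the next prompt, so no restart is needed.
print(continue_.bind("http://127.0.0.1:11343/v1"))
```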