lottobot-server 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +39 -0
- package/index.js +354 -0
- package/package.json +25 -0
package/README.md
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
# lottobot-server
|
|
2
|
+
|
|
3
|
+
Local proxy for [LottoBot](https://lottobot.ai) — connects your AI chat app to LottoBot's prediction engine.
|
|
4
|
+
|
|
5
|
+
Exposes an Ollama-compatible API on `localhost:11434` so you can use LottoBot from any app that supports Ollama or the OpenAI API format.
|
|
6
|
+
|
|
7
|
+
## Requirements
|
|
8
|
+
|
|
9
|
+
- Node.js 18+ ([nodejs.org](https://nodejs.org))
|
|
10
|
+
- A LottoBot API key ([lottobot.ai/dashboard](https://lottobot.ai/dashboard))
|
|
11
|
+
|
|
12
|
+
## Usage
|
|
13
|
+
|
|
14
|
+
```bash
|
|
15
|
+
npx lottobot-server --key lb_YOUR_API_KEY
|
|
16
|
+
```
|
|
17
|
+
|
|
18
|
+
Then open your AI chat app and connect to `http://localhost:11434`. Select **lottobot** as the model.
|
|
19
|
+
|
|
20
|
+
## Compatible Apps
|
|
21
|
+
|
|
22
|
+
| App | Connection |
|
|
23
|
+
|---|---|
|
|
24
|
+
| Open WebUI | Ollama URL: `http://localhost:11434` |
|
|
25
|
+
| AnythingLLM | Ollama endpoint: `http://localhost:11434` |
|
|
26
|
+
| LM Studio | OpenAI base URL: `http://localhost:11434/v1` |
|
|
27
|
+
| Continue.dev | Ollama provider: `http://localhost:11434` |
|
|
28
|
+
|
|
29
|
+
## Commands in Chat
|
|
30
|
+
|
|
31
|
+
- `generate` — get 6 prediction sets
|
|
32
|
+
- `generate 8 sets` — get 8 prediction sets
|
|
33
|
+
- `give me lucky numbers` — same as generate
|
|
34
|
+
|
|
35
|
+
Each prediction uses one draw from your API key.
|
|
36
|
+
|
|
37
|
+
## Platform Support
|
|
38
|
+
|
|
39
|
+
Works on Mac, Linux, and Windows (Command Prompt or PowerShell).
|
package/index.js
ADDED
|
@@ -0,0 +1,354 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/**
|
|
3
|
+
* LottoBot Local Proxy
|
|
4
|
+
*
|
|
5
|
+
* Exposes an Ollama-compatible REST API on localhost:11434 and forwards
|
|
6
|
+
* all requests to the LottoBot server using the user's API key.
|
|
7
|
+
*
|
|
8
|
+
* Cross-platform: Mac, Windows, Linux (requires Node.js 18+)
|
|
9
|
+
*
|
|
10
|
+
* Usage:
|
|
11
|
+
* npx lottobot-server --key lb_YOUR_API_KEY
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
"use strict";
|
|
15
|
+
|
|
16
|
+
import http from "http";
|
|
17
|
+
import https from "https";
|
|
18
|
+
import { URL } from "url";
|
|
19
|
+
|
|
20
|
+
// ── Configuration ─────────────────────────────────────────────────────────────

// 11434 is Ollama's own default port, so existing Ollama clients connect
// without any configuration change.
const PROXY_PORT = 11434;
// Backend base URL; overridable via env for testing or self-hosting.
const LOTTOBOT_API_BASE = process.env.LOTTOBOT_API_BASE || "https://lottobot.ai";
// The single pseudo-model name advertised to clients.
const MODEL_NAME = "lottobot";
|
25
|
+
|
|
26
|
+
// ── Parse CLI arguments ───────────────────────────────────────────────────────

/**
 * Parse CLI arguments and resolve the LottoBot API key.
 *
 * Accepts `--key VALUE` or `--key=VALUE`; falls back to the
 * LOTTOBOT_API_KEY environment variable (consistent with the existing
 * LOTTOBOT_API_BASE override). CLI always wins over the environment.
 * Exits the process with code 1 when no key can be found.
 *
 * @returns {{ apiKey: string }}
 */
function parseArgs() {
  const args = process.argv.slice(2);
  // Environment fallback; overwritten below if --key is given on the CLI.
  let apiKey = process.env.LOTTOBOT_API_KEY || null;

  for (let i = 0; i < args.length; i++) {
    const arg = args[i];
    if (arg === "--key" && args[i + 1]) {
      apiKey = args[i + 1];
      i++; // skip the value we just consumed
    } else if (arg.startsWith("--key=")) {
      apiKey = arg.slice("--key=".length);
    }
  }

  if (!apiKey) {
    console.error("Error: API key is required.");
    console.error("Usage: npx lottobot-server --key lb_YOUR_API_KEY");
    console.error("\nGet your free API key at https://lottobot.ai/dashboard");
    process.exit(1);
  }

  return { apiKey };
}
|
|
48
|
+
|
|
49
|
+
// ── HTTP helpers ──────────────────────────────────────────────────────────────

/**
 * Collect an incoming request stream into a single UTF-8 string.
 *
 * @param {import("http").IncomingMessage} req - readable request stream
 * @returns {Promise<string>} resolves with the complete body text
 */
function readBody(req) {
  return new Promise((resolve, reject) => {
    const parts = [];
    req.on("error", reject);
    req.on("data", (part) => {
      parts.push(part);
    });
    req.on("end", () => {
      resolve(Buffer.concat(parts).toString("utf8"));
    });
  });
}
|
|
59
|
+
|
|
60
|
+
/**
 * Issue a request to the LottoBot backend and resolve with the raw
 * http.IncomingMessage; the caller consumes the response stream itself.
 *
 * @param {string} path - path (and query) relative to LOTTOBOT_API_BASE
 * @param {{ method?: string, headers?: object, body?: string }} options
 * @param {string} apiKey - sent as an Authorization bearer token
 * @returns {Promise<import("http").IncomingMessage>}
 */
function fetchLottoBot(path, options, apiKey) {
  const target = new URL(path, LOTTOBOT_API_BASE);
  const secure = target.protocol === "https:";
  const transport = secure ? https : http;

  const requestConfig = {
    hostname: target.hostname,
    port: target.port || (secure ? 443 : 80),
    path: target.pathname + target.search,
    method: options.method || "GET",
    headers: {
      // Caller-supplied headers may override these defaults.
      "Authorization": `Bearer ${apiKey}`,
      "Content-Type": "application/json",
      ...(options.headers || {}),
    },
  };

  return new Promise((resolve, reject) => {
    const outbound = transport.request(requestConfig, resolve);
    outbound.on("error", reject);
    if (options.body) outbound.write(options.body);
    outbound.end();
  });
}
|
|
84
|
+
|
|
85
|
+
// ── Static response bodies ────────────────────────────────────────────────────

/** Body for Ollama's version probe (GET /) — identifies this proxy. */
function ollamaVersionResponse() {
  const payload = { version: "0.1.0-lottobot" };
  return JSON.stringify(payload);
}
|
|
90
|
+
|
|
91
|
+
/**
 * Body for Ollama's GET /api/tags: advertises a single pseudo-model so
 * chat UIs show "lottobot" in their model picker.
 */
function ollamaTagsResponse() {
  const details = {
    parent_model: "",
    format: "lottobot",
    family: "lottobot",
    families: ["lottobot"],
    parameter_size: "0B",
    quantization_level: "none",
  };
  const model = {
    name: MODEL_NAME,
    model: MODEL_NAME,
    modified_at: "2025-01-01T00:00:00Z",
    size: 0,
    digest: "lottobot",
    details,
  };
  return JSON.stringify({ models: [model] });
}
|
|
110
|
+
|
|
111
|
+
/** Body for OpenAI-style GET /v1/models: one entry owned by "lottobot". */
function openaiModelsResponse() {
  const entry = {
    id: MODEL_NAME,
    object: "model",
    created: 1700000000,
    owned_by: "lottobot",
  };
  return JSON.stringify({ object: "list", data: [entry] });
}
|
|
122
|
+
|
|
123
|
+
// ── Convert OpenAI chat format → Ollama chat format for streaming passthrough ─

/**
 * Normalize a chat message to the minimal { role, content } shape.
 * Missing or null content becomes an empty string.
 */
function openaiToOllamaMessage(openaiMsg) {
  const { role, content } = openaiMsg;
  return { role, content: content || "" };
}
|
|
131
|
+
|
|
132
|
+
// ── SSE passthrough: OpenAI SSE → Ollama streaming format ────────────────────

/**
 * Translate an upstream OpenAI-style SSE stream into Ollama's NDJSON
 * streaming format and write it to the local client response.
 *
 * Fixes over the previous version:
 *  - the returned promise resolves only when the upstream stream ends
 *    (previously the `async` function resolved immediately after
 *    attaching listeners, so the caller's `await` did not wait);
 *  - a trailing partial line left in the buffer is flushed on "end",
 *    so the last SSE event is not dropped when the stream has no
 *    final newline;
 *  - exactly one Ollama `done: true` message is always emitted, even
 *    when the upstream never sends `data: [DONE]`, so clients do not hang.
 *
 * @param {import("http").ServerResponse} clientRes - response to the local client
 * @param {import("http").IncomingMessage} upstreamRes - SSE stream from LottoBot
 * @returns {Promise<void>} resolves once the client response is ended
 */
function proxyStreamingResponse(clientRes, upstreamRes) {
  clientRes.writeHead(200, {
    "Content-Type": "application/x-ndjson",
    "Transfer-Encoding": "chunked",
    "Cache-Control": "no-cache",
  });

  let buffer = "";
  let doneSent = false;

  // Emit the Ollama-format terminal message exactly once.
  const sendDone = () => {
    if (doneSent) return;
    doneSent = true;
    const done = JSON.stringify({ model: MODEL_NAME, done: true, message: { role: "assistant", content: "" } });
    clientRes.write(done + "\n");
  };

  // Translate one SSE line into zero or one NDJSON chunks.
  const handleLine = (line) => {
    const trimmed = line.trim();
    if (!trimmed) return;
    if (trimmed === "data: [DONE]") {
      sendDone();
      return;
    }
    if (!trimmed.startsWith("data: ")) return;
    try {
      const openaiChunk = JSON.parse(trimmed.slice(6));
      const delta = openaiChunk.choices?.[0]?.delta?.content || "";
      if (delta) {
        const ollamaChunk = JSON.stringify({
          model: MODEL_NAME,
          created_at: new Date().toISOString(),
          message: { role: "assistant", content: delta },
          done: false,
        });
        clientRes.write(ollamaChunk + "\n");
      }
    } catch {
      // ignore malformed chunks
    }
  };

  return new Promise((resolve) => {
    upstreamRes.on("data", (chunk) => {
      buffer += chunk.toString("utf8");
      const lines = buffer.split("\n");
      buffer = lines.pop(); // keep incomplete line in buffer
      lines.forEach(handleLine);
    });

    upstreamRes.on("end", () => {
      if (buffer) handleLine(buffer); // flush trailing partial line
      sendDone(); // guarantee a terminal message even without [DONE]
      clientRes.end();
      resolve();
    });

    upstreamRes.on("error", () => {
      clientRes.end();
      resolve();
    });
  });
}
|
|
187
|
+
|
|
188
|
+
// ── Route handlers ────────────────────────────────────────────────────────────

/**
 * Handle Ollama POST /api/chat (and /api/generate): translate the Ollama
 * request into an OpenAI chat-completions call against LottoBot and
 * stream the reply back in Ollama NDJSON format.
 *
 * Fix: upstream HTTP errors (e.g. 401 for a bad API key) were previously
 * fed through the SSE translator, producing a 200 with an empty stream;
 * they are now forwarded to the client as real error responses. Error
 * replies also carry an explicit JSON Content-Type.
 *
 * @param {import("http").IncomingMessage} req - local client request
 * @param {import("http").ServerResponse} clientRes - local client response
 * @param {string} apiKey - LottoBot API key for the upstream call
 */
async function handleOllamaChat(req, clientRes, apiKey) {
  const rawBody = await readBody(req);
  let body;
  try {
    body = JSON.parse(rawBody);
  } catch {
    clientRes.writeHead(400, { "Content-Type": "application/json" });
    clientRes.end(JSON.stringify({ error: "Invalid JSON" }));
    return;
  }

  // Convert Ollama chat format to OpenAI format
  const openaiBody = JSON.stringify({
    model: MODEL_NAME,
    messages: (body.messages || []).map(openaiToOllamaMessage),
    stream: true,
  });

  try {
    const upstreamRes = await fetchLottoBot("/v1/chat/completions", {
      method: "POST",
      body: openaiBody,
    }, apiKey);

    // Surface upstream failures instead of replying 200 with an empty
    // stream the client cannot interpret.
    if ((upstreamRes.statusCode || 200) >= 400) {
      const errorBody = await readBody(upstreamRes);
      clientRes.writeHead(upstreamRes.statusCode, { "Content-Type": "application/json" });
      clientRes.end(errorBody || JSON.stringify({ error: "Upstream error" }));
      return;
    }

    await proxyStreamingResponse(clientRes, upstreamRes);
  } catch (err) {
    clientRes.writeHead(503, { "Content-Type": "application/json" });
    clientRes.end(JSON.stringify({ error: "LottoBot server is unreachable. Is the proxy connected?" }));
  }
}
|
|
220
|
+
|
|
221
|
+
/**
 * Handle OpenAI-compatible POST /v1/chat/completions: forward the raw
 * body to LottoBot unchanged and pipe the upstream reply (streaming or
 * not) straight back, preserving status code and content type.
 *
 * Fix: the 503 error body is JSON but was sent without a Content-Type
 * header; it now declares application/json.
 *
 * @param {import("http").IncomingMessage} req - local client request
 * @param {import("http").ServerResponse} clientRes - local client response
 * @param {string} apiKey - LottoBot API key for the upstream call
 */
async function handleOpenAIChat(req, clientRes, apiKey) {
  const rawBody = await readBody(req);

  try {
    const upstreamRes = await fetchLottoBot("/v1/chat/completions", {
      method: "POST",
      body: rawBody,
    }, apiKey);

    // Pass through status and headers, stream body directly
    const statusCode = upstreamRes.statusCode || 200;
    const contentType = upstreamRes.headers["content-type"] || "application/json";
    clientRes.writeHead(statusCode, {
      "Content-Type": contentType,
      "Cache-Control": "no-cache",
      "Access-Control-Allow-Origin": "*",
    });

    upstreamRes.pipe(clientRes);
  } catch (err) {
    clientRes.writeHead(503, { "Content-Type": "application/json" });
    clientRes.end(JSON.stringify({ error: "LottoBot server is unreachable." }));
  }
}
|
|
245
|
+
|
|
246
|
+
// ── Startup validation ────────────────────────────────────────────────────────

/**
 * Probe the LottoBot /health endpoint to confirm the backend is
 * reachable. The endpoint needs no auth; any non-5xx reply counts as
 * reachable, and network failures resolve to false instead of throwing.
 *
 * @param {string} apiKey - forwarded as a bearer token (ignored by /health)
 * @returns {Promise<boolean>} true when the backend responded below 500
 */
async function validateApiKey(apiKey) {
  let res;
  try {
    res = await fetchLottoBot("/health", { method: "GET" }, apiKey);
  } catch {
    return false;
  }
  return res.statusCode < 500;
}
|
|
257
|
+
|
|
258
|
+
// ── Main server ───────────────────────────────────────────────────────────────

/**
 * Entry point: resolve the API key, verify the backend is reachable,
 * then run the local Ollama-compatible proxy until interrupted.
 *
 * Fixes:
 *  - a `server.on("error")` handler reports EADDRINUSE cleanly (port
 *    11434 is Ollama's own default, so a running Ollama install collides)
 *    instead of crashing with an unhandled exception;
 *  - SECURITY: binds loopback-only by default — this proxy spends draws
 *    from the user's API key, so it should not be reachable from the
 *    LAN. Set LOTTOBOT_BIND=0.0.0.0 to deliberately expose it.
 */
async function main() {
  const { apiKey } = parseArgs();

  console.log("Connecting to LottoBot server...");
  const reachable = await validateApiKey(apiKey);
  if (!reachable) {
    console.error("Error: Could not reach the LottoBot server (https://lottobot.ai).");
    console.error("Check your internet connection and try again.");
    process.exit(1);
  }

  const server = http.createServer(async (req, clientRes) => {
    const url = new URL(req.url, `http://localhost:${PROXY_PORT}`);
    const method = req.method.toUpperCase();

    // CORS preflight
    if (method === "OPTIONS") {
      clientRes.writeHead(204, {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Headers": "Authorization, Content-Type",
        "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
      });
      clientRes.end();
      return;
    }

    // ── Ollama API endpoints ──────────────────────────────────────────────────
    if (method === "GET" && url.pathname === "/") {
      clientRes.writeHead(200, { "Content-Type": "application/json" });
      clientRes.end(ollamaVersionResponse());
      return;
    }

    if (method === "GET" && url.pathname === "/api/tags") {
      clientRes.writeHead(200, { "Content-Type": "application/json", "Access-Control-Allow-Origin": "*" });
      clientRes.end(ollamaTagsResponse());
      return;
    }

    if (method === "POST" && url.pathname === "/api/chat") {
      await handleOllamaChat(req, clientRes, apiKey);
      return;
    }

    if (method === "POST" && url.pathname === "/api/generate") {
      // Route generate through chat completions (single-turn)
      await handleOllamaChat(req, clientRes, apiKey);
      return;
    }

    // ── OpenAI-compatible API endpoints ──────────────────────────────────────
    if (method === "GET" && url.pathname === "/v1/models") {
      clientRes.writeHead(200, { "Content-Type": "application/json", "Access-Control-Allow-Origin": "*" });
      clientRes.end(openaiModelsResponse());
      return;
    }

    if (method === "POST" && url.pathname === "/v1/chat/completions") {
      await handleOpenAIChat(req, clientRes, apiKey);
      return;
    }

    // ── 404 for anything else ─────────────────────────────────────────────────
    clientRes.writeHead(404, { "Content-Type": "application/json" });
    clientRes.end(JSON.stringify({ error: "Not found" }));
  });

  // Without this handler, a bind failure (e.g. Ollama already listening
  // on 11434) throws an unhandled 'error' event and crashes with a stack
  // trace instead of a readable message.
  server.on("error", (err) => {
    if (err.code === "EADDRINUSE") {
      console.error(`Error: port ${PROXY_PORT} is already in use.`);
      console.error("Is Ollama or another LottoBot proxy already running? Stop it and retry.");
    } else {
      console.error("Server error:", err.message);
    }
    process.exit(1);
  });

  // Loopback-only by default; opt in to LAN exposure via LOTTOBOT_BIND.
  const bindHost = process.env.LOTTOBOT_BIND || "127.0.0.1";

  server.listen(PROXY_PORT, bindHost, () => {
    console.log("");
    console.log(` LottoBot proxy running on http://localhost:${PROXY_PORT}`);
    console.log(` Model: ${MODEL_NAME} | Connected to: ${LOTTOBOT_API_BASE}`);
    console.log("");
    console.log(" Open your AI chat app and connect to http://localhost:11434");
    console.log(" Compatible with: Open WebUI, AnythingLLM, Continue.dev, LM Studio, and more");
    console.log("");
    console.log(" Press Ctrl+C to stop.");
    console.log("");
  });

  // Handle Ctrl+C gracefully on all platforms
  process.on("SIGINT", () => {
    console.log("\nLottoBot proxy stopped.");
    server.close(() => process.exit(0));
  });

  process.on("SIGTERM", () => {
    server.close(() => process.exit(0));
  });
}
|
|
350
|
+
|
|
351
|
+
// Kick off the proxy; any unhandled startup failure is reported and the
// process exits non-zero so wrapping scripts can detect it.
main().catch((err) => {
  console.error("Fatal error:", err.message);
  process.exit(1);
});
|
package/package.json
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "lottobot-server",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "LottoBot local proxy — connect your AI chat app to LottoBot predictions",
|
|
5
|
+
"main": "index.js",
|
|
6
|
+
"type": "module",
|
|
7
|
+
"bin": {
|
|
8
|
+
"lottobot-server": "index.js"
|
|
9
|
+
},
|
|
10
|
+
"engines": {
|
|
11
|
+
"node": ">=18"
|
|
12
|
+
},
|
|
13
|
+
"keywords": [
|
|
14
|
+
"lottobot",
|
|
15
|
+
"lotto",
|
|
16
|
+
"ollama",
|
|
17
|
+
"proxy",
|
|
18
|
+
"ai"
|
|
19
|
+
],
|
|
20
|
+
"license": "MIT",
|
|
21
|
+
"files": [
|
|
22
|
+
"index.js",
|
|
23
|
+
"README.md"
|
|
24
|
+
]
|
|
25
|
+
}
|