@moonwatch/js 0.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,206 @@
1
+ # Moonwatch SDK
2
+
3
+ A lightweight logging SDK that sends structured logs to a Moonwatch backend via HTTP or WebSocket, while echoing to the local console.
4
+
5
+ ## Quick Start
6
+
7
+ ```ts
8
+ import { createLogger } from '@moonwatch/js';
9
+
10
+ const logger = createLogger({
11
+ logId: 'your-log-file-id',
12
+ apiKey: 'your-api-key',
13
+ });
14
+
15
+ logger.info("server started", { port: 3000 });
16
+ logger.warn("slow query", { duration: 1200, query: "SELECT ..." });
17
+ logger.error("payment failed", { orderId: "abc-123" });
18
+ ```
19
+
20
+ ## Configuration
21
+
22
+ ```ts
23
+ const logger = createLogger({
24
+ // Required
25
+ logId: 'uuid', // Log file ID from the dashboard
26
+ apiKey: 'your-api-key', // API key for ingestion auth
27
+
28
+ // Optional
29
+ group: 'api', // Default group for all entries
30
+ traceId: 'req-123', // Default trace ID
31
+ silent: false, // Set true to suppress console output (default: false)
32
+ level: 'DEBUG', // Only send logs at this level and above (default: 'DEBUG')
33
+ onError: (err, logs) => { // Custom error handler for flush failures
34
+ // handle error
35
+ },
36
+ });
37
+ ```
38
+
39
+ ### `level` option
40
+
41
+ Controls which logs are sent to the remote service. Defaults to `'DEBUG'` (send everything). Logs below this level are not sent remotely — they are still echoed to the local console (unless `silent: true`), so you keep full visibility in your terminal.
42
+
43
+ This is useful when you want full remote logging in production but don't need to send every debug log during local development where you're already watching the console:
44
+
45
+ ```ts
46
+ const logger = createLogger({
47
+ logId: 'your-log-file-id',
48
+ apiKey: 'your-api-key',
49
+ level: process.env.NODE_ENV === 'production' ? 'DEBUG' : 'ERROR',
50
+ });
51
+ ```
52
+
53
+ In this setup, local dev only sends errors remotely (you're already seeing everything in your terminal), while production captures the full picture for dashboard viewing and LLM analysis.
54
+
55
+ ### `silent` option
56
+
57
+ By default, every `logger.*` call also prints to the local console so developers don't lose visibility when replacing `console.log` with `logger.info`. Set `silent: true` in production to disable this.
58
+
59
+ ## Log Methods
60
+
61
+ Every level supports two call styles:
62
+
63
+ ### Simple form — message + optional metadata
64
+
65
+ ```ts
66
+ logger.debug("cache miss", { key: "user:42" });
67
+ logger.info("request handled", { method: "GET", path: "/api/users", ms: 12 });
68
+ logger.warn("rate limit approaching", { current: 95, max: 100 });
69
+ logger.error("query failed", { table: "orders" });
70
+ logger.fatal("database unreachable");
71
+ ```
72
+
73
+ The second argument is a plain metadata object. It can contain nested objects and arrays — the SDK sends it as-is.
74
+
75
+ ### Object form — for per-call group or trace ID
76
+
77
+ ```ts
78
+ logger.info({
79
+ message: "user signed in",
80
+ group: "auth",
81
+ traceId: "req-abc",
82
+ metadata: { userId: 123, method: "oauth" },
83
+ });
84
+ ```
85
+
86
+ Use this when you need to override `group` or `traceId` on a single call without changing the logger's defaults.
87
+
88
+ ## Error Logging
89
+
90
+ `error()` and `fatal()` accept Error objects directly — the SDK extracts the message, stack trace, and error type automatically:
91
+
92
+ ```ts
93
+ try {
94
+ await processPayment(order);
95
+ } catch (err) {
96
+ logger.error(err as Error, { orderId: order.id });
97
+ }
98
+ ```
99
+
100
+ This populates the `stack` and `error_type` columns in ClickHouse so stack traces and exception class names are stored as structured fields, not mashed into the message.
101
+
102
+ ## Scoped Loggers
103
+
104
+ ### Group Logger
105
+
106
+ ```ts
107
+ const authLogger = logger.withGroup("auth");
108
+ authLogger.info("login attempt", { email: "user@example.com" });
109
+ // → group: "auth"
110
+ ```
111
+
112
+ ### Trace Logger
113
+
114
+ ```ts
115
+ const reqLogger = logger.withTraceId("req-abc-123");
116
+ reqLogger.info("handling request");
117
+ reqLogger.warn("slow downstream call", { service: "billing", ms: 800 });
118
+ // → trace_id: "req-abc-123" on both entries
119
+ ```
120
+
121
+ ### Mutating defaults
122
+
123
+ ```ts
124
+ // Set for all subsequent calls
125
+ logger.setGroup("worker");
126
+ logger.setTraceId("job-456");
127
+
128
+ // Clear
129
+ logger.setGroup(undefined);
130
+ logger.setTraceId(undefined);
131
+ ```
132
+
133
+ ## Console Interception
134
+
135
+ Capture existing `console.*` calls without changing application code:
136
+
137
+ ```ts
138
+ logger.interceptConsole(); // uses group "console"
139
+ logger.interceptConsole("app"); // uses custom group
140
+
141
+ // Later, to restore original console behavior:
142
+ logger.restoreConsole();
143
+ ```
144
+
145
+ This wraps `console.debug`, `console.log`, `console.info`, and `console.warn`. Errors are captured via `window.addEventListener('error')` and `unhandledrejection` listeners instead of wrapping `console.error`, to avoid polluting stack traces in frameworks like Next.js/React.
146
+
147
+ ## Source Map Support (Vite)
148
+
149
+ Automatically resolve minified production stack traces back to original source. The Vite plugin uploads source maps at build time, and the ingestion service resolves stack traces before storing them.
150
+
151
+ ```ts
152
+ // vite.config.ts
153
+ import moonwatchPlugin from '@moonwatch/js/vite';
154
+
155
+ export default defineConfig({
156
+ plugins: [
157
+ moonwatchPlugin({
158
+ apiKey: process.env.MOONWATCH_API_KEY!,
159
+ logFileId: 'your-log-file-id',
160
+ }),
161
+ ],
162
+ build: { sourcemap: true },
163
+ });
164
+ ```
165
+
166
+ The plugin:
167
+ - **Skips in dev mode** — stacks are already readable
168
+ - **Generates a release UUID** per build, injected as `__MOONWATCH_RELEASE__`
169
+ - **Uploads `.map` files** from the build output to the ingestion service on `closeBundle`
170
+ - **Multi-bundle (Electron):** All plugin instances in the same Node process share one release ID, so main + renderer maps are grouped under the same release
171
+
172
+ No SDK configuration needed — the SDK automatically detects `__MOONWATCH_RELEASE__` and attaches it to every log entry.
173
+
174
+ ## AI Assistant Integration
175
+
176
+ Run the init command to configure your AI coding assistant (Claude Code, Cursor, etc.) to use the SDK:
177
+
178
+ ```bash
179
+ npx @moonwatch/js init
180
+ ```
181
+
182
+ This adds a reference to the SDK's workflow guide in your `CLAUDE.md` and `.cursorrules`. Once configured, your AI assistant will know how to place logs using the SDK and query them via MCP — no copy-pasting logs needed.
183
+
184
+ To set up manually, add this line to your `CLAUDE.md`:
185
+
186
+ ```md
187
+ > See node_modules/@moonwatch/js/SKILL.md for the @moonwatch/js logging workflow.
188
+ ```
189
+
190
+ ## MCP Server (AI Log Analysis)
191
+
192
+ Moonwatch includes an MCP server that lets AI assistants query your logs directly.
193
+
194
+ ### Add
195
+
196
+ ```bash
197
+ claude mcp add --transport http moonwatch https://moonwatch.dev/mcp --header "Authorization: Bearer YOUR_API_KEY"
198
+ ```
199
+
200
+ ### Remove
201
+
202
+ ```bash
203
+ claude mcp remove moonwatch
204
+ ```
205
+
206
+ > **Note:** The `--transport http` flag is required — without it, `claude` defaults to stdio transport which won't work with a remote server.
package/SKILL.md ADDED
@@ -0,0 +1,320 @@
1
+ # @moonwatch/js — AI Workflow Guide
2
+
3
+ This project uses Moonwatch to give you persistent runtime insight. You can place log statements freely — they are sent to a central server and you can query them via MCP at any time. This means you don't need to ask the user to copy-paste logs. You place them, the user reproduces the issue, and you query the results yourself.
4
+
5
+ ## The Workflow
6
+
7
+ 1. **Create a watcher** via MCP (`watchers_create`) describing what you're investigating — you get back a UUID
8
+ 2. **Place logs liberally** using `logger.debug()` with the watcher ID attached — instrument every code path that could be relevant
9
+ 3. **Ask the user to reproduce** the issue (or wait for the next occurrence)
10
+ 4. **Query the logs via MCP** using `logs_query` with `WHERE watcher_id = '<uuid>'` — you see exactly the logs you placed, no noise
11
+ 5. **Record findings** via `watchers_update` — accumulate your analysis so you don't lose context across conversations
12
+ 6. **Iterate** — add more logs, query again, update findings
13
+ 7. **Resolve** — once fixed, mark the watcher as resolved via `watchers_update` with `status: "resolved"`
14
+
15
+ You should default to placing logs early and liberally when investigating runtime issues. Don't spend multiple rounds guessing from static code alone — instrument the code, get the data, then fix the bug. Logs persist across runs, so once placed, they keep providing insight. Use `logger.debug()` freely — the watcher ID lets you filter to only your logs, so volume isn't a concern.
16
+
17
+ **Be generous with logging.** When investigating an issue, don't just log the one line you suspect — log the inputs, the intermediate state, the branch taken, the output. Five `logger.debug()` calls is better than one if it saves a round-trip of "add more logs, reproduce again."
18
+
19
+ **Cleanup rule:** Use `logger.debug()` for temporary diagnostic logs placed during investigation. Once the issue is resolved, remove all debug-level logs you added. Permanent logs that provide ongoing operational value (errors, warnings, key events) should use the appropriate level and can stay.
20
+
21
+ ## Watchers
22
+
23
+ Watchers are your primary tool for structured investigations. Every time you start debugging a runtime issue, **create a watcher first**. This gives you a UUID that tags all your diagnostic logs, so you can query them in isolation.
24
+
25
+ ### Creating and Using a Watcher
26
+
27
+ ```
28
+ 1. watchers_create({ description: "Investigating why WebSocket reconnects fail after 5 attempts" })
29
+ → returns UUID: "a1b2c3d4-..."
30
+
31
+ 2. Place logs in code with that watcher ID:
32
+ ```
33
+
34
+ ```ts
35
+ // Scoped logger — all logs from this get the watcher ID automatically
36
+ const dbg = logger.withWatcher("a1b2c3d4-...");
37
+ dbg.debug("ws reconnect attempt", { attempt: this.wsReconnectAttempts, delay });
38
+ dbg.debug("ws state before connect", { state: this.wsState, hasSocket: !!this.ws });
39
+ dbg.debug("ws onclose fired", { code: event.code, reason: event.reason });
40
+
41
+ // Or inline for one-off logs
42
+ logger.debug({ message: "auth header value", watcherId: "a1b2c3d4-...", metadata: { header } });
43
+ ```
44
+
45
+ ```
46
+ 3. User reproduces → query YOUR logs only:
47
+ logs_query({ query: "SELECT timestamp, message, metadata FROM logs.entries WHERE watcher_id = 'a1b2c3d4-...' ORDER BY timestamp" })
48
+
49
+ 4. Record what you learned AND report to the user:
50
+ watchers_update({ id: "a1b2c3d4-...", findings: "The reconnect fails because wsState is still 'connecting' when onclose fires — race condition between..." })
51
+ Always tell the user what you found — the watcher records findings for future sessions, but the user needs to hear it now.
52
+
53
+ 5. Fix the bug, verify, then resolve:
54
+ watchers_update({ id: "a1b2c3d4-...", status: "resolved", findings: "Fixed by checking wsState in onclose handler. Verified reconnect works after 5 attempts." })
55
+ ```
56
+
57
+ ### Watcher MCP Tools
58
+
59
+ - **`watchers_create`** — Start an investigation. Always include a clear description.
60
+ - **`watchers_list`** — See all your watchers and which have new data.
61
+ - **`watchers_get`** — Read full details + findings. Resets the "new data" flag.
62
+ - **`watchers_update`** — Record findings, change status, update description.
63
+
64
+ ### When to Create a Watcher
65
+
66
+ - **Always** when investigating a runtime bug — even if you think it'll be quick
67
+ - When monitoring a fix to verify it works in production
68
+ - When you need to understand the flow through unfamiliar code at runtime
69
+ - When correlating behavior across multiple components
70
+
71
+ ### Logging Generously with Watchers
72
+
73
+ Because watcher IDs let you filter to only your logs, don't be stingy. For a typical investigation, you might place 5-15 `logger.debug()` calls across the relevant code path:
74
+
75
+ ```ts
76
+ const dbg = logger.withWatcher("a1b2c3d4-...");
77
+
78
+ // Log inputs
79
+ dbg.debug("handleRequest called", { method, path, headers: Object.keys(headers) });
80
+
81
+ // Log branch decisions
82
+ dbg.debug("auth check result", { authenticated, userId, tokenExpiry });
83
+
84
+ // Log intermediate state
85
+ dbg.debug("query built", { sql: query.substring(0, 200), paramCount: params.length });
86
+
87
+ // Log the thing you actually suspect
88
+ dbg.debug("response timing", { dbMs, renderMs, totalMs });
89
+
90
+ // Log the output
91
+ dbg.debug("response sent", { status, bodyLength: body.length });
92
+ ```
93
+
94
+ This is much more effective than placing one log and having to do multiple rounds of "add more logs, reproduce again."
95
+
96
+ ## Example: User Asks You to Analyze Logs
97
+
98
+ When the user asks you to look into a problem visible in logs (e.g. "why are we getting 500s on the billing endpoint?"), follow this pattern:
99
+
100
+ 1. **Query the logs first** — use `logs_query` to understand the problem
101
+ 2. **Report your findings** to the user
102
+ 3. **Check for existing watchers** — call `watchers_list` to see if there's already a watcher tracking this issue
103
+ 4. **If a related watcher exists** — read it with `watchers_get`, append your new findings via `watchers_update`
104
+ 5. **If no watcher exists** — offer to create one:
105
+
106
+ > "I found the issue — the billing endpoint is throwing a null reference when `subscription.plan` is missing. I can set up a watcher to track this. It'll place debug logs around the billing flow so we automatically capture the full context next time it happens. Want me to do that?"
107
+
108
+ If the user agrees:
109
+ - `watchers_create({ description: "Tracking null ref in billing endpoint when subscription.plan is missing", logFileId: "..." })`
110
+ - Place `logger.debug()` calls with the watcher ID around the relevant code paths
111
+ - The cron job will flag new matching logs automatically, so next time it happens you'll have full context
112
+
113
+ **This matters because investigations span multiple conversations.** A watcher persists your findings and keeps collecting data between sessions. Without one, the next conversation starts from scratch. With one, you (or a future Claude) can call `watchers_list`, see there's an active watcher with findings, and pick up where you left off.
114
+
115
+ ### The full flow looks like:
116
+
117
+ ```
118
+ User: "We're seeing intermittent 500s on POST /api/orders"
119
+
120
+ You:
121
+ 1. logs_query → find the errors, identify the pattern
122
+ 2. Report: "These are timeout errors when the payment gateway takes >10s"
123
+ 3. watchers_list → no existing watcher for this
124
+ 4. Offer: "Want me to set up a watcher? I'll instrument the order flow
125
+ so we capture timing data automatically next time."
126
+
127
+ User: "Yes"
128
+
129
+ You:
130
+ 5. watchers_create({ description: "Intermittent 500s on POST /api/orders — payment gateway timeouts >10s" })
131
+ 6. Place debug logs:
132
+ - Before the gateway call (log request params, timestamp)
133
+ - After the gateway call (log response time, status)
134
+ - In the error handler (log the full error context)
135
+ - At the retry logic (log attempt count, backoff delay)
136
+ 7. watchers_update({ findings: "Initial analysis: gateway timeouts when response >10s.
137
+ Placed debug logs in orderService.createOrder(), paymentGateway.charge(),
138
+ and the retry handler. Will have full timing data on next occurrence." })
139
+ 8. Report back to user with what you placed and what to expect
140
+
141
+ --- later, in a new conversation ---
142
+
143
+ User: "The 500s happened again"
144
+
145
+ You:
146
+ 1. watchers_list → see the active watcher with new_data = true
147
+ 2. watchers_get → read previous findings
148
+ 3. logs_query WHERE watcher_id = '...' → see the detailed debug logs
149
+ 4. Now you have full context without starting over
150
+ ```
151
+
152
+ ## When to Place Logs
153
+
154
+ - Debugging a runtime bug that isn't obvious from code alone
155
+ - Investigating timing issues, race conditions, or intermittent failures
156
+ - Understanding the flow through async code, event handlers, or callbacks
157
+ - Correlating behavior across multiple services or processes (use `traceId`)
158
+ - Monitoring a specific code path after a fix to verify it works
159
+
160
+ ## Best Practices
161
+
162
+ **Use `withGroup()` to categorize logs — don't embed context in the message.**
163
+
164
+ The `group` field is a queryable column in ClickHouse. When you prefix messages manually, you lose the ability to filter and aggregate by subsystem. Groups should reflect the domain/feature area using slash-separated paths, like a file system:
165
+
166
+ ```ts
167
+ // Bad — context is buried in the message string, not queryable
168
+ logger.info("[StorageService] Getting user");
169
+
170
+ // Good — group is a structured, queryable field
171
+ const log = logger.withGroup("storage");
172
+ log.info("Getting user");
173
+
174
+ // Also good — inline object form for one-off logs
175
+ logger.info({ message: "Getting user", group: "storage" });
176
+ ```
177
+
178
+ **Use hierarchical slash-separated groups** so you can filter at any level of granularity:
179
+
180
+ ```ts
181
+ // Specific subsystem groups — lets you zoom in or out when querying
182
+ logger.withGroup("api/billing").debug("charge initiated", { amount, customerId });
183
+ logger.withGroup("api/billing/webhooks").debug("stripe event received", { type, eventId });
184
+ logger.withGroup("api/orders").debug("order created", { orderId, items: items.length });
185
+ logger.withGroup("api/orders/autodispatch").debug("driver assignment", { orderId, driverId, distance });
186
+ logger.withGroup("ws/connections").debug("client connected", { clientId, protocol });
187
+ logger.withGroup("auth/oauth").debug("token refresh", { userId, expiresIn });
188
+ logger.withGroup("cron/cleanup").debug("retention sweep", { partitionsScanned, deleted });
189
+ ```
190
+
191
+ This lets you query at different levels:
192
+ ```sql
193
+ -- Everything in the billing subsystem
194
+ WHERE group LIKE 'api/billing%'
195
+
196
+ -- Just webhook processing
197
+ WHERE group = 'api/billing/webhooks'
198
+
199
+ -- All API logs
200
+ WHERE group LIKE 'api/%'
201
+
202
+ -- Log volume by subsystem
203
+ SELECT group, count(*) FROM logs.entries GROUP BY group ORDER BY count() DESC
204
+ ```
205
+
206
+ When placing logs across multiple files or modules during an investigation, create a scoped logger per module so logs are naturally organized. Combine `withGroup()` and `withWatcher()` for maximum queryability — the group tells you *where* in the system, the watcher tells you *which investigation*.
207
+
208
+ ```ts
209
+ const dbg = logger.withWatcher("a1b2c3d4-...").withGroup("api/orders/autodispatch");
210
+ dbg.debug("dispatch started", { orderId, availableDrivers: drivers.length });
211
+ dbg.debug("driver scored", { driverId, score, distance, eta });
212
+ dbg.debug("driver assigned", { orderId, driverId, dispatchMs: Date.now() - start });
213
+ ```
214
+
215
+ Use `withTraceId()` to correlate logs across a single request or operation, especially across services.
216
+
217
+ ## SDK Quick Reference
218
+
219
+ ### Setup (already done if the project uses this SDK)
220
+
221
+ **Important:** Look for an existing logger instance in the project before creating a new one. Projects typically have a shared logger in a file like `logger.ts` or `lib/logger.ts` — import from there instead of calling `createLogger()` again.
222
+
223
+ ```ts
224
+ import { createLogger } from '@moonwatch/js';
225
+
226
+ const logger = createLogger({
227
+ logId: 'uuid-from-dashboard', // required — identifies the log stream
228
+ apiKey: 'rl_...', // required — ingestion auth
229
+ group: 'api', // optional: default group for all entries
230
+ traceId: 'req-123', // optional: default trace ID
231
+ level: 'DEBUG', // optional: minimum level (default: 'DEBUG')
232
+ silent: false, // optional: suppress console echo (default: false)
233
+ });
234
+ ```
235
+
236
+ ### Logging
237
+
238
+ ```ts
239
+ logger.debug("cache hit", { key: "user:42" });
240
+ logger.info("request handled", { method: "GET", path: "/api/users", ms: 12 });
241
+ logger.warn("slow query", { duration: 1200 });
242
+ logger.error(err as Error, { orderId: order.id }); // Error objects extract stack + error_type
243
+ logger.fatal(new Error("database unreachable"));
244
+ ```
245
+
246
+ ### Console Interception
247
+
248
+ ```ts
249
+ logger.interceptConsole(); // captures console.debug/log/info/warn + unhandled errors
250
+ logger.interceptConsole("app"); // with a custom group name
251
+ logger.restoreConsole(); // undo
252
+ ```
253
+
254
+ Use this to capture logs from third-party code or existing console.log statements without modifying them.
255
+
256
+ ## Reading Logs via MCP
257
+
258
+ You have two MCP tools available:
259
+
260
+ - **`logs_list_log_files`** — Lists all log files with data. Call this first to find the log file ID.
261
+ - **`logs_query`** — Execute SQL (ClickHouse) against a log file. Tenant/log filtering is automatic.
262
+
263
+ ### Common Queries
264
+
265
+ ```sql
266
+ -- Recent errors
267
+ SELECT timestamp, level, message, stack FROM logs.entries WHERE level = 'ERROR' ORDER BY timestamp DESC LIMIT 20
268
+
269
+ -- Errors by type
270
+ SELECT error_type, count(*) as cnt FROM logs.entries WHERE error_type IS NOT NULL GROUP BY error_type ORDER BY cnt DESC
271
+
272
+ -- Search for a specific message pattern
273
+ SELECT timestamp, level, group, message FROM logs.entries WHERE message LIKE '%ECONNREFUSED%' ORDER BY timestamp DESC LIMIT 20
274
+
275
+ -- Error rate over time
276
+ SELECT toStartOfMinute(timestamp) as minute, count(*) FROM logs.entries WHERE level = 'ERROR' GROUP BY minute ORDER BY minute
277
+
278
+ -- Logs by group
279
+ SELECT group, count(*) FROM logs.entries GROUP BY group ORDER BY count() DESC
280
+
281
+ -- Trace a specific request across services
282
+ SELECT timestamp, group, level, message FROM logs.entries WHERE trace_id = 'req-abc-123' ORDER BY timestamp
283
+
284
+ -- Recent logs from a specific group
285
+ SELECT timestamp, level, message FROM logs.entries WHERE group = 'auth' ORDER BY timestamp DESC LIMIT 50
286
+
287
+ -- Error with source context (see the original code around the error)
288
+ SELECT timestamp, message, stack, source_context FROM logs.entries WHERE level = 'ERROR' AND source_context IS NOT NULL ORDER BY timestamp DESC LIMIT 5
289
+
290
+ -- All logs for a watcher investigation
291
+ SELECT timestamp, message, metadata FROM logs.entries WHERE watcher_id = 'a1b2c3d4-...' ORDER BY timestamp
292
+
293
+ -- Watcher logs filtered by time window
294
+ SELECT timestamp, message, metadata FROM logs.entries WHERE watcher_id = 'a1b2c3d4-...' AND timestamp >= '2026-02-16 10:00:00' ORDER BY timestamp
295
+ ```
296
+
297
+ **Tip:** When investigating production errors, include `source_context` in your SELECT. It contains the original source code around the error line (resolved from source maps at ingestion time), so you can see exactly what code triggered the error without needing to open the file.
298
+
299
+ ### Table Schema
300
+
301
+ ```
302
+ seq UInt64 -- sequence number
303
+ timestamp DateTime64(3) -- e.g., '2025-01-22 14:30:00.123'
304
+ level String -- DEBUG, INFO, WARN, ERROR, FATAL
305
+ trace_id String -- request correlation
306
+ group String -- categorization (e.g., 'api/billing', 'main', 'renderer')
307
+ message String -- log content
308
+ stack Nullable(String) -- stack trace (resolved via source maps if available)
309
+ error_type String -- exception class name
310
+ release_id String -- build release ID (for source map resolution)
311
+ source_context String -- JSON: { file, line, code: { "lineNum": "source line" } } — original source around the error
312
+ watcher_id String -- watcher UUID (empty string if not tagged)
313
+ metadata JSON -- arbitrary structured data
314
+ ```
315
+
316
+ ## Do NOT
317
+
318
+ - Do not pass `batchSize`, `flushInterval`, `httpEndpoint`, `wsEndpoint`, or `transport` — these do not exist.
319
+ - Do not call `errorWithStack()` — it does not exist. Use `logger.error(err)` instead.
320
+ - Do not call `logger.shutdown()` unless explicitly tearing down the logger mid-process. It is not required.
@@ -0,0 +1,11 @@
1
// Runtime helpers for lowered ES private class members (#field / #method),
// as emitted by the bundler. Dependency-free by design.

/** Always throws a TypeError with the given message. */
var __typeError = (msg) => {
  throw new TypeError(msg);
};

/** Brand check: fails with "Cannot <msg>" unless `obj` was registered in `member`. */
var __accessCheck = (obj, member, msg) => {
  if (member.has(obj)) return true;
  return __typeError("Cannot " + msg);
};

/**
 * Registers `obj` in the private-member store and returns the store.
 * A WeakSet marks brand membership (private methods); a WeakMap also holds
 * the field value. Registering the same object twice is a TypeError,
 * mirroring native private-member semantics.
 */
var __privateAdd = (obj, member, value) => {
  if (member.has(obj)) {
    __typeError("Cannot add the same private member more than once");
  }
  return member instanceof WeakSet ? member.add(obj) : member.set(obj, value);
};

/** Guarded accessor for a private method: verifies the brand, then yields the method. */
var __privateMethod = (obj, member, method) => {
  __accessCheck(obj, member, "access private method");
  return method;
};

export {
  __privateAdd,
  __privateMethod
};
@@ -0,0 +1,132 @@
1
/** Severity levels, ordered from least to most severe. */
type LogLevel = 'DEBUG' | 'INFO' | 'WARN' | 'ERROR' | 'FATAL';

/** Options accepted by {@link createLogger}. */
interface LoggerConfig {
    /** Log file ID (from the dashboard) identifying the log stream. */
    logId: string;
    /** API key for ingestion auth. NOTE(review): optional in the typings — presumably because proxy mode (`_ingestUrl`) may authenticate elsewhere, but the README lists it as required for direct ingestion; confirm. */
    apiKey?: string;
    /** Default group attached to every entry. */
    group?: string;
    /** Only send logs at this level and above to the remote service (default: 'DEBUG' — sends everything) */
    level?: LogLevel;
    /** Called when a flush fails; receives the error and the entries that could not be delivered. */
    onError?: (error: Error, logs: InternalLogEntry[]) => void;
    /** Default trace ID attached to every entry. */
    traceId?: string;
    /** When true, skip console output from logger.* methods; default false */
    silent?: boolean;
    /** When true, log SDK internal operations to console for troubleshooting */
    debug?: boolean;
    /** @internal Override base URL for development (e.g. 'http://localhost:3500') */
    _endpoint?: string;
    /** @internal Override WebSocket base URL for development (e.g. 'ws://localhost:3501') */
    _wsEndpoint?: string;
    /** @internal Full URL for log ingestion proxy (e.g. '/api/internal/ingest'). When set, uses this as the POST URL directly (no /ingest/http appended), skips WebSocket. */
    _ingestUrl?: string;
}

/** Wire shape of a single buffered entry as queued/sent to the ingestion service. */
interface InternalLogEntry {
    // Entry timestamp in string form.
    timestamp: string;
    level: LogLevel;
    message: string;
    // Target log stream (copied from LoggerConfig.logId).
    logId: string;
    group?: string;
    trace_id?: string;
    // Populated when an Error object is logged (stack trace / exception class name).
    stack?: string;
    error_type?: string;
    // Build release ID (set via the Vite plugin's __MOONWATCH_RELEASE__ injection).
    release_id?: string;
    // Watcher investigation UUID, when the entry is tagged.
    watcher_id?: string;
    metadata?: Record<string, unknown>;
}

/** Object form for logger.info({ message, group?, metadata? }) */
interface LogEntryObject {
    message: string;
    /** Per-call group override (takes precedence over the logger default). */
    group?: string;
    /** Per-call trace ID override. */
    traceId?: string;
    /** Tags this entry with a watcher investigation UUID. */
    watcherId?: string;
    metadata?: Record<string, unknown>;
}

/** Lifecycle states of the WebSocket transport. */
type ConnectionState = 'disconnected' | 'connecting' | 'connected' | 'failed';

/**
 * Structured logger that buffers entries and ships them to the Moonwatch
 * backend over WebSocket or HTTP, echoing to the local console by default
 * (see LoggerConfig.silent). Create instances with {@link createLogger}.
 */
declare class Logger {
    private config;
    private baseUrl;
    private httpIngestUrl;
    private wsIngestUrl;
    // Set when config._ingestUrl is provided: POST directly to that URL, skip WebSocket.
    private proxyMode;
    // Entries waiting to be flushed.
    private buffer;
    private flushTimer;
    private flushPromise;
    private consoleIntercepted;
    // HTTP retry/backoff state.
    private httpBackoffMs;
    private maxBackoffMs;
    private consecutiveHttpFailures;
    private _onError;
    private _onUnhandledRejection;
    // WebSocket transport + reconnect state.
    private ws;
    private wsState;
    private wsReconnectTimer;
    private wsReconnectAttempts;
    private maxWsReconnectAttempts;
    // Page-lifecycle listeners (used for flushing on hide/unload — confirm in implementation).
    private _onVisibilityChange;
    private _onBeforeUnload;
    constructor(config: LoggerConfig);
    // Internal diagnostics channel, active when config.debug is true.
    private debugLog;
    private connectWebSocket;
    private startFlushTimer;
    // Level gate per config.level.
    private shouldLog;
    private formatArgs;
    private static readonly CONSOLE_METHOD;
    private log;
    // Normalizes the overloaded (entry | message | Error, metadata?) call styles.
    private parseArgs;
    /** Log at DEBUG level — object form with per-call group/traceId/watcherId/metadata. */
    debug(entry: LogEntryObject): void;
    /** Log at DEBUG level — message plus optional metadata. */
    debug(message: string, metadata?: Record<string, unknown>): void;
    /** Log at INFO level — object form. */
    info(entry: LogEntryObject): void;
    /** Log at INFO level — message plus optional metadata. */
    info(message: string, metadata?: Record<string, unknown>): void;
    /** Log at WARN level — object form. */
    warn(entry: LogEntryObject): void;
    /** Log at WARN level — message plus optional metadata. */
    warn(message: string, metadata?: Record<string, unknown>): void;
    /** Log at ERROR level — object form. */
    error(entry: LogEntryObject): void;
    /** Log at ERROR level — message plus optional metadata. */
    error(message: string, metadata?: Record<string, unknown>): void;
    /** Log at ERROR level — Error objects populate the stack and error_type fields. */
    error(err: Error, metadata?: Record<string, unknown>): void;
    /** Log at FATAL level — object form. */
    fatal(entry: LogEntryObject): void;
    /** Log at FATAL level — message plus optional metadata. */
    fatal(message: string, metadata?: Record<string, unknown>): void;
    /** Log at FATAL level — Error objects populate the stack and error_type fields. */
    fatal(err: Error, metadata?: Record<string, unknown>): void;
    /** Scoped view of this logger with the given default trace ID. */
    withTraceId(traceId: string): ScopedLogger;
    /** Scoped view of this logger with the given default group. */
    withGroup(group: string): ScopedLogger;
    /** Scoped view of this logger tagging every entry with a watcher UUID. */
    withWatcher(watcherId: string): ScopedLogger;
    /** Mutate this logger's default trace ID (pass undefined to clear). */
    setTraceId(traceId: string | undefined): void;
    /** Mutate this logger's default group (pass undefined to clear). */
    setGroup(group: string | undefined): void;
    /** Wrap console.debug/log/info/warn so existing console calls are captured; default group is "console". */
    interceptConsole(group?: string, options?: {
        wrapErrors?: boolean;
    }): void;
    /** Undo interceptConsole(), restoring the original console methods. */
    restoreConsole(): void;
    /** Flush buffered entries to the backend; resolves when the attempt completes. */
    flush(): Promise<void>;
    private doFlush;
    private sendViaWebSocket;
    private sendViaHttp;
    /** Fire-and-forget flush using fetch with keepalive (works during page unload) */
    private flushSync;
    private registerLifecycleHooks;
    /** Check server connectivity and API key validity */
    ping(): Promise<{
        ok: boolean;
        latencyMs: number;
        error?: string;
    }>;
    /** Get current connection status */
    getConnectionStatus(): {
        transport: 'websocket' | 'http';
        state: ConnectionState;
    };
}

/**
 * Lightweight logger view that delegates to a parent Logger while pinning a
 * group, trace ID, and/or watcher ID. Obtained via withGroup()/withTraceId()/withWatcher().
 * NOTE(review): unlike Logger, these methods are typed for the string form only
 * (no LogEntryObject or Error overloads) — confirm whether that is intentional.
 */
declare class ScopedLogger {
    private parent;
    private group?;
    private traceId?;
    private watcherId?;
    constructor(parent: Logger, group?: string | undefined, traceId?: string | undefined, watcherId?: string | undefined);
    /** New scoped logger with a different group. */
    withGroup(group: string): ScopedLogger;
    /** New scoped logger with a different trace ID. */
    withTraceId(traceId: string): ScopedLogger;
    /** New scoped logger with a different watcher ID. */
    withWatcher(watcherId: string): ScopedLogger;
    // Builds the per-call options merged from this scope's pinned fields.
    private opts;
    debug(message: string, metadata?: Record<string, unknown>): void;
    info(message: string, metadata?: Record<string, unknown>): void;
    warn(message: string, metadata?: Record<string, unknown>): void;
    error(message: string, metadata?: Record<string, unknown>): void;
    fatal(message: string, metadata?: Record<string, unknown>): void;
}

/** Create a configured {@link Logger}. */
declare function createLogger(config: LoggerConfig): Logger;

export { type InternalLogEntry, type LogEntryObject, type LogLevel, Logger, type LoggerConfig, createLogger, createLogger as default };