@ironbee-ai/cli 0.6.2 → 0.7.0
- package/CHANGELOG.md +6 -0
- package/README.md +167 -39
- package/dist/analysis/code-changes.js.map +1 -1
- package/dist/analysis/cross-session.js.map +1 -1
- package/dist/analysis/fix-effectiveness.js.map +1 -1
- package/dist/analysis/time-analysis.js.map +1 -1
- package/dist/analysis/verdict-details.js.map +1 -1
- package/dist/analysis/verification-quality.js.map +1 -1
- package/dist/analytics/classifier.d.ts +99 -0
- package/dist/analytics/classifier.d.ts.map +1 -0
- package/dist/analytics/classifier.js +380 -0
- package/dist/analytics/classifier.js.map +1 -0
- package/dist/analytics/emit.d.ts +67 -0
- package/dist/analytics/emit.d.ts.map +1 -0
- package/dist/analytics/emit.js +901 -0
- package/dist/analytics/emit.js.map +1 -0
- package/dist/analytics/errors.d.ts +33 -0
- package/dist/analytics/errors.d.ts.map +1 -0
- package/dist/analytics/errors.js +93 -0
- package/dist/analytics/errors.js.map +1 -0
- package/dist/analytics/hook-trigger.d.ts +39 -0
- package/dist/analytics/hook-trigger.d.ts.map +1 -0
- package/dist/analytics/hook-trigger.js +127 -0
- package/dist/analytics/hook-trigger.js.map +1 -0
- package/dist/analytics/log.d.ts +44 -0
- package/dist/analytics/log.d.ts.map +1 -0
- package/dist/analytics/log.js +158 -0
- package/dist/analytics/log.js.map +1 -0
- package/dist/analytics/merge.d.ts +40 -0
- package/dist/analytics/merge.d.ts.map +1 -0
- package/dist/analytics/merge.js +527 -0
- package/dist/analytics/merge.js.map +1 -0
- package/dist/analytics/pricing.d.ts +149 -0
- package/dist/analytics/pricing.d.ts.map +1 -0
- package/dist/analytics/pricing.js +179 -0
- package/dist/analytics/pricing.js.map +1 -0
- package/dist/analytics/projection.d.ts +356 -0
- package/dist/analytics/projection.d.ts.map +1 -0
- package/dist/analytics/projection.js +2281 -0
- package/dist/analytics/projection.js.map +1 -0
- package/dist/analytics/spawn.d.ts +28 -0
- package/dist/analytics/spawn.d.ts.map +1 -0
- package/dist/analytics/spawn.js +57 -0
- package/dist/analytics/spawn.js.map +1 -0
- package/dist/analytics/state.d.ts +58 -0
- package/dist/analytics/state.d.ts.map +1 -0
- package/dist/analytics/state.js +329 -0
- package/dist/analytics/state.js.map +1 -0
- package/dist/analytics/transcript.d.ts +150 -0
- package/dist/analytics/transcript.d.ts.map +1 -0
- package/dist/analytics/transcript.js +276 -0
- package/dist/analytics/transcript.js.map +1 -0
- package/dist/analytics/types.d.ts +875 -0
- package/dist/analytics/types.d.ts.map +1 -0
- package/dist/analytics/types.js +31 -0
- package/dist/analytics/types.js.map +1 -0
- package/dist/clients/base.d.ts +21 -2
- package/dist/clients/base.d.ts.map +1 -1
- package/dist/clients/claude/commands/ironbee-verify.md +15 -7
- package/dist/clients/claude/fragments/command-verify.node.md +33 -0
- package/dist/clients/claude/fragments/rule.node.md +29 -0
- package/dist/clients/claude/fragments/skill.node.md +77 -0
- package/dist/clients/claude/hooks/activity-end.d.ts +13 -0
- package/dist/clients/claude/hooks/activity-end.d.ts.map +1 -0
- package/dist/clients/claude/hooks/activity-end.js +42 -0
- package/dist/clients/claude/hooks/activity-end.js.map +1 -0
- package/dist/clients/claude/hooks/require-verdict.d.ts +3 -2
- package/dist/clients/claude/hooks/require-verdict.d.ts.map +1 -1
- package/dist/clients/claude/hooks/require-verdict.js +6 -5
- package/dist/clients/claude/hooks/require-verdict.js.map +1 -1
- package/dist/clients/claude/hooks/require-verification.d.ts +7 -4
- package/dist/clients/claude/hooks/require-verification.d.ts.map +1 -1
- package/dist/clients/claude/hooks/require-verification.js +44 -22
- package/dist/clients/claude/hooks/require-verification.js.map +1 -1
- package/dist/clients/claude/hooks/session-end.d.ts.map +1 -1
- package/dist/clients/claude/hooks/session-end.js +17 -2
- package/dist/clients/claude/hooks/session-end.js.map +1 -1
- package/dist/clients/claude/hooks/session-start.d.ts.map +1 -1
- package/dist/clients/claude/hooks/session-start.js +2 -1
- package/dist/clients/claude/hooks/session-start.js.map +1 -1
- package/dist/clients/claude/hooks/track-action-monitor.d.ts +27 -0
- package/dist/clients/claude/hooks/track-action-monitor.d.ts.map +1 -0
- package/dist/clients/claude/hooks/track-action-monitor.js +126 -0
- package/dist/clients/claude/hooks/track-action-monitor.js.map +1 -0
- package/dist/clients/claude/hooks/track-action.d.ts.map +1 -1
- package/dist/clients/claude/hooks/track-action.js +29 -20
- package/dist/clients/claude/hooks/track-action.js.map +1 -1
- package/dist/clients/claude/hooks/verify-gate.d.ts.map +1 -1
- package/dist/clients/claude/hooks/verify-gate.js +18 -1
- package/dist/clients/claude/hooks/verify-gate.js.map +1 -1
- package/dist/clients/claude/index.d.ts +4 -1
- package/dist/clients/claude/index.d.ts.map +1 -1
- package/dist/clients/claude/index.js +171 -94
- package/dist/clients/claude/index.js.map +1 -1
- package/dist/clients/claude/rules/ironbee-verification.md +41 -33
- package/dist/clients/claude/skills/ironbee-verification.md +93 -76
- package/dist/clients/cursor/commands/ironbee-verify/SKILL.md +18 -10
- package/dist/clients/cursor/fragments/command-verify.node.md +33 -0
- package/dist/clients/cursor/fragments/rule.node.md +29 -0
- package/dist/clients/cursor/fragments/skill.node.md +77 -0
- package/dist/clients/cursor/hooks/activity-end.d.ts +14 -0
- package/dist/clients/cursor/hooks/activity-end.d.ts.map +1 -0
- package/dist/clients/cursor/hooks/activity-end.js +45 -0
- package/dist/clients/cursor/hooks/activity-end.js.map +1 -0
- package/dist/clients/cursor/hooks/require-verdict.d.ts +1 -1
- package/dist/clients/cursor/hooks/require-verdict.js +4 -4
- package/dist/clients/cursor/hooks/require-verification.d.ts.map +1 -1
- package/dist/clients/cursor/hooks/require-verification.js +42 -16
- package/dist/clients/cursor/hooks/require-verification.js.map +1 -1
- package/dist/clients/cursor/hooks/session-end.d.ts.map +1 -1
- package/dist/clients/cursor/hooks/session-end.js +18 -2
- package/dist/clients/cursor/hooks/session-end.js.map +1 -1
- package/dist/clients/cursor/hooks/session-start.d.ts.map +1 -1
- package/dist/clients/cursor/hooks/session-start.js +2 -1
- package/dist/clients/cursor/hooks/session-start.js.map +1 -1
- package/dist/clients/cursor/hooks/track-action-monitor.d.ts +27 -0
- package/dist/clients/cursor/hooks/track-action-monitor.d.ts.map +1 -0
- package/dist/clients/cursor/hooks/track-action-monitor.js +133 -0
- package/dist/clients/cursor/hooks/track-action-monitor.js.map +1 -0
- package/dist/clients/cursor/hooks/track-action.d.ts.map +1 -1
- package/dist/clients/cursor/hooks/track-action.js +51 -23
- package/dist/clients/cursor/hooks/track-action.js.map +1 -1
- package/dist/clients/cursor/hooks/verify-gate.d.ts.map +1 -1
- package/dist/clients/cursor/hooks/verify-gate.js +14 -1
- package/dist/clients/cursor/hooks/verify-gate.js.map +1 -1
- package/dist/clients/cursor/index.d.ts +4 -1
- package/dist/clients/cursor/index.d.ts.map +1 -1
- package/dist/clients/cursor/index.js +117 -71
- package/dist/clients/cursor/index.js.map +1 -1
- package/dist/clients/cursor/rules/ironbee-verification.mdc +37 -29
- package/dist/clients/cursor/skills/ironbee-verification.md +93 -76
- package/dist/clients/registry.d.ts +14 -0
- package/dist/clients/registry.d.ts.map +1 -1
- package/dist/clients/registry.js +34 -0
- package/dist/clients/registry.js.map +1 -1
- package/dist/commands/analyze.d.ts.map +1 -1
- package/dist/commands/analyze.js +40 -0
- package/dist/commands/analyze.js.map +1 -1
- package/dist/commands/backend-toggle.d.ts +45 -0
- package/dist/commands/backend-toggle.d.ts.map +1 -0
- package/dist/commands/backend-toggle.js +192 -0
- package/dist/commands/backend-toggle.js.map +1 -0
- package/dist/commands/disable-backend.d.ts +14 -0
- package/dist/commands/disable-backend.d.ts.map +1 -0
- package/dist/commands/disable-backend.js +34 -0
- package/dist/commands/disable-backend.js.map +1 -0
- package/dist/commands/disable-verification.d.ts +16 -0
- package/dist/commands/disable-verification.d.ts.map +1 -0
- package/dist/commands/disable-verification.js +36 -0
- package/dist/commands/disable-verification.js.map +1 -0
- package/dist/commands/enable-backend.d.ts +15 -0
- package/dist/commands/enable-backend.d.ts.map +1 -0
- package/dist/commands/enable-backend.js +35 -0
- package/dist/commands/enable-backend.js.map +1 -0
- package/dist/commands/enable-verification.d.ts +14 -0
- package/dist/commands/enable-verification.d.ts.map +1 -0
- package/dist/commands/enable-verification.js +34 -0
- package/dist/commands/enable-verification.js.map +1 -0
- package/dist/commands/hook.d.ts.map +1 -1
- package/dist/commands/hook.js +60 -0
- package/dist/commands/hook.js.map +1 -1
- package/dist/commands/import.d.ts +39 -0
- package/dist/commands/import.d.ts.map +1 -0
- package/dist/commands/import.js +369 -0
- package/dist/commands/import.js.map +1 -0
- package/dist/commands/install.d.ts.map +1 -1
- package/dist/commands/install.js +15 -20
- package/dist/commands/install.js.map +1 -1
- package/dist/commands/process-analytics.d.ts +18 -0
- package/dist/commands/process-analytics.d.ts.map +1 -0
- package/dist/commands/process-analytics.js +57 -0
- package/dist/commands/process-analytics.js.map +1 -0
- package/dist/commands/queue.d.ts +2 -3
- package/dist/commands/queue.d.ts.map +1 -1
- package/dist/commands/queue.js +2 -3
- package/dist/commands/queue.js.map +1 -1
- package/dist/commands/status.d.ts.map +1 -1
- package/dist/commands/status.js +29 -1
- package/dist/commands/status.js.map +1 -1
- package/dist/commands/verification-toggle.d.ts +47 -0
- package/dist/commands/verification-toggle.d.ts.map +1 -0
- package/dist/commands/verification-toggle.js +113 -0
- package/dist/commands/verification-toggle.js.map +1 -0
- package/dist/commands/verify.d.ts.map +1 -1
- package/dist/commands/verify.js +28 -0
- package/dist/commands/verify.js.map +1 -1
- package/dist/hooks/core/actions.d.ts +64 -67
- package/dist/hooks/core/actions.d.ts.map +1 -1
- package/dist/hooks/core/actions.js +39 -24
- package/dist/hooks/core/actions.js.map +1 -1
- package/dist/hooks/core/activity-end.d.ts +20 -0
- package/dist/hooks/core/activity-end.d.ts.map +1 -0
- package/dist/hooks/core/activity-end.js +23 -0
- package/dist/hooks/core/activity-end.js.map +1 -0
- package/dist/hooks/core/required-tools.d.ts +30 -0
- package/dist/hooks/core/required-tools.d.ts.map +1 -0
- package/dist/hooks/core/required-tools.js +70 -0
- package/dist/hooks/core/required-tools.js.map +1 -0
- package/dist/hooks/core/session-state.d.ts +12 -3
- package/dist/hooks/core/session-state.d.ts.map +1 -1
- package/dist/hooks/core/session-state.js +59 -0
- package/dist/hooks/core/session-state.js.map +1 -1
- package/dist/hooks/core/submit-verdict.d.ts.map +1 -1
- package/dist/hooks/core/submit-verdict.js +16 -12
- package/dist/hooks/core/submit-verdict.js.map +1 -1
- package/dist/hooks/core/verify-gate.d.ts +17 -3
- package/dist/hooks/core/verify-gate.d.ts.map +1 -1
- package/dist/hooks/core/verify-gate.js +312 -116
- package/dist/hooks/core/verify-gate.js.map +1 -1
- package/dist/import/claude/analytics-runner.d.ts +42 -0
- package/dist/import/claude/analytics-runner.d.ts.map +1 -0
- package/dist/import/claude/analytics-runner.js +213 -0
- package/dist/import/claude/analytics-runner.js.map +1 -0
- package/dist/import/claude/discovery.d.ts +22 -0
- package/dist/import/claude/discovery.d.ts.map +1 -0
- package/dist/import/claude/discovery.js +197 -0
- package/dist/import/claude/discovery.js.map +1 -0
- package/dist/import/claude/encoding.d.ts +50 -0
- package/dist/import/claude/encoding.d.ts.map +1 -0
- package/dist/import/claude/encoding.js +110 -0
- package/dist/import/claude/encoding.js.map +1 -0
- package/dist/import/claude/events/file-change.d.ts +28 -0
- package/dist/import/claude/events/file-change.d.ts.map +1 -0
- package/dist/import/claude/events/file-change.js +112 -0
- package/dist/import/claude/events/file-change.js.map +1 -0
- package/dist/import/claude/events/tool-call.d.ts +61 -0
- package/dist/import/claude/events/tool-call.d.ts.map +1 -0
- package/dist/import/claude/events/tool-call.js +119 -0
- package/dist/import/claude/events/tool-call.js.map +1 -0
- package/dist/import/claude/runner.d.ts +31 -0
- package/dist/import/claude/runner.d.ts.map +1 -0
- package/dist/import/claude/runner.js +280 -0
- package/dist/import/claude/runner.js.map +1 -0
- package/dist/import/claude/summary.d.ts +23 -0
- package/dist/import/claude/summary.d.ts.map +1 -0
- package/dist/import/claude/summary.js +186 -0
- package/dist/import/claude/summary.js.map +1 -0
- package/dist/import/claude/transcript-walk.d.ts +52 -0
- package/dist/import/claude/transcript-walk.d.ts.map +1 -0
- package/dist/import/claude/transcript-walk.js +187 -0
- package/dist/import/claude/transcript-walk.js.map +1 -0
- package/dist/import/concurrent-pool.d.ts +45 -0
- package/dist/import/concurrent-pool.d.ts.map +1 -0
- package/dist/import/concurrent-pool.js +95 -0
- package/dist/import/concurrent-pool.js.map +1 -0
- package/dist/import/emitter.d.ts +29 -0
- package/dist/import/emitter.d.ts.map +1 -0
- package/dist/import/emitter.js +66 -0
- package/dist/import/emitter.js.map +1 -0
- package/dist/import/events/activity.d.ts +23 -0
- package/dist/import/events/activity.d.ts.map +1 -0
- package/dist/import/events/activity.js +45 -0
- package/dist/import/events/activity.js.map +1 -0
- package/dist/import/events/session.d.ts +24 -0
- package/dist/import/events/session.d.ts.map +1 -0
- package/dist/import/events/session.js +47 -0
- package/dist/import/events/session.js.map +1 -0
- package/dist/import/filter.d.ts +47 -0
- package/dist/import/filter.d.ts.map +1 -0
- package/dist/import/filter.js +90 -0
- package/dist/import/filter.js.map +1 -0
- package/dist/import/ids.d.ts +56 -0
- package/dist/import/ids.d.ts.map +1 -0
- package/dist/import/ids.js +87 -0
- package/dist/import/ids.js.map +1 -0
- package/dist/import/index.d.ts +29 -0
- package/dist/import/index.d.ts.map +1 -0
- package/dist/import/index.js +52 -0
- package/dist/import/index.js.map +1 -0
- package/dist/import/marker.d.ts +20 -0
- package/dist/import/marker.d.ts.map +1 -0
- package/dist/import/marker.js +71 -0
- package/dist/import/marker.js.map +1 -0
- package/dist/import/pipeline.d.ts +41 -0
- package/dist/import/pipeline.d.ts.map +1 -0
- package/dist/import/pipeline.js +47 -0
- package/dist/import/pipeline.js.map +1 -0
- package/dist/import/progress.d.ts +20 -0
- package/dist/import/progress.d.ts.map +1 -0
- package/dist/import/progress.js +69 -0
- package/dist/import/progress.js.map +1 -0
- package/dist/import/skip.d.ts +13 -0
- package/dist/import/skip.d.ts.map +1 -0
- package/dist/import/skip.js +24 -0
- package/dist/import/skip.js.map +1 -0
- package/dist/import/types.d.ts +125 -0
- package/dist/import/types.d.ts.map +1 -0
- package/dist/import/types.js +28 -0
- package/dist/import/types.js.map +1 -0
- package/dist/index.js +21 -2
- package/dist/index.js.map +1 -1
- package/dist/lib/collector.d.ts +29 -3
- package/dist/lib/collector.d.ts.map +1 -1
- package/dist/lib/collector.js +118 -8
- package/dist/lib/collector.js.map +1 -1
- package/dist/lib/config.d.ts +240 -83
- package/dist/lib/config.d.ts.map +1 -1
- package/dist/lib/config.js +482 -89
- package/dist/lib/config.js.map +1 -1
- package/dist/lib/event.d.ts +72 -0
- package/dist/lib/event.d.ts.map +1 -0
- package/dist/lib/event.js +42 -0
- package/dist/lib/event.js.map +1 -0
- package/dist/lib/gitignore.d.ts +21 -0
- package/dist/lib/gitignore.d.ts.map +1 -0
- package/dist/lib/gitignore.js +54 -0
- package/dist/lib/gitignore.js.map +1 -0
- package/dist/lib/runtime-section.d.ts +118 -0
- package/dist/lib/runtime-section.d.ts.map +1 -0
- package/dist/lib/runtime-section.js +256 -0
- package/dist/lib/runtime-section.js.map +1 -0
- package/dist/lib/telemetry.d.ts +1 -1
- package/dist/lib/telemetry.d.ts.map +1 -1
- package/dist/lib/telemetry.js +4 -1
- package/dist/lib/telemetry.js.map +1 -1
- package/dist/queue/dead-letter.d.ts +5 -1
- package/dist/queue/dead-letter.d.ts.map +1 -1
- package/dist/queue/dead-letter.js +5 -1
- package/dist/queue/dead-letter.js.map +1 -1
- package/dist/queue/drain.d.ts +3 -2
- package/dist/queue/drain.d.ts.map +1 -1
- package/dist/queue/drain.js +3 -2
- package/dist/queue/drain.js.map +1 -1
- package/dist/queue/flush.d.ts +28 -12
- package/dist/queue/flush.d.ts.map +1 -1
- package/dist/queue/flush.js +43 -18
- package/dist/queue/flush.js.map +1 -1
- package/dist/queue/handlers/send-event.d.ts.map +1 -1
- package/dist/queue/handlers/send-event.js.map +1 -1
- package/dist/queue/index.d.ts +1 -2
- package/dist/queue/index.d.ts.map +1 -1
- package/dist/queue/index.js +2 -2
- package/dist/queue/index.js.map +1 -1
- package/dist/queue/paths.d.ts +4 -2
- package/dist/queue/paths.d.ts.map +1 -1
- package/dist/queue/paths.js +4 -2
- package/dist/queue/paths.js.map +1 -1
- package/dist/queue/process-file.d.ts +5 -1
- package/dist/queue/process-file.d.ts.map +1 -1
- package/dist/queue/process-file.js +5 -1
- package/dist/queue/process-file.js.map +1 -1
- package/dist/queue/snapshot.d.ts +4 -1
- package/dist/queue/snapshot.d.ts.map +1 -1
- package/dist/queue/snapshot.js +4 -1
- package/dist/queue/snapshot.js.map +1 -1
- package/dist/queue/spawn.d.ts +1 -3
- package/dist/queue/spawn.d.ts.map +1 -1
- package/dist/queue/spawn.js +1 -3
- package/dist/queue/spawn.js.map +1 -1
- package/dist/queue/submit.d.ts +6 -1
- package/dist/queue/submit.d.ts.map +1 -1
- package/dist/queue/submit.js +6 -1
- package/dist/queue/submit.js.map +1 -1
- package/dist/queue/types.d.ts +5 -1
- package/dist/queue/types.d.ts.map +1 -1
- package/dist/queue/types.js +5 -1
- package/dist/queue/types.js.map +1 -1
- package/dist/queue/worker-log.d.ts +3 -1
- package/dist/queue/worker-log.d.ts.map +1 -1
- package/dist/queue/worker-log.js +3 -1
- package/dist/queue/worker-log.js.map +1 -1
- package/package.json +1 -1
|
@@ -0,0 +1,901 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* IronBee CLI — Analytics Emit Orchestrator
|
|
4
|
+
*
|
|
5
|
+
* Per-trigger algorithm:
|
|
6
|
+
*
|
|
7
|
+
* 1. Load state (or initialState on miss)
|
|
8
|
+
* 2. Reset detection: schema_version, inode, size, first-KB hash
|
|
9
|
+
* 3. Tail-parse new transcript bytes (with truncate-during-read sanity)
|
|
10
|
+
* 4. Turn-safe boundary detection
|
|
11
|
+
* 5. Project delta + merge into accumulated
|
|
12
|
+
* 6. Substantive filter
|
|
13
|
+
* 7. Idempotency check (offset + final-flag)
|
|
14
|
+
* 8. EMIT-FIRST then atomic-rename state file
|
|
15
|
+
*
|
|
16
|
+
* Hook adapters call {@link emitAnalytics} after their existing primary
|
|
17
|
+
* work; failures here log and swallow — never affect the hook outcome.
|
|
18
|
+
*
|
|
19
|
+
* Submission goes DIRECTLY to the collector (`src/lib/collector.ts:sendToCollector`)
|
|
20
|
+
* — bypasses the queue intentionally. The queue's 4 KB per-line limit fits
|
|
21
|
+
* `tool_call` events but is too tight for full session aggregates. Direct
|
|
22
|
+
* HTTPS POST has no size limit; emit's own retry semantics (state isn't
|
|
23
|
+
* advanced on submit failure → next trigger retries) replace the queue's
|
|
24
|
+
* retry layer for this event type.
|
|
25
|
+
*/
|
|
26
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
27
|
+
exports.__test = void 0;
|
|
28
|
+
exports.emitAnalytics = emitAnalytics;
|
|
29
|
+
const logger_1 = require("../lib/logger");
|
|
30
|
+
const actions_1 = require("../hooks/core/actions");
|
|
31
|
+
const types_1 = require("./types");
|
|
32
|
+
const transcript_1 = require("./transcript");
|
|
33
|
+
const state_1 = require("./state");
|
|
34
|
+
const merge_1 = require("./merge");
|
|
35
|
+
const projection_1 = require("./projection");
|
|
36
|
+
const collector_1 = require("../lib/collector");
|
|
37
|
+
const config_1 = require("../lib/config");
|
|
38
|
+
/**
|
|
39
|
+
* Drive a full per-trigger projection and (maybe) emit. Fail-safe:
|
|
40
|
+
* any internal exception is caught + logged + swallowed; returns an
|
|
41
|
+
* outcome describing what happened.
|
|
42
|
+
*/
|
|
43
|
+
async function emitAnalytics(opts) {
|
|
44
|
+
try {
|
|
45
|
+
return await runEmit(opts);
|
|
46
|
+
}
|
|
47
|
+
catch (e) {
|
|
48
|
+
logger_1.logger.debug(`analytics emit: unexpected error: ${e instanceof Error ? e.message : e}`);
|
|
49
|
+
return { status: "error", reason: e instanceof Error ? e.message : String(e) };
|
|
50
|
+
}
|
|
51
|
+
}
|
|
52
|
+
// ─────────────────────────────────────────────────────────────────────────
|
|
53
|
+
// Internal: the algorithm
|
|
54
|
+
// ─────────────────────────────────────────────────────────────────────────
|
|
55
|
+
async function runEmit(opts) {
|
|
56
|
+
const { projectDir, sessionId, triggerType, endReason, log } = opts;
|
|
57
|
+
const isFinal = triggerType === "SessionEnd";
|
|
58
|
+
const transcriptSource = opts.transcriptSource ?? "claude-code";
|
|
59
|
+
const projectName = (0, actions_1.resolveProjectName)(projectDir);
|
|
60
|
+
// Per-event-type opt-ins (default false). Resolved once per emit so the
|
|
61
|
+
// gating decisions in the projection / force-close / drain branches
|
|
62
|
+
// below all see a consistent snapshot of the operator's config.
|
|
63
|
+
const turnEventsEnabled = (0, config_1.isAnalyticsTurnEventsEnabled)(projectDir);
|
|
64
|
+
const stepEventsEnabled = (0, config_1.isAnalyticsStepEventsEnabled)(projectDir);
|
|
65
|
+
const apiRequestEventsEnabled = (0, config_1.isAnalyticsApiRequestEventsEnabled)(projectDir);
|
|
66
|
+
log?.debug(`gating: emitTurnEvents=${turnEventsEnabled} emitStepEvents=${stepEventsEnabled} emitApiRequestEvents=${apiRequestEventsEnabled}`);
|
|
67
|
+
// ── Resolve transcript path ──────────────────────────────────────────
|
|
68
|
+
const transcriptPath = transcriptSource === "claude-code"
|
|
69
|
+
? (0, transcript_1.findClaudeTranscriptPath)(projectDir, sessionId)
|
|
70
|
+
: null; // Cursor TBD — return early below
|
|
71
|
+
if (transcriptPath === null) {
|
|
72
|
+
logger_1.logger.debug(`analytics emit: transcript not found for session ${sessionId}`);
|
|
73
|
+
log?.warn(`transcript: not found (source=${transcriptSource}) — skipping`);
|
|
74
|
+
return { status: "no-transcript", reason: "transcript path unresolved or missing" };
|
|
75
|
+
}
|
|
76
|
+
log?.info(`transcript: path=${transcriptPath} source=${transcriptSource} project=${projectName}`);
|
|
77
|
+
// ── 1. Load state (or fresh init) ────────────────────────────────────
|
|
78
|
+
const loaded = (0, state_1.readState)(projectDir, sessionId);
|
|
79
|
+
let state = loaded
|
|
80
|
+
?? (0, state_1.initialState)(sessionId, transcriptPath, projectName, transcriptSource);
|
|
81
|
+
if (loaded) {
|
|
82
|
+
log?.info(`state: loaded offset=${loaded.offset} last_emitted_offset=${loaded.last_emitted_offset} `
|
|
83
|
+
+ `last_emitted_is_final=${loaded.last_emitted_is_final} schema=${loaded.accumulated.schema_version} `
|
|
84
|
+
+ `user_count=${loaded.accumulated.turns.user_count} duration_min=${loaded.accumulated.time.duration_minutes}`);
|
|
85
|
+
}
|
|
86
|
+
else {
|
|
87
|
+
log?.info("state: fresh init (no existing state.json)");
|
|
88
|
+
}
|
|
89
|
+
// ── 2. Reset detection ───────────────────────────────────────────────
|
|
90
|
+
let stat;
|
|
91
|
+
try {
|
|
92
|
+
stat = (0, transcript_1.statTranscript)(transcriptPath);
|
|
93
|
+
}
|
|
94
|
+
catch (e) {
|
|
95
|
+
logger_1.logger.debug(`analytics emit: stat failed for ${transcriptPath}: ${e instanceof Error ? e.message : e}`);
|
|
96
|
+
log?.error(`transcript: stat failed: ${e instanceof Error ? e.message : e}`);
|
|
97
|
+
return { status: "no-transcript", reason: "stat failed (file missing or unreadable)" };
|
|
98
|
+
}
|
|
99
|
+
// For the reset check we MUST hash the SAME byte range that produced
|
|
100
|
+
// the recorded `transcript_first_kb_sha256`. Recorded hash covered
|
|
101
|
+
// min(1024, transcript_size_at_last_read) bytes — using a different
|
|
102
|
+
// range here would spuriously trigger "content_replaced" on growing
|
|
103
|
+
// small files (size < 1KB) where the new hash naturally covers more
|
|
104
|
+
// bytes. The fresh persist below uses min(1024, current size) so the
|
|
105
|
+
// window expands as the file grows past 1KB and locks at 1KB after.
|
|
106
|
+
const recordedHashLen = Math.min(transcript_1.FIRST_KB_HASH_LENGTH, state.transcript_size_at_last_read);
|
|
107
|
+
const currentRecheckHash = recordedHashLen > 0
|
|
108
|
+
? (0, transcript_1.firstKbSha256)(transcriptPath, recordedHashLen)
|
|
109
|
+
: "";
|
|
110
|
+
const currentFirstKbHash = (0, transcript_1.firstKbSha256)(transcriptPath);
|
|
111
|
+
log?.debug(`transcript: stat inode=${stat.inode} size=${stat.size} first_kb_hash=${currentFirstKbHash.slice(0, 12)}…`);
|
|
112
|
+
let resetReason = null;
|
|
113
|
+
if (state.accumulated.schema_version !== types_1.SCHEMA_VERSION) {
|
|
114
|
+
resetReason = "schema_version_changed";
|
|
115
|
+
}
|
|
116
|
+
else if (stat.inode !== state.transcript_inode && state.transcript_inode !== 0) {
|
|
117
|
+
resetReason = "inode_changed";
|
|
118
|
+
}
|
|
119
|
+
else if (stat.size < state.offset) {
|
|
120
|
+
resetReason = "size_shrunk";
|
|
121
|
+
}
|
|
122
|
+
else if (currentRecheckHash !== ""
|
|
123
|
+
&& state.transcript_first_kb_sha256 !== ""
|
|
124
|
+
&& currentRecheckHash !== state.transcript_first_kb_sha256) {
|
|
125
|
+
resetReason = "content_replaced";
|
|
126
|
+
}
|
|
127
|
+
if (resetReason !== null) {
|
|
128
|
+
logger_1.logger.debug(`analytics emit: reset state for session ${sessionId}: ${resetReason}`);
|
|
129
|
+
log?.warn(`reset: ${resetReason} — discarding accumulated state and re-parsing from offset 0`);
|
|
130
|
+
state = (0, state_1.initialState)(sessionId, transcriptPath, projectName, transcriptSource);
|
|
131
|
+
// Re-stat after reset to capture current values.
|
|
132
|
+
stat = (0, transcript_1.statTranscript)(transcriptPath);
|
|
133
|
+
}
|
|
134
|
+
// ── 2b. SessionEnd carve-out for stat.size == state.offset ──────────
|
|
135
|
+
let cameFromCarveOut = false;
|
|
136
|
+
let consumedLines = [];
|
|
137
|
+
let merged = state.accumulated;
|
|
138
|
+
let newInternal = state.internal;
|
|
139
|
+
let readSize = stat.size;
|
|
140
|
+
let boundaryOffset = state.offset;
|
|
141
|
+
if (stat.size === state.offset) {
|
|
142
|
+
if (!isFinal) {
|
|
143
|
+
log?.debug(`carve-out: no new bytes (offset=${state.offset}) and trigger=Stop — skipping`);
|
|
144
|
+
return { status: "skipped", reason: "no new bytes" };
|
|
145
|
+
}
|
|
146
|
+
if (state.last_emitted_is_final) {
|
|
147
|
+
log?.debug("carve-out: session already finalized — skipping");
|
|
148
|
+
return { status: "skipped", reason: "session already finalized" };
|
|
149
|
+
}
|
|
150
|
+
// Fall through with empty delta — emit is_final=true on existing accumulated.
|
|
151
|
+
cameFromCarveOut = true;
|
|
152
|
+
readSize = state.offset;
|
|
153
|
+
log?.info(`carve-out: SessionEnd with no new bytes (offset=${state.offset}) — emitting is_final on existing accumulated`);
|
|
154
|
+
// Skip steps 3-5; merged stays = state.accumulated, internal unchanged.
|
|
155
|
+
}
|
|
156
|
+
else {
|
|
157
|
+
// ── 3. Read new bytes (with truncate-during-read sanity) ────────
|
|
158
|
+
const slice = (0, transcript_1.readSlice)(transcriptPath, state.offset, stat.size);
|
|
159
|
+
if (slice.truncated) {
|
|
160
|
+
logger_1.logger.debug(`analytics emit: file truncated mid-read (session ${sessionId}); deferring to next trigger`);
|
|
161
|
+
log?.warn(`read: file truncated mid-read (offset=${state.offset}, expected_end=${stat.size}) — deferring`);
|
|
162
|
+
return { status: "skipped", reason: "truncated mid-read; eventual consistency" };
|
|
163
|
+
}
|
|
164
|
+
readSize = stat.size;
|
|
165
|
+
log?.debug(`read: bytes=${slice.bytes.length} (offset ${state.offset} → ${stat.size})`);
|
|
166
|
+
// ── 4. Parse + boundary detection ───────────────────────────────
|
|
167
|
+
const parseResult = (0, transcript_1.parseJsonl)(slice.bytes, state.offset);
|
|
168
|
+
boundaryOffset = (0, transcript_1.findTurnSafeBoundary)(parseResult.lines, parseResult.last_complete_byte, isFinal);
|
|
169
|
+
log?.debug(`parse: lines=${parseResult.lines.length} last_complete_byte=${parseResult.last_complete_byte} `
|
|
170
|
+
+ `boundary_offset=${boundaryOffset} is_final=${isFinal}`);
|
|
171
|
+
// ── 5. Project + merge ──────────────────────────────────────────
|
|
172
|
+
consumedLines = parseResult.lines.filter((ln) => ln.parsed !== null && ln.end <= boundaryOffset);
|
|
173
|
+
const linesForProjection = consumedLines
|
|
174
|
+
.filter((ln) => ln.parsed !== null)
|
|
175
|
+
.map((ln) => ln.parsed);
|
|
176
|
+
// Cross-slice idle attribution needs the prior session's last
|
|
177
|
+
// activity timestamp. Parse from accumulated.last_activity_time —
|
|
178
|
+
// empty / unparseable → undefined (first slice case).
|
|
179
|
+
const priorLastActivityParsed = state.accumulated.last_activity_time !== ""
|
|
180
|
+
? Date.parse(state.accumulated.last_activity_time)
|
|
181
|
+
: Number.NaN;
|
|
182
|
+
const priorLastActivityTsMs = Number.isFinite(priorLastActivityParsed)
|
|
183
|
+
? priorLastActivityParsed
|
|
184
|
+
: undefined;
|
|
185
|
+
const projectionInput = {
|
|
186
|
+
lines: linesForProjection,
|
|
187
|
+
startingTurnIndex: state.accumulated.turns.assistant_count + 1,
|
|
188
|
+
sessionId,
|
|
189
|
+
projectName,
|
|
190
|
+
transcriptSource,
|
|
191
|
+
priorLastAssistantTsMs: state.internal.last_assistant_ts_ms,
|
|
192
|
+
priorPendingToolUses: state.internal.pending_tool_uses,
|
|
193
|
+
priorLastActivityTsMs,
|
|
194
|
+
priorCurrentTurn: state.internal.current_turn,
|
|
195
|
+
priorNextTurnIndex: state.internal.next_turn_index,
|
|
196
|
+
priorSeenAssistantMessageIds: state.internal.seen_assistant_message_ids,
|
|
197
|
+
};
|
|
198
|
+
const delta = (0, projection_1.projectDelta)(projectionInput);
|
|
199
|
+
const deltaInternal = (0, projection_1.projectDeltaInternal)(projectionInput);
|
|
200
|
+
// Gate per-event-type opt-ins BEFORE merge: drop completed turn / step
|
|
201
|
+
// records from the delta when the flag is off so the pending queues
|
|
202
|
+
// never accumulate them. Session-level aggregation upstream of
|
|
203
|
+
// `delta.completed_turns` (turns_with_retry, one_shot_turns,
|
|
204
|
+
// category_breakdown) is unaffected — projection has already folded
|
|
205
|
+
// those into the delta's session-level fields.
|
|
206
|
+
if (!turnEventsEnabled && delta.completed_turns.length > 0) {
|
|
207
|
+
log?.debug(`gating: emitTurnEvents=false → dropping ${delta.completed_turns.length} completed turn(s) from delta`);
|
|
208
|
+
delta.completed_turns = [];
|
|
209
|
+
}
|
|
210
|
+
if (!stepEventsEnabled && delta.completed_steps.length > 0) {
|
|
211
|
+
log?.debug(`gating: emitStepEvents=false → dropping ${delta.completed_steps.length} completed step(s) from delta`);
|
|
212
|
+
delta.completed_steps = [];
|
|
213
|
+
}
|
|
214
|
+
if (!apiRequestEventsEnabled && delta.api_request_events.length > 0) {
|
|
215
|
+
log?.debug(`gating: emitApiRequestEvents=false → dropping ${delta.api_request_events.length} api_request event(s) from delta`);
|
|
216
|
+
delta.api_request_events = [];
|
|
217
|
+
}
|
|
218
|
+
log?.debug(`delta: user_count+=${delta.turns.user_count} assistant_msgs+=${delta.turns.assistant_count} `
|
|
219
|
+
+ `tool_uses=${Object.values(delta.tools).reduce((a, t) => a + t.count, 0)} `
|
|
220
|
+
+ `files_modified+=${delta.code_changes.files_modified} lines_added+=${delta.code_changes.lines_added} lines_removed+=${delta.code_changes.lines_removed} `
|
|
221
|
+
+ `errors+=${delta.errors.tool_errors_total} interruptions+=${delta.errors.user_interruptions} `
|
|
222
|
+
+ `pending_tool_uses=${Object.keys(delta.closing_pending_tool_uses).length}`);
|
|
223
|
+
const result = (0, merge_1.mergeAccumulated)(state.accumulated, state.internal, delta, deltaInternal);
|
|
224
|
+
merged = result.accumulated;
|
|
225
|
+
newInternal = result.internal;
|
|
226
|
+
const mergedToolCalls = Object.values(merged.tools).reduce((a, t) => a + t.count, 0);
|
|
227
|
+
log?.debug(`merged: user_count=${merged.turns.user_count} duration_min=${merged.time.duration_minutes.toFixed(2)} `
|
|
228
|
+
+ `tool_calls=${mergedToolCalls} files_modified=${merged.code_changes.files_modified} `
|
|
229
|
+
+ `lines_added=${merged.code_changes.lines_added} lines_removed=${merged.code_changes.lines_removed} `
|
|
230
|
+
+ `ctx_latest=${merged.context_tokens.latest} ctx_peak=${merged.context_tokens.peak}`);
|
|
231
|
+
}
|
|
232
|
+
// ── Resolve collector target ONCE per emit ──────────────────────────
|
|
233
|
+
// Avoid N redundant `loadConfig` fs reads (each drainPendingTurns /
|
|
234
|
+
// drainPendingSteps invocation, plus each per-batch
|
|
235
|
+
// sendEventsBatchToCollector call inside them, would otherwise re-read
|
|
236
|
+
// config from disk).
|
|
237
|
+
const collectorTarget = (0, collector_1.getCollectorTarget)(projectDir);
|
|
238
|
+
// ── 7. Idempotency check (offset + final-flag) ──────────────────────
|
|
239
|
+
const effectiveOffset = cameFromCarveOut ? state.offset : boundaryOffset;
|
|
240
|
+
const sameOffset = effectiveOffset === state.last_emitted_offset;
|
|
241
|
+
const finalNewlyTrue = isFinal && !state.last_emitted_is_final;
|
|
242
|
+
if (sameOffset && !finalNewlyTrue) {
|
|
243
|
+
// Idempotency: same offset + same final-flag → no new session_analytics
|
|
244
|
+
// payload to send. BUT if there are pending_turn_events OR
|
|
245
|
+
// pending_step_events queued from a prior trigger (session_analytics
|
|
246
|
+
// succeeded but individual turn / step sends failed), drain them
|
|
247
|
+
// here. Otherwise queues would only retry when fresh transcript
|
|
248
|
+
// bytes arrived — for an idle session they would stagnate
|
|
249
|
+
// indefinitely. Backend dedupes via deterministic turn_id /
|
|
250
|
+
// step_id so re-sends are safe.
|
|
251
|
+
//
|
|
252
|
+
// Force-close pre-flight: under Stop-closes-turn semantics every
|
|
253
|
+
// successful prior emit clears `current_turn` from state, so
|
|
254
|
+
// normally there is none to close here. But if state was restored
|
|
255
|
+
// from a sub-substantive snapshot (or other partial-persist edge
|
|
256
|
+
// case), an open turn might be carried over. Run the same §7b
|
|
257
|
+
// logic so the queue drain below covers it. Empty (0-assistant)
|
|
258
|
+
// turns are intentionally skipped — see §7b for the rationale.
|
|
259
|
+
if (newInternal.current_turn !== undefined && newInternal.current_turn.assistant_messages > 0) {
|
|
260
|
+
const openTurnState = newInternal.current_turn;
|
|
261
|
+
const closeEndTime = openTurnState.last_activity_time !== ""
|
|
262
|
+
? openTurnState.last_activity_time
|
|
263
|
+
: merged.last_activity_time;
|
|
264
|
+
const triggerEndReason = isFinal ? "session_end" : "stop";
|
|
265
|
+
const r = (0, projection_1.closeTurn)(openTurnState, {
|
|
266
|
+
endTime: closeEndTime,
|
|
267
|
+
endReason: triggerEndReason,
|
|
268
|
+
sessionId, projectName, transcriptSource,
|
|
269
|
+
});
|
|
270
|
+
(0, projection_1.applyBreakdownDelta)(merged.classification.category_breakdown, r);
|
|
271
|
+
merged.turns.with_retry += r.turns_with_retry_inc;
|
|
272
|
+
merged.turns.one_shot += r.one_shot_inc;
|
|
273
|
+
// Gate per-event-type opt-ins: session-level aggregates above
|
|
274
|
+
// (category_breakdown, turns.with_retry, turns.one_shot) are
|
|
275
|
+
// ALWAYS applied — they belong to session_analytics. The wire
|
|
276
|
+
// records below (turn_event, step_events) only push to their
|
|
277
|
+
// pending queues when the corresponding flag is on.
|
|
278
|
+
const finalQueue = turnEventsEnabled
|
|
279
|
+
? [...(newInternal.pending_turn_events ?? []), r.turn_event]
|
|
280
|
+
: (newInternal.pending_turn_events ?? []);
|
|
281
|
+
const finalStepQueue = stepEventsEnabled && r.step_events.length > 0
|
|
282
|
+
? [...(newInternal.pending_step_events ?? []), ...r.step_events]
|
|
283
|
+
: (newInternal.pending_step_events ?? []);
|
|
284
|
+
newInternal = {
|
|
285
|
+
...newInternal,
|
|
286
|
+
...(finalQueue.length > 0 ? { pending_turn_events: finalQueue } : {}),
|
|
287
|
+
...(finalStepQueue.length > 0 ? { pending_step_events: finalStepQueue } : {}),
|
|
288
|
+
};
|
|
289
|
+
delete newInternal.current_turn;
|
|
290
|
+
log?.debug(`force-close (idempotent path): turn_index=${r.turn_event.turn_index} `
|
|
291
|
+
+ `turn_id=${r.turn_event.turn_id} end_reason=${triggerEndReason} `
|
|
292
|
+
+ `category=${r.turn_event.category} duration=${r.turn_event.duration}ms `
|
|
293
|
+
+ `step_events=${r.step_events.length} `
|
|
294
|
+
+ `gates: turn=${turnEventsEnabled} step=${stepEventsEnabled}`);
|
|
295
|
+
}
|
|
296
|
+
else if (newInternal.current_turn !== undefined) {
|
|
297
|
+
// Open turn with 0 assistants — drop silently. Decrement
|
|
298
|
+
// merged.turns.user_count so the session-level count matches
|
|
299
|
+
// the number of turn events the backend will actually see.
|
|
300
|
+
const dropped = newInternal.current_turn;
|
|
301
|
+
log?.debug(`force-close (idempotent path): skipping empty turn `
|
|
302
|
+
+ `turn_index=${dropped.turn_index} (assistant_messages=0)`);
|
|
303
|
+
if (merged.turns.user_count > 0) {
|
|
304
|
+
merged.turns.user_count -= 1;
|
|
305
|
+
}
|
|
306
|
+
newInternal = { ...newInternal };
|
|
307
|
+
delete newInternal.current_turn;
|
|
308
|
+
}
|
|
309
|
+
const queuedTurns = newInternal.pending_turn_events ?? [];
|
|
310
|
+
const queuedSteps = newInternal.pending_step_events ?? [];
|
|
311
|
+
const queuedApiRequests = newInternal.pending_api_request_events ?? [];
|
|
312
|
+
if (queuedTurns.length === 0 && queuedSteps.length === 0 && queuedApiRequests.length === 0) {
|
|
313
|
+
const persistedState = {
|
|
314
|
+
...state,
|
|
315
|
+
offset: effectiveOffset,
|
|
316
|
+
accumulated: merged,
|
|
317
|
+
internal: newInternal,
|
|
318
|
+
last_read_at: new Date().toISOString(),
|
|
319
|
+
transcript_inode: cameFromCarveOut ? state.transcript_inode : stat.inode,
|
|
320
|
+
transcript_size_at_last_read: cameFromCarveOut ? state.transcript_size_at_last_read : readSize,
|
|
321
|
+
transcript_first_kb_sha256: cameFromCarveOut ? state.transcript_first_kb_sha256 : currentFirstKbHash,
|
|
322
|
+
};
|
|
323
|
+
(0, state_1.writeState)(projectDir, sessionId, persistedState);
|
|
324
|
+
log?.info(`skip: idempotent — offset=${effectiveOffset} matches last_emitted_offset and is_final unchanged`);
|
|
325
|
+
return { status: "skipped", reason: "offset unchanged and final-flag unchanged" };
|
|
326
|
+
}
|
|
327
|
+
// Drain-only path: re-attempt every queued turn + step + api_request
|
|
328
|
+
// via the shared helpers. State is persisted with the residual queues
|
|
329
|
+
// so the next trigger can keep retrying.
|
|
330
|
+
log?.info(`idempotent-with-drain: offset=${effectiveOffset} pending_turn_events=${queuedTurns.length} `
|
|
331
|
+
+ `pending_step_events=${queuedSteps.length} pending_api_request_events=${queuedApiRequests.length} — attempting drain`);
|
|
332
|
+
const drainBase = (0, actions_1.baseFields)(`${projectDir}/.ironbee/sessions/${sessionId}/actions.jsonl`);
|
|
333
|
+
let postDrainInternal = newInternal;
|
|
334
|
+
let turnDrainSent = 0;
|
|
335
|
+
let stepDrainSent = 0;
|
|
336
|
+
let apiRequestDrainSent = 0;
|
|
337
|
+
let turnDrainRetained = queuedTurns.length;
|
|
338
|
+
let stepDrainRetained = queuedSteps.length;
|
|
339
|
+
let apiRequestDrainRetained = queuedApiRequests.length;
|
|
340
|
+
// Defense-in-depth: when the per-event-type flag is OFF but the
|
|
341
|
+
// queue carries stale events from a prior session/state.json (e.g.
|
|
342
|
+
// operator just toggled the flag from true to false), DROP without
|
|
343
|
+
// sending — the contract is "flag false ⇒ zero events on the wire."
|
|
344
|
+
// The remainder is cleared from state so subsequent triggers don't
|
|
345
|
+
// re-attempt. (When the flag was already false during projection,
|
|
346
|
+
// the queues are empty and this branch is a no-op.)
|
|
347
|
+
if (queuedTurns.length > 0) {
|
|
348
|
+
if (turnEventsEnabled) {
|
|
349
|
+
const drain = await drainPendingTurns(queuedTurns, drainBase, sessionId, projectDir, log, "drain submit", collectorTarget);
|
|
350
|
+
postDrainInternal = withPendingTurnEvents(postDrainInternal, drain.remaining);
|
|
351
|
+
turnDrainSent = drain.sent;
|
|
352
|
+
turnDrainRetained = drain.remaining.length;
|
|
353
|
+
}
|
|
354
|
+
else {
|
|
355
|
+
log?.info(`gating: emitTurnEvents=false → dropping ${queuedTurns.length} stale pending turn event(s) without sending`);
|
|
356
|
+
postDrainInternal = withPendingTurnEvents(postDrainInternal, []);
|
|
357
|
+
turnDrainRetained = 0;
|
|
358
|
+
}
|
|
359
|
+
}
|
|
360
|
+
if (queuedSteps.length > 0) {
|
|
361
|
+
if (stepEventsEnabled) {
|
|
362
|
+
const stepDrain = await drainPendingSteps(queuedSteps, drainBase, sessionId, projectDir, log, "drain submit", collectorTarget);
|
|
363
|
+
postDrainInternal = withPendingStepEvents(postDrainInternal, stepDrain.remaining);
|
|
364
|
+
stepDrainSent = stepDrain.sent;
|
|
365
|
+
stepDrainRetained = stepDrain.remaining.length;
|
|
366
|
+
}
|
|
367
|
+
else {
|
|
368
|
+
log?.info(`gating: emitStepEvents=false → dropping ${queuedSteps.length} stale pending step event(s) without sending`);
|
|
369
|
+
postDrainInternal = withPendingStepEvents(postDrainInternal, []);
|
|
370
|
+
stepDrainRetained = 0;
|
|
371
|
+
}
|
|
372
|
+
}
|
|
373
|
+
if (queuedApiRequests.length > 0) {
|
|
374
|
+
if (apiRequestEventsEnabled) {
|
|
375
|
+
const apiDrain = await drainPendingApiRequests(queuedApiRequests, drainBase, sessionId, projectDir, log, "drain submit", collectorTarget);
|
|
376
|
+
postDrainInternal = withPendingApiRequestEvents(postDrainInternal, apiDrain.remaining);
|
|
377
|
+
apiRequestDrainSent = apiDrain.sent;
|
|
378
|
+
apiRequestDrainRetained = apiDrain.remaining.length;
|
|
379
|
+
}
|
|
380
|
+
else {
|
|
381
|
+
log?.info(`gating: emitApiRequestEvents=false → dropping ${queuedApiRequests.length} stale pending api_request event(s) without sending`);
|
|
382
|
+
postDrainInternal = withPendingApiRequestEvents(postDrainInternal, []);
|
|
383
|
+
apiRequestDrainRetained = 0;
|
|
384
|
+
}
|
|
385
|
+
}
|
|
386
|
+
const persistedState = {
|
|
387
|
+
...state,
|
|
388
|
+
offset: effectiveOffset,
|
|
389
|
+
accumulated: merged,
|
|
390
|
+
internal: postDrainInternal,
|
|
391
|
+
last_read_at: new Date().toISOString(),
|
|
392
|
+
transcript_inode: cameFromCarveOut ? state.transcript_inode : stat.inode,
|
|
393
|
+
transcript_size_at_last_read: cameFromCarveOut ? state.transcript_size_at_last_read : readSize,
|
|
394
|
+
transcript_first_kb_sha256: cameFromCarveOut ? state.transcript_first_kb_sha256 : currentFirstKbHash,
|
|
395
|
+
};
|
|
396
|
+
(0, state_1.writeState)(projectDir, sessionId, persistedState);
|
|
397
|
+
log?.info(`idempotent-with-drain: turn_sent=${turnDrainSent}/${queuedTurns.length} retained=${turnDrainRetained} `
|
|
398
|
+
+ `step_sent=${stepDrainSent}/${queuedSteps.length} retained=${stepDrainRetained} `
|
|
399
|
+
+ `api_request_sent=${apiRequestDrainSent}/${queuedApiRequests.length} retained=${apiRequestDrainRetained}`);
|
|
400
|
+
const reasonParts = ["offset unchanged"];
|
|
401
|
+
if (queuedTurns.length > 0) {
|
|
402
|
+
reasonParts.push(`drained ${turnDrainSent}/${queuedTurns.length} pending turn events`);
|
|
403
|
+
}
|
|
404
|
+
if (queuedSteps.length > 0) {
|
|
405
|
+
reasonParts.push(`drained ${stepDrainSent}/${queuedSteps.length} pending step events`);
|
|
406
|
+
}
|
|
407
|
+
if (queuedApiRequests.length > 0) {
|
|
408
|
+
reasonParts.push(`drained ${apiRequestDrainSent}/${queuedApiRequests.length} pending api_request events`);
|
|
409
|
+
}
|
|
410
|
+
return { status: "skipped", reason: reasonParts.join("; ") };
|
|
411
|
+
}
|
|
412
|
+
// ── 7b. Force-close open turn on EVERY trigger ───────────────────────
|
|
413
|
+
// Claude Code's "Stop" semantically means the agent has finished its
|
|
414
|
+
// current task — for analytics purposes, each Stop = one completed
|
|
415
|
+
// agent run = one closed turn. SessionEnd is the same shape with a
|
|
416
|
+
// different `end_reason`. This eliminates the open-turn discipline's
|
|
417
|
+
// wait-for-next-prompt latency (without it, step events for the just-
|
|
418
|
+
// finished agent run would not flow until the user's next prompt).
|
|
419
|
+
//
|
|
420
|
+
// closeTurn is pure — we build the per-turn record + per-step events
|
|
421
|
+
// from the projection's in-flight `current_turn`, apply the per-
|
|
422
|
+
// category breakdown delta, then drop `current_turn` from the persisted
|
|
423
|
+
// state so the next slice's projection starts fresh at the next user
|
|
424
|
+
// message.
|
|
425
|
+
//
|
|
426
|
+
// `end_reason` reflects which trigger fired:
|
|
427
|
+
// - SessionEnd → "session_end"
|
|
428
|
+
// - Stop → "stop"
|
|
429
|
+
//
|
|
430
|
+
// Guard: skip force-close when the open turn has zero assistant
|
|
431
|
+
// messages. Slash command host-meta filtering (isHumanTextUser →
|
|
432
|
+
// false on `<command-message>` blocks) eliminates the common cause
|
|
433
|
+
// of empty leading turns, but defense-in-depth: rare cases like
|
|
434
|
+
// host-killed-before-asst or a slice ending right at a user msg
|
|
435
|
+
// could still leave an empty open turn. Emitting it would pollute
|
|
436
|
+
// analytics with a 0-asst turn record (and 0 step events).
|
|
437
|
+
if (newInternal.current_turn !== undefined && newInternal.current_turn.assistant_messages > 0) {
|
|
438
|
+
const openTurnState = newInternal.current_turn;
|
|
439
|
+
const closeEndTime = openTurnState.last_activity_time !== ""
|
|
440
|
+
? openTurnState.last_activity_time
|
|
441
|
+
: merged.last_activity_time;
|
|
442
|
+
const triggerEndReason = isFinal ? "session_end" : "stop";
|
|
443
|
+
const r = (0, projection_1.closeTurn)(openTurnState, {
|
|
444
|
+
endTime: closeEndTime,
|
|
445
|
+
endReason: triggerEndReason,
|
|
446
|
+
sessionId, projectName, transcriptSource,
|
|
447
|
+
});
|
|
448
|
+
(0, projection_1.applyBreakdownDelta)(merged.classification.category_breakdown, r);
|
|
449
|
+
merged.turns.with_retry += r.turns_with_retry_inc;
|
|
450
|
+
merged.turns.one_shot += r.one_shot_inc;
|
|
451
|
+
// Gate per-event-type opt-ins: session-level aggregates above are
|
|
452
|
+
// ALWAYS applied; the wire records only push when their flag is on.
|
|
453
|
+
const finalQueue = turnEventsEnabled
|
|
454
|
+
? [...(newInternal.pending_turn_events ?? []), r.turn_event]
|
|
455
|
+
: (newInternal.pending_turn_events ?? []);
|
|
456
|
+
// Step events from this finalized turn — append to the step pending
|
|
457
|
+
// queue alongside any prior pending steps. The last entry in
|
|
458
|
+
// r.step_events carries `is_last_step=true` + the turn-bookend
|
|
459
|
+
// fields (turn_end_reason, turn_end_time, etc.).
|
|
460
|
+
const finalStepQueue = stepEventsEnabled && r.step_events.length > 0
|
|
461
|
+
? [...(newInternal.pending_step_events ?? []), ...r.step_events]
|
|
462
|
+
: (newInternal.pending_step_events ?? []);
|
|
463
|
+
newInternal = {
|
|
464
|
+
...newInternal,
|
|
465
|
+
...(finalQueue.length > 0 ? { pending_turn_events: finalQueue } : {}),
|
|
466
|
+
...(finalStepQueue.length > 0 ? { pending_step_events: finalStepQueue } : {}),
|
|
467
|
+
};
|
|
468
|
+
delete newInternal.current_turn;
|
|
469
|
+
log?.debug(`force-close: turn_index=${r.turn_event.turn_index} turn_id=${r.turn_event.turn_id} `
|
|
470
|
+
+ `end_reason=${triggerEndReason} category=${r.turn_event.category} `
|
|
471
|
+
+ `duration=${r.turn_event.duration}ms step_events=${r.step_events.length} `
|
|
472
|
+
+ `gates: turn=${turnEventsEnabled} step=${stepEventsEnabled}`);
|
|
473
|
+
}
|
|
474
|
+
else if (newInternal.current_turn !== undefined) {
|
|
475
|
+
// Open turn with 0 assistants (defensive: the slash-command empty
|
|
476
|
+
// leading turn case is already filtered upstream in
|
|
477
|
+
// isHumanTextUser, so this is rare). Drop without emitting — a
|
|
478
|
+
// 0-asst turn carries no analytic signal and would only pollute
|
|
479
|
+
// the backend's per-turn series. Decrement merged.user_turns so the
|
|
480
|
+
// session-level count stays in sync with the wire — an inflated
|
|
481
|
+
// user_turns vs zero corresponding turn event would confuse backend
|
|
482
|
+
// dashboards.
|
|
483
|
+
const dropped = newInternal.current_turn;
|
|
484
|
+
log?.debug(`force-close: skipping empty turn turn_index=${dropped.turn_index} `
|
|
485
|
+
+ `(assistant_messages=0) — no event emitted`);
|
|
486
|
+
if (merged.turns.user_count > 0) {
|
|
487
|
+
merged.turns.user_count -= 1;
|
|
488
|
+
}
|
|
489
|
+
newInternal = { ...newInternal };
|
|
490
|
+
delete newInternal.current_turn;
|
|
491
|
+
}
|
|
492
|
+
// ── 8. Compose snapshot, emit, then persist ─────────────────────────
|
|
493
|
+
// EMIT FIRST. Compose the full Event-shaped envelope. baseFields()
|
|
494
|
+
// populates id + session_id + project_name + user_email from the per-session
|
|
495
|
+
// actions.jsonl path; we add type + timestamp + the analytics body. This gives
|
|
496
|
+
// every IronBee event the same wire shape — collector schema stays uniform.
|
|
497
|
+
const actionsFile = `${projectDir}/.ironbee/sessions/${sessionId}/actions.jsonl`;
|
|
498
|
+
const base = (0, actions_1.baseFields)(actionsFile);
|
|
499
|
+
// Compose snapshot in canonical SessionAnalytics field order — per-trigger
|
|
500
|
+
// fields (`user_email`, `is_final`, `snapshot_at`, `offset`, `end_reason`)
|
|
501
|
+
// need to be slotted in their correct positions.
|
|
502
|
+
const snapshot = {
|
|
503
|
+
// Identity / provenance (flat)
|
|
504
|
+
session_id: merged.session_id,
|
|
505
|
+
project_name: merged.project_name,
|
|
506
|
+
// user_email is per-trigger composition (NOT in accumulated) — single
|
|
507
|
+
// source is `getUserEmail(sessionDir)` via baseFields. Omit when unset.
|
|
508
|
+
...(base.user_email !== undefined ? { user_email: base.user_email } : {}),
|
|
509
|
+
schema_version: types_1.SCHEMA_VERSION,
|
|
510
|
+
transcript_source: merged.transcript_source,
|
|
511
|
+
// Snapshot metadata
|
|
512
|
+
is_final: isFinal,
|
|
513
|
+
snapshot_at: new Date().toISOString(),
|
|
514
|
+
offset: effectiveOffset,
|
|
515
|
+
end_reason: isFinal ? endReason : undefined,
|
|
516
|
+
// Time bounds (flat ISO strings)
|
|
517
|
+
start_time: merged.start_time,
|
|
518
|
+
last_activity_time: merged.last_activity_time,
|
|
519
|
+
// Logical groups
|
|
520
|
+
time: merged.time,
|
|
521
|
+
turns: merged.turns,
|
|
522
|
+
classification: merged.classification,
|
|
523
|
+
usage: merged.usage,
|
|
524
|
+
// Per-model + user_messages
|
|
525
|
+
models: merged.models,
|
|
526
|
+
user_messages: merged.user_messages,
|
|
527
|
+
// Per-tool breakdown maps (top-level)
|
|
528
|
+
tools: merged.tools,
|
|
529
|
+
mcp_servers: merged.mcp_servers,
|
|
530
|
+
skills: merged.skills,
|
|
531
|
+
sub_agents: merged.sub_agents,
|
|
532
|
+
bash_binaries: merged.bash_binaries,
|
|
533
|
+
// Group rollups
|
|
534
|
+
tool_meta: merged.tool_meta,
|
|
535
|
+
code_changes: merged.code_changes,
|
|
536
|
+
errors: merged.errors,
|
|
537
|
+
user_activity: merged.user_activity,
|
|
538
|
+
context_tokens: merged.context_tokens,
|
|
539
|
+
process_errors: merged.process_errors,
|
|
540
|
+
};
|
|
541
|
+
const eventPayload = {
|
|
542
|
+
...base,
|
|
543
|
+
// Deterministic Event.id — re-emits at the same `(session_id, offset)`
|
|
544
|
+
// produce the same id so backend treats them as updates to the same
|
|
545
|
+
// logical checkpoint (interim → final, crash-recovery re-project,
|
|
546
|
+
// concurrent-worker race). `snapshot_at` lives in the body for
|
|
547
|
+
// collision tiebreaking, not in the id.
|
|
548
|
+
id: (0, projection_1.deriveSessionAnalyticsEventId)(merged.session_id, effectiveOffset),
|
|
549
|
+
type: actions_1.EventType.SESSION_ANALYTICS,
|
|
550
|
+
timestamp: Date.now(),
|
|
551
|
+
analytics: snapshot,
|
|
552
|
+
};
|
|
553
|
+
const wireBytes = Buffer.byteLength(JSON.stringify(eventPayload), "utf-8");
|
|
554
|
+
const totalToolCalls = Object.values(snapshot.tools).reduce((a, t) => a + t.count, 0);
|
|
555
|
+
const modelLabels = Object.entries(snapshot.models)
|
|
556
|
+
.sort((a, b) => b[1].count - a[1].count)
|
|
557
|
+
.map(([m, u]) => `${m}:${u.count}@$${u.cost_usd.toFixed(4)}`)
|
|
558
|
+
.join(",") || "none";
|
|
559
|
+
// Sort by output_size (best proxy for "tool footprint" / context
|
|
560
|
+
// contribution since per-tool cost isn't tracked).
|
|
561
|
+
const toolLabels = Object.entries(snapshot.tools)
|
|
562
|
+
.sort((a, b) => b[1].output_size - a[1].output_size)
|
|
563
|
+
.slice(0, 5)
|
|
564
|
+
.map(([n, u]) => `${n}:${u.count}/err${u.errors}/in:${u.input_size}/out:${u.output_size}`)
|
|
565
|
+
.join(",") || "none";
|
|
566
|
+
const mcpLabels = Object.entries(snapshot.mcp_servers)
|
|
567
|
+
.sort((a, b) => b[1].output_size - a[1].output_size)
|
|
568
|
+
.map(([n, u]) => `${n}:${u.count}/err${u.errors}/in:${u.input_size}/out:${u.output_size}`)
|
|
569
|
+
.join(",") || "none";
|
|
570
|
+
const errCount = Object.values(snapshot.process_errors.items)
|
|
571
|
+
.reduce((a, e) => a + e.count, 0);
|
|
572
|
+
const errSignatures = Object.keys(snapshot.process_errors.items).length;
|
|
573
|
+
const topCategories = Object.entries(snapshot.classification.category_breakdown)
|
|
574
|
+
.sort((a, b) => b[1].turns - a[1].turns)
|
|
575
|
+
.slice(0, 5)
|
|
576
|
+
.map(([n, c]) => `${n}:${c.turns}`)
|
|
577
|
+
.join(",") || "none";
|
|
578
|
+
log?.info(`submit: type=session_analytics event_id=${eventPayload.id} offset=${effectiveOffset} `
|
|
579
|
+
+ `is_final=${isFinal}${endReason ? ` end_reason=${endReason}` : ""} `
|
|
580
|
+
+ `wire_bytes=${wireBytes} user_count=${snapshot.turns.user_count} `
|
|
581
|
+
+ `duration_min=${snapshot.time.duration_minutes} active_min=${snapshot.time.active_minutes} `
|
|
582
|
+
+ `idle_min=${snapshot.time.idle_minutes} tool_calls=${totalToolCalls} `
|
|
583
|
+
+ `session_type=${snapshot.classification.session_type} categories=${topCategories} `
|
|
584
|
+
+ `retries=${snapshot.turns.with_retry} one_shot=${snapshot.turns.one_shot} `
|
|
585
|
+
+ `tokens=in:${snapshot.usage.input_tokens}/out:${snapshot.usage.output_tokens}/`
|
|
586
|
+
+ `cache_w:${snapshot.usage.cache_creation_tokens}/cache_r:${snapshot.usage.cache_read_tokens} `
|
|
587
|
+
+ `cost_usd=${snapshot.usage.cost_usd.toFixed(4)} `
|
|
588
|
+
+ `models=${modelLabels} mcp_servers=${mcpLabels} top_tools=${toolLabels}`
|
|
589
|
+
+ (snapshot.process_errors.has ? ` process_errors=${errCount}/${errSignatures}` : ""));
|
|
590
|
+
let submitted = false;
|
|
591
|
+
try {
|
|
592
|
+
// Direct HTTPS POST to collector — bypasses the 4 KB queue line
|
|
593
|
+
// limit. `sendEventsBatchToCollector` no-ops silently when the
|
|
594
|
+
// collector is disabled / not configured. Throws on network /
|
|
595
|
+
// timeout / non-2xx responses; we leave state un-advanced so the
|
|
596
|
+
// next trigger retries. Single-event array here keeps the wire
|
|
597
|
+
// shape uniform with the turn / step batch path so the collector
|
|
598
|
+
// sees one consistent endpoint contract.
|
|
599
|
+
//
|
|
600
|
+
// Pass the once-resolved `collectorTarget` so the helper does not
|
|
601
|
+
// re-load config from disk (mirrors the drain helpers below).
|
|
602
|
+
await (0, collector_1.sendEventsBatchToCollector)([eventPayload], sessionId, projectDir, collectorTarget);
|
|
603
|
+
submitted = true;
|
|
604
|
+
}
|
|
605
|
+
catch (e) {
|
|
606
|
+
logger_1.logger.debug(`analytics emit: send failed: ${e instanceof Error ? e.message : e}`);
|
|
607
|
+
log?.error(`submit: failed event_id=${eventPayload.id}: ${e instanceof Error ? e.message : e}`);
|
|
608
|
+
}
+    // ── 8b. Drain pending_turn_events + pending_step_events AFTER ────────
+    // session_analytics. Each SessionTurnEvent / SessionTurnStepEvent
+    // goes out as its own POST (deterministic turn_id / step_id →
+    // backend LWW deduplicates re-emits). Independent of the
+    // session_analytics submission: failed entries stay in their
+    // respective queue for retry next trigger; succeeded entries
+    // are removed. We only attempt drain when session_analytics
+    // succeeded — if the collector is unreachable for the primary
+    // event, no point hammering it with N+M more requests; the
+    // next trigger will retry the whole batch. Same helpers as the
+    // §7 idempotency-skip path uses.
+    const pendingTurns = newInternal.pending_turn_events ?? [];
+    const pendingSteps = newInternal.pending_step_events ?? [];
+    if (submitted && pendingTurns.length > 0) {
+        if (turnEventsEnabled) {
+            const drain = await drainPendingTurns(pendingTurns, base, sessionId, projectDir, log, "submit", collectorTarget);
+            log?.info(`turn-events: sent=${drain.sent}/${pendingTurns.length} retained=${drain.remaining.length}`);
+            newInternal = withPendingTurnEvents(newInternal, drain.remaining);
+        }
+        else {
+            // Defense-in-depth: stale queue from a prior toggle-true state.
+            // Drop without sending so the contract holds; clear the queue
+            // so subsequent triggers don't re-process.
+            log?.info(`gating: emitTurnEvents=false → dropping ${pendingTurns.length} stale pending turn event(s) without sending`);
+            newInternal = withPendingTurnEvents(newInternal, []);
+        }
+    }
+    if (submitted && pendingSteps.length > 0) {
+        if (stepEventsEnabled) {
+            const stepDrain = await drainPendingSteps(pendingSteps, base, sessionId, projectDir, log, "submit", collectorTarget);
+            log?.info(`step-events: sent=${stepDrain.sent}/${pendingSteps.length} retained=${stepDrain.remaining.length}`);
+            newInternal = withPendingStepEvents(newInternal, stepDrain.remaining);
+        }
+        else {
+            log?.info(`gating: emitStepEvents=false → dropping ${pendingSteps.length} stale pending step event(s) without sending`);
+            newInternal = withPendingStepEvents(newInternal, []);
+        }
+    }
+    // Per-API-request drain — same gate/drop pattern as turn/step. Only attempt
+    // when session_analytics succeeded; on submit failure leave queue intact.
+    const pendingApiRequests = newInternal.pending_api_request_events ?? [];
+    if (submitted && pendingApiRequests.length > 0) {
+        if (apiRequestEventsEnabled) {
+            const apiDrain = await drainPendingApiRequests(pendingApiRequests, base, sessionId, projectDir, log, "submit", collectorTarget);
+            log?.info(`api-request-events: sent=${apiDrain.sent}/${pendingApiRequests.length} retained=${apiDrain.remaining.length}`);
+            newInternal = withPendingApiRequestEvents(newInternal, apiDrain.remaining);
+        }
+        else {
+            log?.info(`gating: emitApiRequestEvents=false → dropping ${pendingApiRequests.length} stale pending api_request event(s) without sending`);
+            newInternal = withPendingApiRequestEvents(newInternal, []);
+        }
+    }
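All three queues instantiate the same gate-or-drop shape. As an illustration (the helper below is invented for this sketch and does not exist in the package):

```ts
// Generic form of the three blocks above: drain when the toggle is on,
// drop a stale queue when it is off, and always persist the outcome.
async function gateOrDrop<T>(
    enabled: boolean,
    pending: T[],
    drain: (items: T[]) => Promise<{ sent: number; remaining: T[] }>,
    persist: (remaining: T[]) => void,
): Promise<void> {
    if (pending.length === 0) return;
    if (enabled) {
        const { remaining } = await drain(pending); // failed batches retained
        persist(remaining);
    } else {
        persist([]); // stale queue from a prior toggle-true state: clear, never send
    }
}
```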
+    // (When submitted is false, all three queues stay intact for next trigger.)
+    if (submitted) {
+        // THEN write state. A crash between send and write means the next
+        // trigger re-derives the same delta and re-sends — backend's LWW
+        // overwrites with identical content (idempotent). No data loss.
+        const persistedState = {
+            session_id: sessionId,
+            transcript_path: transcriptPath,
+            transcript_inode: cameFromCarveOut ? state.transcript_inode : stat.inode,
+            transcript_size_at_last_read: cameFromCarveOut ? state.transcript_size_at_last_read : readSize,
+            transcript_first_kb_sha256: cameFromCarveOut ? state.transcript_first_kb_sha256 : currentFirstKbHash,
+            offset: effectiveOffset,
+            last_read_at: new Date().toISOString(),
+            last_emitted_offset: effectiveOffset,
+            last_emitted_is_final: isFinal,
+            accumulated: merged,
+            internal: newInternal,
+        };
+        (0, state_1.writeState)(projectDir, sessionId, persistedState);
+        log?.info(`state persisted: offset=${effectiveOffset} last_emitted_offset=${effectiveOffset} last_emitted_is_final=${isFinal}`);
+        return { status: "emitted", reason: isFinal ? "final snapshot" : "interim snapshot", offset: effectiveOffset };
+    }
+    return { status: "error", reason: "submit failed; state not advanced" };
+}
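The send-then-persist ordering yields at-least-once delivery that stays safe because event ids are deterministic. A toy sketch of that property; the hash recipe here is invented for illustration (the real ids come from `deriveTurnEventId` / `deriveStepEventId`):

```ts
import { createHash } from "node:crypto";

// Hypothetical id derivation: a pure function of stable inputs means a
// crash between send and writeState just replays the identical event,
// and backend last-write-wins overwrites it with equal content.
function deriveEventId(sessionId: string, offset: number): string {
    return createHash("sha256").update(`${sessionId}|${offset}`).digest("hex");
}

// Same inputs before and after a crash produce the same id.
console.assert(deriveEventId("s1", 4096) === deriveEventId("s1", 4096));
```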
+/**
+ * Default per-POST batch size for turn / step drains. Used when
+ * `collector.batchSize` isn't configured. Mirrors the default used by
+ * the queue's `send-event` handler (`DEFAULT_BATCH_SIZE = 100`).
+ *
+ * Why batched POSTs: a 100-turn (or 100-step) drain at 1 event per POST
+ * = 100 RTTs ≈ 20s sequential or ~2.5s with concurrency 8 — easily
+ * exceeds the host's SessionEnd hook timeout. Batching collapses that
+ * to 1 RTT (~200ms) for the same payload size, fitting comfortably in
+ * any reasonable hook budget.
+ */
+const DEFAULT_DRAIN_BATCH_SIZE = 100;
+/** Resolves the configured `collector.batchSize` or falls back to the default. */
+function resolveBatchSize(target) {
+    if (typeof target.batchSize === "number" && target.batchSize >= 1) {
+        return Math.floor(target.batchSize);
+    }
+    return DEFAULT_DRAIN_BATCH_SIZE;
+}
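The round-trip arithmetic from the doc comment above is easy to check. A small sketch; the 200 ms round-trip figure is the comment's own assumption:

```ts
// One POST per batch, so an N-event drain costs ceil(N / batchSize)
// round trips. With the comment's assumed ~200 ms RTT:
function estimatedDrainMs(events: number, batchSize: number, rttMs = 200): number {
    return Math.ceil(events / batchSize) * rttMs;
}

estimatedDrainMs(100, 1);   // 20000 ms: the pathological one-per-POST case
estimatedDrainMs(100, 100); //   200 ms: a single batched POST
```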
+/**
+ * Batched POST of every queued turn event to the collector. Events are
+ * chunked into {@link resolveBatchSize}-sized slices and each slice is
+ * sent as a SINGLE HTTP POST containing the full array — collapses an
+ * N-event drain from N RTTs to ~`ceil(N/batchSize)` RTTs.
+ *
+ * Per-batch failure granularity: if a batch POST fails (network /
+ * timeout / non-2xx), every event in that batch is retained for the
+ * next trigger. Backend deduplicates via deterministic `turn_id` so
+ * re-sending events that were partially delivered (collector accepted
+ * some but the response was lost) is safe.
+ *
+ * Pure with respect to `state.json` — caller is responsible for
+ * persisting `remaining` back into `state.internal.pending_turn_events`.
+ */
+async function drainPendingTurns(pending, base, sessionId, projectDir, log, logPrefix, target) {
+    const events = pending.map((turn) => ({
+        ...base,
+        // Deterministic — derived from turn_id (sha256(session|index|start)).
+        // Re-emits of the same turn produce the same Event.id.
+        id: (0, projection_1.deriveTurnEventId)(sessionId, turn.turn_id),
+        type: actions_1.EventType.SESSION_TURN,
+        timestamp: Date.now(),
+        turn,
+    }));
+    // Resolve batch size from collector config (default 100). When the
+    // collector isn't configured, sendEventsBatchToCollector is a silent
+    // no-op below — but we still chunk by some sensible default to keep
+    // the loop bounded.
+    const batchSize = target !== null ? resolveBatchSize(target) : DEFAULT_DRAIN_BATCH_SIZE;
+    const remaining = [];
+    let sent = 0;
+    for (let i = 0; i < events.length; i += batchSize) {
+        const batchEnd = Math.min(i + batchSize, events.length);
+        const batch = events.slice(i, batchEnd);
+        const batchPending = pending.slice(i, batchEnd);
+        try {
+            await (0, collector_1.sendEventsBatchToCollector)(batch, sessionId, projectDir, target);
+            sent += batch.length;
+            log?.debug(`${logPrefix}: type=session_turn_analytics batch_count=${batch.length} `
+                + `first_turn_index=${batchPending[0].turn_index}`);
+        }
+        catch (e) {
+            // Whole-batch retain on failure — backend dedupes via turn_id
+            // so re-sending events that may have partially landed is safe.
+            for (const t of batchPending) {
+                remaining.push(t);
+            }
+            logger_1.logger.debug(`analytics emit: turn batch send failed: ${e instanceof Error ? e.message : e}`);
+            log?.error(`${logPrefix} failed: type=session_turn_analytics batch_count=${batch.length}: `
+                + `${e instanceof Error ? e.message : e}`);
+        }
+    }
+    return { sent, remaining };
+}
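The return shape both drain helpers share is implied by the call sites earlier in the file; written out, under the assumption that the TypeScript source types it roughly like this:

```ts
// Inferred from usage: `sent` counts collector-acknowledged events,
// `remaining` carries whole failed batches for the next trigger.
interface DrainResult<T> {
    sent: number;
    remaining: T[];
}
```

Because the drain never touches state.json itself, the caller must pair it with the matching `withPending*` setter and persist the result, as the `drain.remaining` handoff in the main function shows.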
+/**
+ * Apply a `DrainResult` back onto an `internal` map. Returns a fresh
+ * internal copy with `pending_turn_events` either replaced (still has
+ * remaining items) or fully removed (queue cleared).
+ */
+function withPendingTurnEvents(internal, remaining) {
+    if (remaining.length > 0) {
+        return { ...internal, pending_turn_events: remaining };
+    }
+    const next = { ...internal };
+    delete next.pending_turn_events;
+    return next;
+}
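Deleting the key rather than storing an empty array keeps fully drained queues out of the serialized state. A quick illustration (calling the module-internal helper directly, and with an invented `schema` field, purely for the sketch):

```ts
// A drained queue disappears from the serialized state entirely,
// rather than lingering as `pending_turn_events: []`.
const internal = { schema: 1, pending_turn_events: [{ turn_id: "t1" }] };
const drained = withPendingTurnEvents(internal, []);
JSON.stringify(drained); // '{"schema":1}'
```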
+/**
+ * Batched POST of every queued step event. Mirrors {@link drainPendingTurns}
+ * one level deeper. Step queues are typically larger (N×K entries — N
+ * turns × K steps/turn) so batching is the bigger win here: a 100-step
+ * drain becomes a single ~200ms POST instead of 100 round-trips.
+ *
+ * Per-batch failure: every event in the failed batch is retained for
+ * retry. Backend dedupes via deterministic `step_id` so re-sends are
+ * idempotent.
+ */
+async function drainPendingSteps(pending, base, sessionId, projectDir, log, logPrefix, target) {
+    const events = pending.map((step) => ({
+        ...base,
+        // Deterministic — derived from step_id (sha256(turn|index|start)).
+        // Re-emits of the same step produce the same Event.id.
+        id: (0, projection_1.deriveStepEventId)(sessionId, step.step_id),
+        type: actions_1.EventType.SESSION_TURN_STEP,
+        timestamp: Date.now(),
+        step,
+    }));
+    const batchSize = target !== null ? resolveBatchSize(target) : DEFAULT_DRAIN_BATCH_SIZE;
+    const remaining = [];
+    let sent = 0;
+    for (let i = 0; i < events.length; i += batchSize) {
+        const batchEnd = Math.min(i + batchSize, events.length);
+        const batch = events.slice(i, batchEnd);
+        const batchPending = pending.slice(i, batchEnd);
+        try {
+            await (0, collector_1.sendEventsBatchToCollector)(batch, sessionId, projectDir, target);
+            sent += batch.length;
+            log?.debug(`${logPrefix}: type=session_turn_step_analytics batch_count=${batch.length} `
+                + `first=(turn_index=${batchPending[0].turn_index},step_index=${batchPending[0].step_index})`);
+        }
+        catch (e) {
+            for (const s of batchPending) {
+                remaining.push(s);
+            }
+            logger_1.logger.debug(`analytics emit: step batch send failed: ${e instanceof Error ? e.message : e}`);
+            log?.error(`${logPrefix} failed: type=session_turn_step_analytics batch_count=${batch.length}: `
+                + `${e instanceof Error ? e.message : e}`);
+        }
+    }
+    return { sent, remaining };
+}
+/**
+ * Same shape as {@link withPendingTurnEvents} but for the step queue.
+ */
+function withPendingStepEvents(internal, remaining) {
+    if (remaining.length > 0) {
+        return { ...internal, pending_step_events: remaining };
+    }
+    const next = { ...internal };
+    delete next.pending_step_events;
+    return next;
+}
+/**
+ * Batched POST of every queued api_request body. Each batch wraps the
+ * body-only `APIRequestAnalytics` records with the {@link Event} envelope
+ * (id from `api.id` — transcript line uuid, deterministic across re-
+ * projections; type=`api_request`; timestamp from the body's
+ * `timestamp_ms` so the wire timestamp reflects when the API call
+ * completed — NOT when the wire envelope was constructed).
+ *
+ * Per-batch failure: every record in the failed batch is retained for
+ * retry. Backend dedup key is `(session_id, request_id)` for successes;
+ * failures (request_id=null) rely on (session_id, id) best-effort.
+ */
+async function drainPendingApiRequests(pending, base, sessionId, projectDir, log, logPrefix, target) {
+    const events = pending.map((api) => ({
+        ...base,
+        // Event.id is the transcript line's deterministic uuid (carried
+        // through projection). Re-projections of the same bytes produce
+        // identical ids — backend can dedup on `(session_id, id)`.
+        id: api.id,
+        type: actions_1.EventType.API_REQUEST,
+        timestamp: api.timestamp_ms,
+        request_id: api.request_id,
+        success: api.success,
+        error: api.error,
+        status_code: api.status_code,
+        model: api.model,
+        speed: api.speed,
+        input_tokens: api.input_tokens,
+        output_tokens: api.output_tokens,
+        cache_read_tokens: api.cache_read_tokens,
+        cache_creation_tokens: api.cache_creation_tokens,
+        cost_usd: api.cost_usd,
+        duration: api.duration,
+    }));
+    const batchSize = target !== null ? resolveBatchSize(target) : DEFAULT_DRAIN_BATCH_SIZE;
+    const remaining = [];
+    let sent = 0;
+    for (let i = 0; i < events.length; i += batchSize) {
+        const batchEnd = Math.min(i + batchSize, events.length);
+        const batch = events.slice(i, batchEnd);
+        const batchPending = pending.slice(i, batchEnd);
+        try {
+            await (0, collector_1.sendEventsBatchToCollector)(batch, sessionId, projectDir, target);
+            sent += batch.length;
+            log?.debug(`${logPrefix}: type=api_request batch_count=${batch.length} `
+                + `first_request_id=${batchPending[0].request_id ?? "<null>"} `
+                + `success_count=${batchPending.filter((a) => a.success).length}`);
+        }
+        catch (e) {
+            for (const a of batchPending) {
+                remaining.push(a);
+            }
+            logger_1.logger.debug(`analytics emit: api_request batch send failed: ${e instanceof Error ? e.message : e}`);
+            log?.error(`${logPrefix} failed: type=api_request batch_count=${batch.length}: `
+                + `${e instanceof Error ? e.message : e}`);
+        }
+    }
+    return { sent, remaining };
+}
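The two dedup paths named in the doc comment (request_id for successes, the transcript-line id for failures) could be keyed as below. A hypothetical sketch of a backend-side key, not the collector's actual schema:

```ts
// Hypothetical dedup key mirroring the doc comment above.
function apiRequestDedupKey(
    sessionId: string,
    ev: { id: string; request_id: string | null },
): string {
    return ev.request_id !== null
        ? `${sessionId}|req:${ev.request_id}` // successes: stable across re-emits
        : `${sessionId}|line:${ev.id}`;       // failures: transcript-line uuid, best effort
}
```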
+/**
+ * Same shape as {@link withPendingTurnEvents} but for the api_request queue.
+ */
+function withPendingApiRequestEvents(internal, remaining) {
+    if (remaining.length > 0) {
+        return { ...internal, pending_api_request_events: remaining };
+    }
+    const next = { ...internal };
+    delete next.pending_api_request_events;
+    return next;
+}
+/** Exposed for tests — never call from prod code. */
+exports.__test = {
+    emptyAccumulated: state_1.emptyAccumulated,
+};
+//# sourceMappingURL=emit.js.map