sensorium-mcp 3.0.3 → 3.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. package/dist/dashboard/routes/data.d.ts.map +1 -1
  2. package/dist/dashboard/routes/data.js +2 -1
  3. package/dist/dashboard/routes/data.js.map +1 -1
  4. package/dist/dashboard/routes/threads.js +1 -1
  5. package/dist/dashboard/routes/threads.js.map +1 -1
  6. package/dist/dashboard/routes.d.ts.map +1 -1
  7. package/dist/dashboard/routes.js +1 -3
  8. package/dist/dashboard/routes.js.map +1 -1
  9. package/dist/data/memory/migration-runner.d.ts +1 -1
  10. package/dist/data/memory/migration-runner.d.ts.map +1 -1
  11. package/dist/data/memory/migration-runner.js +59 -3
  12. package/dist/data/memory/migration-runner.js.map +1 -1
  13. package/dist/data/memory/schema-ddl.d.ts +1 -1
  14. package/dist/data/memory/schema-ddl.d.ts.map +1 -1
  15. package/dist/data/memory/schema-ddl.js +2 -1
  16. package/dist/data/memory/schema-ddl.js.map +1 -1
  17. package/dist/data/memory/thread-registry.js +1 -1
  18. package/dist/data/memory/thread-registry.js.map +1 -1
  19. package/dist/http-server.d.ts.map +1 -1
  20. package/dist/http-server.js +1 -9
  21. package/dist/http-server.js.map +1 -1
  22. package/dist/index.js +3 -6
  23. package/dist/index.js.map +1 -1
  24. package/dist/server/factory.js +1 -1
  25. package/dist/server/factory.js.map +1 -1
  26. package/dist/services/agent-spawn.service.d.ts +7 -1
  27. package/dist/services/agent-spawn.service.d.ts.map +1 -1
  28. package/dist/services/agent-spawn.service.js +69 -45
  29. package/dist/services/agent-spawn.service.js.map +1 -1
  30. package/dist/services/consolidation.service.d.ts.map +1 -1
  31. package/dist/services/consolidation.service.js +49 -35
  32. package/dist/services/consolidation.service.js.map +1 -1
  33. package/dist/services/keeper.service.d.ts +21 -0
  34. package/dist/services/keeper.service.d.ts.map +1 -0
  35. package/dist/services/keeper.service.js +195 -0
  36. package/dist/services/keeper.service.js.map +1 -0
  37. package/dist/services/maintenance-signal.d.ts +2 -0
  38. package/dist/services/maintenance-signal.d.ts.map +1 -1
  39. package/dist/services/maintenance-signal.js +7 -1
  40. package/dist/services/maintenance-signal.js.map +1 -1
  41. package/dist/services/process.service.d.ts +19 -2
  42. package/dist/services/process.service.d.ts.map +1 -1
  43. package/dist/services/process.service.js +104 -10
  44. package/dist/services/process.service.js.map +1 -1
  45. package/dist/services/reconnect-snapshot.service.d.ts.map +1 -1
  46. package/dist/services/reconnect-snapshot.service.js +20 -3
  47. package/dist/services/reconnect-snapshot.service.js.map +1 -1
  48. package/dist/services/thread-lifecycle.service.d.ts +5 -0
  49. package/dist/services/thread-lifecycle.service.d.ts.map +1 -1
  50. package/dist/services/thread-lifecycle.service.js +33 -8
  51. package/dist/services/thread-lifecycle.service.js.map +1 -1
  52. package/dist/services/worker-cleanup.service.d.ts +14 -1
  53. package/dist/services/worker-cleanup.service.d.ts.map +1 -1
  54. package/dist/services/worker-cleanup.service.js +48 -27
  55. package/dist/services/worker-cleanup.service.js.map +1 -1
  56. package/dist/sessions.d.ts +0 -5
  57. package/dist/sessions.d.ts.map +1 -1
  58. package/dist/sessions.js +0 -7
  59. package/dist/sessions.js.map +1 -1
  60. package/dist/stdio-server.d.ts.map +1 -1
  61. package/dist/stdio-server.js +1 -7
  62. package/dist/stdio-server.js.map +1 -1
  63. package/dist/tools/delegate-tool.d.ts.map +1 -1
  64. package/dist/tools/delegate-tool.js +2 -2
  65. package/dist/tools/delegate-tool.js.map +1 -1
  66. package/dist/tools/session-tools.js +1 -1
  67. package/dist/tools/session-tools.js.map +1 -1
  68. package/dist/tools/start-session-tool.d.ts.map +1 -1
  69. package/dist/tools/start-session-tool.js +8 -9
  70. package/dist/tools/start-session-tool.js.map +1 -1
  71. package/dist/tools/wait/message-processing.d.ts.map +1 -1
  72. package/dist/tools/wait/message-processing.js +28 -0
  73. package/dist/tools/wait/message-processing.js.map +1 -1
  74. package/dist/tools/wait/poll-loop.js +1 -1
  75. package/dist/tools/wait/poll-loop.js.map +1 -1
  76. package/package.json +1 -1
  77. package/dist/tools/thread-lifecycle.d.ts +0 -6
  78. package/dist/tools/thread-lifecycle.d.ts.map +0 -1
  79. package/dist/tools/thread-lifecycle.js +0 -6
  80. package/dist/tools/thread-lifecycle.js.map +0 -1
  81. package/supervisor/config.go +0 -253
  82. package/supervisor/config_test.go +0 -78
  83. package/supervisor/go.mod +0 -15
  84. package/supervisor/go.sum +0 -20
  85. package/supervisor/health.go +0 -433
  86. package/supervisor/health_test.go +0 -93
  87. package/supervisor/keeper.go +0 -309
  88. package/supervisor/keeper_test.go +0 -27
  89. package/supervisor/lock.go +0 -57
  90. package/supervisor/lock_test.go +0 -54
  91. package/supervisor/log.go +0 -195
  92. package/supervisor/log_test.go +0 -125
  93. package/supervisor/main.go +0 -461
  94. package/supervisor/main_test.go +0 -130
  95. package/supervisor/notify.go +0 -53
  96. package/supervisor/process.go +0 -294
  97. package/supervisor/process_test.go +0 -108
  98. package/supervisor/process_unix.go +0 -14
  99. package/supervisor/process_windows.go +0 -15
  100. package/supervisor/secrets.go +0 -95
  101. package/supervisor/secrets_securevault_test.go +0 -98
  102. package/supervisor/secrets_test.go +0 -119
  103. package/supervisor/self_update.go +0 -282
  104. package/supervisor/self_update_test.go +0 -177
  105. package/supervisor/service_restart_stub.go +0 -9
  106. package/supervisor/service_restart_windows.go +0 -63
  107. package/supervisor/service_stub.go +0 -15
  108. package/supervisor/service_windows.go +0 -194
  109. package/supervisor/update_state.go +0 -264
  110. package/supervisor/update_state_test.go +0 -306
  111. package/supervisor/updater.go +0 -613
  112. package/supervisor/updater_test.go +0 -64
@@ -1,613 +0,0 @@
1
- package main
2
-
3
- import (
4
- "context"
5
- "encoding/json"
6
- "fmt"
7
- "io"
8
- "net/http"
9
- "os"
10
- "os/exec"
11
- "path/filepath"
12
- "runtime"
13
- "strings"
14
- "syscall"
15
- "time"
16
- )
17
-
18
- const registryURL = "https://registry.npmjs.org/sensorium-mcp/latest"
19
- const supervisorReleaseURL = "https://api.github.com/repos/andriyshevchenko/remote-copilot-mcp/releases/tags/supervisor-latest"
20
-
21
- var (
22
- notifyUpdaterOperator = NotifyOperator
23
- mcpUpdateReadyPollInterval = 3 * time.Second
24
- mcpUpdateReadyTimeout = 60 * time.Second
25
- )
26
-
27
- // Updater checks the npm registry for new versions and performs updates.
28
- type Updater struct {
29
- cfg Config
30
- mcp *MCPClient
31
- log *Logger
32
- state *UpdateStateStore
33
- startAt time.Time
34
- cancel context.CancelFunc
35
- done chan struct{}
36
- }
37
-
38
- func NewUpdater(cfg Config, mcp *MCPClient, log *Logger) *Updater {
39
- return &Updater{
40
- cfg: cfg,
41
- mcp: mcp,
42
- log: log,
43
- state: NewUpdateStateStore(cfg.Paths.UpdateState, log),
44
- startAt: time.Now(),
45
- done: make(chan struct{}),
46
- }
47
- }
48
-
49
- // Start begins the update check loop.
50
- func (u *Updater) Start() {
51
- ctx, cancel := context.WithCancel(context.Background())
52
- u.cancel = cancel
53
- go u.run(ctx)
54
- }
55
-
56
- // Stop signals the updater to shut down and waits.
57
- func (u *Updater) Stop() {
58
- if u.cancel != nil {
59
- u.cancel()
60
- }
61
- <-u.done
62
- }
63
-
64
- func (u *Updater) run(ctx context.Context) {
65
- defer close(u.done)
66
- u.log.Info("Updater started (mode=%s)", u.cfg.Mode)
67
-
68
- // In development mode, check every PollInterval.
69
- // In production, check once per day at PollAtHour.
70
- for {
71
- var sleepDuration time.Duration
72
- if u.cfg.Mode == "development" {
73
- sleepDuration = u.cfg.PollInterval
74
- } else {
75
- sleepDuration = u.timeUntilNextPoll()
76
- }
77
- u.log.Debug("Updater: next version check in %v", sleepDuration.Round(time.Second))
78
-
79
- select {
80
- case <-ctx.Done():
81
- return
82
- case <-time.After(sleepDuration):
83
- }
84
-
85
- if ctx.Err() != nil {
86
- return
87
- }
88
-
89
- u.checkAndUpdate(ctx)
90
- if ctx.Err() != nil {
91
- return
92
- }
93
- u.checkSupervisorUpdate(ctx)
94
- }
95
- }
96
-
97
- func (u *Updater) timeUntilNextPoll() time.Duration {
98
- now := time.Now()
99
- next := time.Date(now.Year(), now.Month(), now.Day(), u.cfg.PollAtHour, 0, 0, 0, now.Location())
100
- if next.Before(now) {
101
- next = next.Add(24 * time.Hour)
102
- }
103
- return time.Until(next)
104
- }
105
-
106
- // getRemoteVersion fetches the latest version from npm registry.
107
- func (u *Updater) getRemoteVersion(ctx context.Context) (string, error) {
108
- ctx2, cancel := context.WithTimeout(ctx, 15*time.Second)
109
- defer cancel()
110
-
111
- req, err := http.NewRequestWithContext(ctx2, "GET", registryURL, nil)
112
- if err != nil {
113
- return "", err
114
- }
115
-
116
- resp, err := http.DefaultClient.Do(req)
117
- if err != nil {
118
- return "", err
119
- }
120
- defer resp.Body.Close()
121
-
122
- if resp.StatusCode != 200 {
123
- return "", fmt.Errorf("npm registry HTTP %d", resp.StatusCode)
124
- }
125
-
126
- var pkg struct {
127
- Version string `json:"version"`
128
- }
129
- if err := json.NewDecoder(resp.Body).Decode(&pkg); err != nil {
130
- return "", err
131
- }
132
- return pkg.Version, nil
133
- }
134
-
135
- // getLocalVersion reads the current version from the version file.
136
- func (u *Updater) getLocalVersion() string {
137
- data, err := os.ReadFile(u.cfg.Paths.VersionFile)
138
- if err != nil {
139
- return ""
140
- }
141
- return strings.TrimSpace(string(data))
142
- }
143
-
144
- func (u *Updater) setLocalVersion(v string) {
145
- os.MkdirAll(u.cfg.DataDir, 0755)
146
- if err := atomicWrite(u.cfg.Paths.VersionFile, []byte(v)); err != nil {
147
- u.log.Warn("Failed to write version file: %v", err)
148
- }
149
- }
150
-
151
- func (u *Updater) checkAndUpdate(ctx context.Context) {
152
- // Enforce minimum uptime before updating
153
- uptime := time.Since(u.startAt)
154
- if uptime < u.cfg.MinUptime {
155
- u.log.Info("Deferring update — too early (uptime %v < %v)", uptime.Round(time.Second), u.cfg.MinUptime)
156
- return
157
- }
158
-
159
- remote, err := u.getRemoteVersion(ctx)
160
- if err != nil {
161
- u.log.Warn("Failed to check npm registry: %v", err)
162
- return
163
- }
164
-
165
- local := u.getLocalVersion()
166
- if local == "" {
167
- u.log.Info("No local version recorded — storing %s", remote)
168
- u.setLocalVersion(remote)
169
- return
170
- }
171
-
172
- if local == remote {
173
- u.log.Debug("Updater: version %s is up to date", local)
174
- return
175
- }
176
-
177
- u.log.Info("Update available: %s → %s", local, remote)
178
- coordLock, ok := AcquireUpdateCoordinatorLock(u.cfg.Paths.UpdateApplyLock, updateScopeMCP, u.log)
179
- if !ok {
180
- u.log.Info("Deferring MCP update %s → %s due to active update apply lock", local, remote)
181
- return
182
- }
183
- defer coordLock.Release()
184
-
185
- u.state.Transition(updateScopeMCP, updatePhaseApplying, remote, local, "")
186
- markFailed := func(err error) {
187
- u.state.Transition(updateScopeMCP, updatePhaseFailed, remote, local, err.Error())
188
- }
189
-
190
- notifyUpdaterOperator(u.cfg, u.log, fmt.Sprintf("⚙️ Supervisor: updating sensorium v%s → v%s. Grace period %v...", local, remote, u.cfg.GracePeriod), 0)
191
-
192
- // Grace period
193
- u.log.Info("Grace period %v...", u.cfg.GracePeriod)
194
- select {
195
- case <-ctx.Done():
196
- markFailed(ctx.Err())
197
- return
198
- case <-time.After(u.cfg.GracePeriod):
199
- }
200
-
201
- // Set maintenance flag — always clean up on exit.
202
- // Written as JSON so TypeScript's checkMaintenanceFlag() can parse the
203
- // version and timestamp fields for accurate maintenance notifications.
204
- maintenanceJSON, err := json.Marshal(map[string]string{
205
- "version": remote,
206
- "timestamp": time.Now().Format(time.RFC3339),
207
- })
208
- if err != nil {
209
- u.log.Warn("Failed to marshal maintenance flag: %v", err)
210
- } else if err := atomicWrite(u.cfg.Paths.MaintenanceFlag, maintenanceJSON); err != nil {
211
- u.log.Warn("Failed to write maintenance flag: %v", err)
212
- }
213
- defer os.Remove(u.cfg.Paths.MaintenanceFlag)
214
-
215
- // Kill the current MCP server
216
- if ctx.Err() != nil {
217
- markFailed(ctx.Err())
218
- return
219
- }
220
- u.state.Transition(updateScopeMCP, updatePhaseRestarting, remote, local, "")
221
- u.killServer()
222
-
223
- // Clean npx cache
224
- if ctx.Err() != nil {
225
- markFailed(ctx.Err())
226
- return
227
- }
228
- u.clearNpxCache()
229
-
230
- // Spawn new server — retry up to 3 times on failure
231
- var pid int
232
- for attempt := 1; attempt <= 3; attempt++ {
233
- if ctx.Err() != nil {
234
- markFailed(ctx.Err())
235
- return
236
- }
237
- pid, err = SpawnMCPServer(u.cfg, u.log)
238
- if err == nil {
239
- break
240
- }
241
- u.log.Error("Failed to spawn updated MCP server (attempt %d/3): %v", attempt, err)
242
- if attempt < 3 {
243
- time.Sleep(2 * time.Second)
244
- }
245
- }
246
- if err != nil {
247
- u.log.Error("All spawn attempts failed — server is down!")
248
- markFailed(err)
249
- notifyUpdaterOperator(u.cfg, u.log, "🔴 Supervisor: update FAILED — server is down! Manual intervention required.", 0)
250
- return
251
- }
252
-
253
- if !u.verifyUpdatedMCPServerReady(ctx, remote, local, pid) {
254
- return
255
- }
256
-
257
- u.setLocalVersion(remote)
258
- u.state.Transition(updateScopeMCP, updatePhaseIdle, remote, local, "")
259
-
260
- notifyUpdaterOperator(u.cfg, u.log, fmt.Sprintf("✅ Supervisor: update to v%s complete. Server ready.", remote), 0)
261
- u.log.Info("Update complete: v%s → v%s", local, remote)
262
-
263
- // Reset start time for min uptime tracking
264
- u.startAt = time.Now()
265
- }
266
-
267
- func (u *Updater) verifyUpdatedMCPServerReady(ctx context.Context, remote, local string, pid int) bool {
268
- u.state.Transition(updateScopeMCP, updatePhaseVerifying, remote, local, "")
269
- if u.mcp.WaitForReady(ctx, mcpUpdateReadyPollInterval, mcpUpdateReadyTimeout) {
270
- u.log.Info("Updated MCP server ready (PID %d)", pid)
271
- return true
272
- }
273
-
274
- errMsg := fmt.Sprintf("updated MCP server did not become ready within %v after restart (pid=%d)", mcpUpdateReadyTimeout, pid)
275
- u.log.Error(errMsg)
276
- u.state.Transition(updateScopeMCP, updatePhaseFailed, remote, local, errMsg)
277
- notifyUpdaterOperator(u.cfg, u.log, fmt.Sprintf("🔴 Supervisor: update to v%s FAILED verification. Server did not become ready after restart.", remote), 0)
278
- return false
279
- }
280
-
281
- type githubRelease struct {
282
- TagName string `json:"tag_name"`
283
- Name string `json:"name"`
284
- Assets []struct {
285
- Name string `json:"name"`
286
- URL string `json:"browser_download_url"`
287
- Size int64 `json:"size"`
288
- } `json:"assets"`
289
- }
290
-
291
- func (u *Updater) getSupervisorRelease(ctx context.Context) (string, string, error) {
292
- ctx2, cancel := context.WithTimeout(ctx, 20*time.Second)
293
- defer cancel()
294
-
295
- req, err := http.NewRequestWithContext(ctx2, http.MethodGet, supervisorReleaseURL, nil)
296
- if err != nil {
297
- return "", "", err
298
- }
299
- req.Header.Set("Accept", "application/vnd.github+json")
300
- req.Header.Set("User-Agent", "sensorium-supervisor-updater")
301
-
302
- resp, err := http.DefaultClient.Do(req)
303
- if err != nil {
304
- return "", "", err
305
- }
306
- defer resp.Body.Close()
307
-
308
- if resp.StatusCode != http.StatusOK {
309
- return "", "", fmt.Errorf("GitHub releases HTTP %d", resp.StatusCode)
310
- }
311
-
312
- var release githubRelease
313
- if err := json.NewDecoder(resp.Body).Decode(&release); err != nil {
314
- return "", "", err
315
- }
316
-
317
- assetName := supervisorAssetName()
318
- for _, asset := range release.Assets {
319
- if asset.Name != assetName {
320
- continue
321
- }
322
-
323
- version := strings.TrimSpace(release.Name)
324
- if version == "" {
325
- version = strings.TrimSpace(release.TagName)
326
- }
327
- if version == "" {
328
- return "", "", fmt.Errorf("release version missing for %s", assetName)
329
- }
330
- if strings.TrimSpace(asset.URL) == "" {
331
- return "", "", fmt.Errorf("release asset URL missing for %s", assetName)
332
- }
333
-
334
- return version, asset.URL, nil
335
- }
336
-
337
- return "", "", fmt.Errorf("release asset %q not found", assetName)
338
- }
339
-
340
- func supervisorAssetName() string {
341
- suffix := ""
342
- if runtime.GOOS == "windows" {
343
- suffix = ".exe"
344
- }
345
- return fmt.Sprintf("sensorium-supervisor-%s-%s%s", runtime.GOOS, runtime.GOARCH, suffix)
346
- }
347
-
348
- func (u *Updater) getLocalSupervisorVersion() string {
349
- data, err := os.ReadFile(u.cfg.Paths.SupervisorVersion)
350
- if err != nil {
351
- return ""
352
- }
353
- return strings.TrimSpace(string(data))
354
- }
355
-
356
- func (u *Updater) setLocalSupervisorVersion(v string) {
357
- os.MkdirAll(u.cfg.DataDir, 0755)
358
- if err := atomicWrite(u.cfg.Paths.SupervisorVersion, []byte(v)); err != nil {
359
- u.log.Warn("Failed to write supervisor version file: %v", err)
360
- }
361
- }
362
-
363
- func (u *Updater) stagePendingSupervisorVersion(v string) error {
364
- if err := os.MkdirAll(filepath.Dir(u.cfg.Paths.PendingVersion), 0755); err != nil {
365
- return fmt.Errorf("create pending supervisor version dir: %w", err)
366
- }
367
- if err := atomicWrite(u.cfg.Paths.PendingVersion, []byte(v)); err != nil {
368
- return fmt.Errorf("write pending supervisor version: %w", err)
369
- }
370
- return nil
371
- }
372
-
373
- func (u *Updater) checkSupervisorUpdate(ctx context.Context) {
374
- uptime := time.Since(u.startAt)
375
- if uptime < u.cfg.MinUptime {
376
- u.log.Info("Deferring supervisor update — too early (uptime %v < %v)", uptime.Round(time.Second), u.cfg.MinUptime)
377
- return
378
- }
379
-
380
- remote, downloadURL, err := u.getSupervisorRelease(ctx)
381
- if err != nil {
382
- u.log.Warn("Failed to check supervisor release: %v", err)
383
- return
384
- }
385
-
386
- local := u.getLocalSupervisorVersion()
387
- if local == "" {
388
- u.log.Info("No local supervisor version recorded — storing %s", remote)
389
- u.setLocalSupervisorVersion(remote)
390
- return
391
- }
392
-
393
- if local == remote {
394
- u.log.Debug("Supervisor updater: version %s is up to date", local)
395
- return
396
- }
397
-
398
- u.log.Info("Supervisor update available: %s → %s", local, remote)
399
- coordLock, ok := AcquireUpdateCoordinatorLock(u.cfg.Paths.UpdateApplyLock, updateScopeSupervisor, u.log)
400
- if !ok {
401
- u.log.Info("Deferring supervisor binary update %s → %s due to active update apply lock", local, remote)
402
- return
403
- }
404
- defer coordLock.Release()
405
-
406
- markFailed := func(err error) {
407
- u.state.Transition(updateScopeSupervisor, updatePhaseFailed, remote, local, err.Error())
408
- }
409
-
410
- notifyUpdaterOperator(u.cfg, u.log, fmt.Sprintf("⚙️ Supervisor: updating binary %s → %s. Grace period %v...", local, remote, u.cfg.GracePeriod), 0)
411
-
412
- select {
413
- case <-ctx.Done():
414
- markFailed(ctx.Err())
415
- return
416
- case <-time.After(u.cfg.GracePeriod):
417
- }
418
-
419
- if err := u.downloadSupervisorBinary(ctx, downloadURL); err != nil {
420
- markFailed(err)
421
- u.log.Error("Supervisor binary download failed: %v", err)
422
- notifyUpdaterOperator(u.cfg, u.log, fmt.Sprintf("🔴 Supervisor: binary update to %s failed during download.", remote), 0)
423
- return
424
- }
425
-
426
- if err := u.stagePendingSupervisorVersion(remote); err != nil {
427
- _ = os.Remove(u.cfg.Paths.PendingBinary)
428
- markFailed(err)
429
- u.log.Error("Failed to stage supervisor version %s: %v", remote, err)
430
- notifyUpdaterOperator(u.cfg, u.log, fmt.Sprintf("🔴 Supervisor: binary update to %s failed during staging.", remote), 0)
431
- return
432
- }
433
- u.state.Transition(updateScopeSupervisor, updatePhaseStaged, remote, local, "")
434
- notifyUpdaterOperator(u.cfg, u.log, fmt.Sprintf("⚙️ Supervisor: downloaded %s. Restarting supervisor to apply update...", remote), 0)
435
-
436
- // Reset start time so minimum uptime is re-enforced after restart
437
- u.startAt = time.Now()
438
-
439
- isService, err := isWindowsService()
440
- if err != nil {
441
- markFailed(err)
442
- u.log.Error("Failed to detect service mode for restart: %v", err)
443
- notifyUpdaterOperator(u.cfg, u.log, "🔴 Supervisor: update downloaded but service detection failed.", 0)
444
- return
445
- }
446
- u.state.Transition(updateScopeSupervisor, updatePhaseRestarting, remote, local, "")
447
-
448
- if isService {
449
- if err := scheduleServiceRestartForUpdate(u.log); err != nil {
450
- markFailed(err)
451
- u.log.Error("Failed to schedule service restart: %v", err)
452
- notifyUpdaterOperator(u.cfg, u.log, "🔴 Supervisor: update downloaded but service restart scheduling failed.", 0)
453
- }
454
- return
455
- }
456
-
457
- if err := requestSupervisorRestart(u.log); err != nil {
458
- markFailed(err)
459
- u.log.Error("Failed to signal supervisor for restart: %v", err)
460
- notifyUpdaterOperator(u.cfg, u.log, "🔴 Supervisor: update downloaded but restart signal failed.", 0)
461
- }
462
- }
463
-
464
- func (u *Updater) downloadSupervisorBinary(ctx context.Context, downloadURL string) error {
465
- if err := os.MkdirAll(u.cfg.Paths.BinaryDir, 0755); err != nil {
466
- return fmt.Errorf("create binary dir: %w", err)
467
- }
468
-
469
- tmpPath := u.cfg.Paths.PendingBinary + ".download"
470
- defer os.Remove(tmpPath)
471
-
472
- req, err := http.NewRequestWithContext(ctx, http.MethodGet, downloadURL, nil)
473
- if err != nil {
474
- return err
475
- }
476
- req.Header.Set("User-Agent", "sensorium-supervisor-updater")
477
-
478
- resp, err := http.DefaultClient.Do(req)
479
- if err != nil {
480
- return err
481
- }
482
- defer resp.Body.Close()
483
-
484
- if resp.StatusCode != http.StatusOK {
485
- return fmt.Errorf("download HTTP %d", resp.StatusCode)
486
- }
487
-
488
- f, err := os.OpenFile(tmpPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0755)
489
- if err != nil {
490
- return err
491
- }
492
-
493
- written, copyErr := io.Copy(f, resp.Body)
494
- closeErr := f.Close()
495
- if copyErr != nil {
496
- return copyErr
497
- }
498
- if closeErr != nil {
499
- return closeErr
500
- }
501
- if written <= 0 {
502
- return fmt.Errorf("downloaded empty binary")
503
- }
504
-
505
- if err := os.Remove(u.cfg.Paths.PendingBinary); err != nil && !os.IsNotExist(err) {
506
- return err
507
- }
508
- if err := os.Rename(tmpPath, u.cfg.Paths.PendingBinary); err != nil {
509
- return err
510
- }
511
-
512
- u.log.Info("Supervisor binary downloaded to %s (%d bytes)", u.cfg.Paths.PendingBinary, written)
513
- return nil
514
- }
515
-
516
- func signalSelf(sig os.Signal) error {
517
- proc, err := os.FindProcess(os.Getpid())
518
- if err != nil {
519
- return err
520
- }
521
- return proc.Signal(sig)
522
- }
523
-
524
- func requestSupervisorRestart(log *Logger) error {
525
- if runtime.GOOS != "windows" {
526
- return signalSelf(syscall.SIGTERM)
527
- }
528
-
529
- exePath, err := os.Executable()
530
- if err != nil {
531
- return fmt.Errorf("resolve executable path: %w", err)
532
- }
533
-
534
- cmd := exec.Command(exePath)
535
- cmd.Env = os.Environ()
536
- setSysProcAttr(cmd)
537
- if err := cmd.Start(); err != nil {
538
- return fmt.Errorf("start replacement supervisor: %w", err)
539
- }
540
-
541
- if log != nil {
542
- log.Info("Spawned replacement supervisor process PID %d", cmd.Process.Pid)
543
- }
544
-
545
- go func() {
546
- time.Sleep(2 * time.Second)
547
- os.Exit(0)
548
- }()
549
-
550
- return nil
551
- }
552
-
553
- func (u *Updater) killServer() {
554
- u.log.Info("Updater: stopping current MCP server for update")
555
-
556
- // Ask the MCP server to write a reconnect snapshot before we kill it.
557
- // On Windows, taskkill /F doesn't allow graceful shutdown, so this is
558
- // the only way the snapshot gets written.
559
- u.mcp.PrepareShutdown(context.Background())
560
-
561
- pid, err := ReadPIDFile(u.cfg.Paths.ServerPID)
562
- if err != nil {
563
- u.log.Warn("Could not read server PID file: %v", err)
564
- // Try killing by port as fallback
565
- KillByPort(u.cfg.MCPHttpPort, u.log)
566
- return
567
- }
568
- if err := KillProcess(pid, u.log); err != nil {
569
- u.log.Error("Failed to kill server (PID %d): %v", pid, err)
570
- KillByPort(u.cfg.MCPHttpPort, u.log)
571
- }
572
- }
573
-
574
- // clearNpxCache removes the cached sensorium-mcp package from the npx cache
575
- // so the next `npx -y sensorium-mcp@latest` fetches the new version.
576
- func (u *Updater) clearNpxCache() {
577
- u.log.Info("Updater: clearing npx cache")
578
- var base string
579
- if runtime.GOOS == "windows" {
580
- localAppData := os.Getenv("LOCALAPPDATA")
581
- if localAppData == "" {
582
- home, _ := os.UserHomeDir()
583
- localAppData = filepath.Join(home, "AppData", "Local")
584
- }
585
- base = filepath.Join(localAppData, "npm-cache", "_npx")
586
- } else {
587
- home, _ := os.UserHomeDir()
588
- base = filepath.Join(home, ".npm", "_npx")
589
- }
590
-
591
- u.log.Info("Clearing sensorium-mcp from npx cache (%s)", base)
592
-
593
- entries, err := os.ReadDir(base)
594
- if err != nil {
595
- return
596
- }
597
-
598
- for _, e := range entries {
599
- if !e.IsDir() {
600
- continue
601
- }
602
- pkgDir := filepath.Join(base, e.Name(), "node_modules", "sensorium-mcp")
603
- // Validate path doesn't escape base directory
604
- if !strings.HasPrefix(pkgDir, base+string(os.PathSeparator)) {
605
- continue
606
- }
607
- if _, err := os.Stat(pkgDir); err == nil {
608
- if err := os.RemoveAll(pkgDir); err != nil {
609
- u.log.Warn("Failed to clear npx cache entry %s: %v", pkgDir, err)
610
- }
611
- }
612
- }
613
- }
@@ -1,64 +0,0 @@
1
- package main
2
-
3
- import (
4
- "context"
5
- "path/filepath"
6
- "strings"
7
- "testing"
8
- "time"
9
- )
10
-
11
- func TestVerifyUpdatedMCPServerReady_FailureSetsFailedStateAndNoSuccessMessage(t *testing.T) {
12
- dir := t.TempDir()
13
- log := NewLogger(filepath.Join(dir, "test.log"))
14
- defer log.Close()
15
-
16
- cfg := Config{
17
- DataDir: dir,
18
- Paths: Paths{
19
- UpdateState: filepath.Join(dir, "update-state.json"),
20
- },
21
- }
22
-
23
- u := NewUpdater(cfg, NewMCPClient(1, ""), log)
24
- u.state = NewUpdateStateStore(cfg.Paths.UpdateState, log)
25
-
26
- origNotify := notifyUpdaterOperator
27
- origPoll := mcpUpdateReadyPollInterval
28
- origTimeout := mcpUpdateReadyTimeout
29
- defer func() {
30
- notifyUpdaterOperator = origNotify
31
- mcpUpdateReadyPollInterval = origPoll
32
- mcpUpdateReadyTimeout = origTimeout
33
- }()
34
-
35
- mcpUpdateReadyPollInterval = 1 * time.Millisecond
36
- mcpUpdateReadyTimeout = 5 * time.Millisecond
37
-
38
- var messages []string
39
- notifyUpdaterOperator = func(_ Config, _ *Logger, text string, _ int) {
40
- messages = append(messages, text)
41
- }
42
-
43
- ok := u.verifyUpdatedMCPServerReady(context.Background(), "2.0.0", "1.0.0", 4242)
44
- if ok {
45
- t.Fatal("expected verification to fail")
46
- }
47
-
48
- state, err := u.state.Load()
49
- if err != nil {
50
- t.Fatalf("load update state: %v", err)
51
- }
52
- if state.Phase != updatePhaseFailed {
53
- t.Fatalf("state phase = %q, want %q", state.Phase, updatePhaseFailed)
54
- }
55
- if !strings.Contains(state.LastError, "did not become ready") {
56
- t.Fatalf("last error = %q, want readiness failure detail", state.LastError)
57
- }
58
- if len(messages) == 0 {
59
- t.Fatal("expected failure notification message")
60
- }
61
- if strings.Contains(messages[len(messages)-1], "complete") {
62
- t.Fatalf("unexpected success message: %q", messages[len(messages)-1])
63
- }
64
- }