@noego/app 0.0.17 → 0.0.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@noego/app",
3
- "version": "0.0.17",
3
+ "version": "0.0.19",
4
4
  "description": "Production build tool for Dinner/Forge apps.",
5
5
  "type": "module",
6
6
  "bin": {
@@ -43,6 +43,7 @@
43
43
  "peerDependencies": {
44
44
  "@noego/dinner": "*",
45
45
  "@noego/forge": "*",
46
+ "@noego/ioc": "*",
46
47
  "express": "*"
47
48
  },
48
49
  "devDependencies": {
package/src/client.js CHANGED
@@ -1,5 +1,6 @@
1
1
  import { createRequire } from 'node:module';
2
2
  import path from 'node:path';
3
+ import { getContainer } from './container.js';
3
4
 
4
5
  // Runtime context - set by the runtime before calling server.main
5
6
  let runtimeContext = null;
@@ -44,9 +45,21 @@ export async function boot(assets, options = {}) {
44
45
  server: app,
45
46
  ajv_formats: true,
46
47
  assets: assets,
47
- controller_builder: options.controller_builder || (async (Controller) => {
48
- // Default: try to instantiate with new
49
- return new Controller();
48
+ // Default context_builder creates a scoped container per request
49
+ context_builder: options.context_builder || ((req, res) => {
50
+ const globalContainer = getContainer();
51
+ const scopedContainer = globalContainer.extend();
52
+ // Attach scoped container to request for middleware/decorators to access
53
+ req.container = scopedContainer;
54
+ return { container: scopedContainer };
55
+ }),
56
+ // Default controller_builder uses the scoped container from context
57
+ controller_builder: options.controller_builder || (async (Controller, context) => {
58
+ if (context?.container) {
59
+ return context.container.get(Controller);
60
+ }
61
+ // Fallback: use global container if no scoped container in context
62
+ return getContainer().get(Controller);
50
63
  }),
51
64
  controller_args_provider: options.controller_args_provider || (async (req, res) => ({ req, res })),
52
65
  });
@@ -525,6 +525,7 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
525
525
 
526
526
  // Crash restart tracking
527
527
  const MAX_CRASH_RESTARTS = 3;
528
+ const PORT_RECOVERY_TIMEOUT = 30000; // 30 seconds to wait for port to become free
528
529
  const CRASH_RESTART_DELAY = 2000; // 2 seconds between crash restarts
529
530
  const STABILITY_THRESHOLD = 30000; // Reset crash counter if running 30+ seconds
530
531
  let backendCrashRestarts = 0;
@@ -541,8 +542,10 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
541
542
  if (retryAfterRecovery) {
542
543
  backendPortRetries++;
543
544
  if (backendPortRetries > MAX_PORT_RECOVERY_RETRIES) {
544
- logger.error(`[BACKEND] Exceeded max port recovery retries (${MAX_PORT_RECOVERY_RETRIES}). Shutting down...`);
545
- shutdown('backend-port-recovery-exceeded', 1, 'backend-port-recovery');
545
+ logger.error(`[BACKEND] Exceeded max port recovery retries (${MAX_PORT_RECOVERY_RETRIES}).`);
546
+ logger.info(`[BACKEND] Service is down. Waiting for file change to retry...`);
547
+ backendProc = null;
548
+ // Don't shutdown - keep file watcher running so a file change can trigger a fresh restart
546
549
  return;
547
550
  }
548
551
  logger.warn(`[BACKEND] Port recovery retry ${backendPortRetries}/${MAX_PORT_RECOVERY_RETRIES}...`);
@@ -617,7 +620,7 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
617
620
 
618
621
  // Wait for port to be free before restarting (fixes race condition)
619
622
  backendProc = null;
620
- waitForPortFree(backendPort, 10000, 100).subscribe({
623
+ waitForPortFree(backendPort, PORT_RECOVERY_TIMEOUT, 100).subscribe({
621
624
  next: () => {
622
625
  if (!isShuttingDown) {
623
626
  logger.info(`[BACKEND] Port ${backendPort} is free, restarting...`);
@@ -632,12 +635,12 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
632
635
  logger.warn(`[BACKEND] Port conflict detected - attempting port recovery...`);
633
636
  const recovered = recoverFromPortInUse(backendPort, logger);
634
637
  if (recovered && !isShuttingDown) {
635
- // Wait a moment for OS to fully release the port
638
+ // Wait for OS to fully release the port before retrying
636
639
  setTimeout(() => {
637
640
  if (!isShuttingDown) {
638
641
  startBackend(true); // Mark as recovery retry
639
642
  }
640
- }, 500);
643
+ }, CRASH_RESTART_DELAY);
641
644
  return;
642
645
  }
643
646
  }
@@ -649,8 +652,10 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
649
652
  }
650
653
  });
651
654
  } else {
652
- logger.error(`[BACKEND] Exceeded max crash restarts (${MAX_CRASH_RESTARTS}). Shutting down...`);
653
- shutdown('backend-exceeded-restarts', 1, 'backend-crash');
655
+ logger.error(`[BACKEND] Exceeded max crash restarts (${MAX_CRASH_RESTARTS}).`);
656
+ logger.info(`[BACKEND] Service is down. Waiting for file change to retry...`);
657
+ backendProc = null;
658
+ // Don't shutdown - keep file watcher running so a file change can trigger a fresh restart
654
659
  }
655
660
  }
656
661
  });
@@ -667,7 +672,7 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
667
672
  if (!isShuttingDown) {
668
673
  startBackend(true);
669
674
  }
670
- }, 500);
675
+ }, CRASH_RESTART_DELAY);
671
676
  }
672
677
  }
673
678
  });
@@ -677,8 +682,10 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
677
682
  if (retryAfterRecovery) {
678
683
  frontendPortRetries++;
679
684
  if (frontendPortRetries > MAX_PORT_RECOVERY_RETRIES) {
680
- logger.error(`[FRONTEND] Exceeded max port recovery retries (${MAX_PORT_RECOVERY_RETRIES}). Shutting down...`);
681
- shutdown('frontend-port-recovery-exceeded', 1, 'frontend-port-recovery');
685
+ logger.error(`[FRONTEND] Exceeded max port recovery retries (${MAX_PORT_RECOVERY_RETRIES}).`);
686
+ logger.info(`[FRONTEND] Service is down. Waiting for file change to retry...`);
687
+ frontendProc = null;
688
+ // Don't shutdown - keep file watcher running so a file change can trigger a fresh restart
682
689
  return;
683
690
  }
684
691
  logger.warn(`[FRONTEND] Port recovery retry ${frontendPortRetries}/${MAX_PORT_RECOVERY_RETRIES}...`);
@@ -753,7 +760,7 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
753
760
 
754
761
  // Wait for port to be free before restarting (fixes race condition)
755
762
  frontendProc = null;
756
- waitForPortFree(frontendPort, 10000, 100).subscribe({
763
+ waitForPortFree(frontendPort, PORT_RECOVERY_TIMEOUT, 100).subscribe({
757
764
  next: () => {
758
765
  if (!isShuttingDown) {
759
766
  logger.info(`[FRONTEND] Port ${frontendPort} is free, restarting...`);
@@ -768,12 +775,12 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
768
775
  logger.warn(`[FRONTEND] Port conflict detected - attempting port recovery...`);
769
776
  const recovered = recoverFromPortInUse(frontendPort, logger);
770
777
  if (recovered && !isShuttingDown) {
771
- // Wait a moment for OS to fully release the port
778
+ // Wait for OS to fully release the port before retrying
772
779
  setTimeout(() => {
773
780
  if (!isShuttingDown) {
774
781
  startFrontend(true); // Mark as recovery retry
775
782
  }
776
- }, 500);
783
+ }, CRASH_RESTART_DELAY);
777
784
  return;
778
785
  }
779
786
  }
@@ -785,8 +792,10 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
785
792
  }
786
793
  });
787
794
  } else {
788
- logger.error(`[FRONTEND] Exceeded max crash restarts (${MAX_CRASH_RESTARTS}). Shutting down...`);
789
- shutdown('frontend-exceeded-restarts', 1, 'frontend-crash');
795
+ logger.error(`[FRONTEND] Exceeded max crash restarts (${MAX_CRASH_RESTARTS}).`);
796
+ logger.info(`[FRONTEND] Service is down. Waiting for file change to retry...`);
797
+ frontendProc = null;
798
+ // Don't shutdown - keep file watcher running so a file change can trigger a fresh restart
790
799
  }
791
800
  }
792
801
  });
@@ -803,7 +812,7 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
803
812
  if (!isShuttingDown) {
804
813
  startFrontend(true);
805
814
  }
806
- }, 500);
815
+ }, CRASH_RESTART_DELAY);
807
816
  }
808
817
  }
809
818
  });
@@ -1019,6 +1028,9 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
1019
1028
  // Then start the new backend
1020
1029
  of(undefined).pipe(
1021
1030
  tap(() => {
1031
+ // Reset crash counters - file change gives a fresh start
1032
+ backendCrashRestarts = 0;
1033
+ backendPortRetries = 0;
1022
1034
  startBackend();
1023
1035
  logger.info(`✅ Backend restart complete\n`);
1024
1036
  })
@@ -1055,6 +1067,9 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
1055
1067
  // Then start the new frontend
1056
1068
  of(undefined).pipe(
1057
1069
  tap(() => {
1070
+ // Reset crash counters - file change gives a fresh start
1071
+ frontendCrashRestarts = 0;
1072
+ frontendPortRetries = 0;
1058
1073
  startFrontend();
1059
1074
  logger.info(`✅ Frontend restart complete\n`);
1060
1075
  })
package/src/container.js ADDED
@@ -0,0 +1,35 @@
1
+ /**
2
+ * Global IoC container management for @noego/app
3
+ *
4
+ * Uses Symbol.for() to ensure a true singleton across module boundaries,
5
+ * even if multiple versions of the package are loaded.
6
+ */
7
+
8
+ const CONTAINER_KEY = Symbol.for('@noego/app.container');
9
+
10
+ // Lazy-loaded container factory
11
+ let _createContainer = null;
12
+
13
+ /**
14
+ * Get the global IoC container instance.
15
+ * Creates it on first access using @noego/ioc.
16
+ *
17
+ * @returns {import('@noego/ioc').Container} The global container instance
18
+ */
19
+ export function getContainer() {
20
+ if (!globalThis[CONTAINER_KEY]) {
21
+ // Dynamically require @noego/ioc to avoid bundling issues
22
+ // and to use the version from the user's project
23
+ const { createContainer } = require('@noego/ioc');
24
+ globalThis[CONTAINER_KEY] = createContainer();
25
+ }
26
+ return globalThis[CONTAINER_KEY];
27
+ }
28
+
29
/**
 * Reset the global container (primarily for testing).
 * This clears the singleton and allows a fresh container to be created.
 */
export function resetContainer() {
  // Dropping the global-registry entry means the next getContainer()
  // call will lazily construct a brand-new container instance.
  delete globalThis[Symbol.for('@noego/app.container')];
}
package/src/index.js CHANGED
@@ -5,3 +5,4 @@
5
5
 
6
6
  export { runCombinedServices } from './runtime/runtime.js';
7
7
  export { boot, client } from './client.js';
8
+ export { getContainer, resetContainer } from './container.js';