polyforge-cli 0.1.0 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. package/README.md +8 -0
  2. package/dist/core/installer.js +6 -0
  3. package/dist/core/renderer.js +13 -0
  4. package/dist/templates/base/infra/scripts/check.sh +190 -0
  5. package/dist/templates/base/infra/scripts/dev.sh +16 -0
  6. package/dist/templates/modules/auth-center/apps/auth-center/README.md +7 -3
  7. package/dist/templates/modules/auth-center/apps/auth-center/server.js +67 -4
  8. package/dist/templates/modules/cache-redis/infra/cache/examples/cache-client.js +8 -0
  9. package/dist/templates/modules/cache-redis/infra/cache/examples/redis-key-convention.md +5 -0
  10. package/dist/templates/modules/gateway-bff/apps/gateway-bff/server.js +61 -4
  11. package/dist/templates/modules/grpc-service/apps/grpc-service/README.md +4 -3
  12. package/dist/templates/modules/grpc-service/apps/grpc-service/cmd/server/main.go +27 -0
  13. package/dist/templates/modules/grpc-service/apps/grpc-service/go.mod +8 -0
  14. package/dist/templates/modules/mq/apps/mq-worker/README.md +6 -0
  15. package/dist/templates/modules/mq/apps/mq-worker/package.json +16 -0
  16. package/dist/templates/modules/mq/apps/mq-worker/scripts/consumer.js +11 -0
  17. package/dist/templates/modules/mq/apps/mq-worker/scripts/producer.js +15 -0
  18. package/dist/templates/modules/mq/apps/mq-worker/scripts/roundtrip.js +29 -0
  19. package/dist/templates/modules/mq/infra/mq/README.md +2 -1
  20. package/dist/templates/modules/mq/infra/mq/docker-compose.yml +25 -0
  21. package/dist/templates/modules/mq/infra/mq/scripts/consumer-sample.sh +5 -0
  22. package/dist/templates/modules/mq/infra/mq/scripts/producer-sample.sh +5 -0
  23. package/dist/templates/modules/observability/infra/observability/docker-compose.yml +21 -0
  24. package/dist/templates/modules/observability/infra/observability/scripts/down.sh +4 -0
  25. package/dist/templates/modules/observability/infra/observability/scripts/up.sh +4 -0
  26. package/dist/templates/modules/python-worker/apps/worker-python/tasks/sample_task.py +11 -0
  27. package/dist/templates/modules/python-worker/apps/worker-python/tests/test_worker.py +1 -1
  28. package/dist/templates/modules/python-worker/apps/worker-python/worker.py +44 -2
  29. package/dist/templates/modules/worker-go/apps/worker-go/cmd/worker/main.go +6 -0
  30. package/dist/templates/modules/worker-go/apps/worker-go/internal/tasks/heartbeat.go +107 -2
  31. package/package.json +1 -1
package/README.md CHANGED
@@ -63,3 +63,11 @@ npm publish
  - Vite dev server proxy routes `/api` -> backend API and `/bff` -> gateway-bff.
  - Frontend page includes demo requests for `/api/v1/ping` and `/bff/ping` with `X-Trace-Id`.
  - Frontend templates include `src/api/request.ts` (timeout/retry/error mapping), `src/api/services/*`, and `src/api/client.ts`.
+
+ ## Module Runtime Notes
+
+ - `grpc-service`: includes runnable Go gRPC server skeleton and proto generation script.
+ - `mq`: includes local broker compose (`kafka`, `rabbitmq`, `nats`) and producer/consumer sample scripts.
+ - `cache-redis`: includes cache key convention and minimal cache client example.
+ - `observability`: includes local compose (`otel-collector`, `prometheus`, `grafana`) and up/down scripts.
+ - `auth-center`: includes JWT login/refresh/verify endpoints (minimal flow).
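
To exercise the runtime notes above end to end, a hedged smoke-probe sketch (not shipped in the package); the ports are the defaults visible elsewhere in this diff (api `:8080`, gateway-bff `:3001`, auth-center `:8081`), and each server reads `PORT` (or `APP_PORT` for the Go api) to override them:

```js
// Hypothetical smoke probe for the scaffolded services; not shipped in the package.
// Ports are the defaults seen in this diff: api :8080, gateway-bff :3001, auth-center :8081.
const targets = [
  { name: "api", url: "http://127.0.0.1:8080/health" },
  { name: "gateway-bff", url: "http://127.0.0.1:3001/health" },
  { name: "auth-center", url: "http://127.0.0.1:8081/health" },
];

for (const { name, url } of targets) {
  try {
    const resp = await fetch(url, { headers: { "X-Trace-Id": "readme-smoke" } });
    const body = await resp.json();
    console.log(`${name}: HTTP ${resp.status}, code=${body.code}`);
  } catch (err) {
    console.log(`${name}: unreachable (${err.message})`);
  }
}
```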
package/dist/core/installer.js CHANGED
@@ -61,4 +61,10 @@ async function maybeInstallDeps(config) {
              }
          }
      }
+     if (config.extraModules.includes("mq")) {
+         const mqDir = path_1.default.join(config.targetDir, "apps", "mq-worker");
+         if ((0, fs_1.existsSync)(path_1.default.join(mqDir, "package.json"))) {
+             await run(config.packageManager, ["install"], mqDir);
+         }
+     }
  }
package/dist/core/renderer.js CHANGED
@@ -119,6 +119,19 @@ function dockerServices(config) {
      if (config.extraModules.includes("auth-center")) {
          services.push(` auth-center:\n image: node:20-alpine\n working_dir: /app\n volumes:\n - ../apps/auth-center:/app\n command: node server.js\n ports:\n - "8081:8081"`);
      }
+     if (config.extraModules.includes("mq")) {
+         services.push(` kafka:\n image: bitnami/kafka:3.7\n environment:\n - KAFKA_CFG_NODE_ID=1\n - KAFKA_CFG_PROCESS_ROLES=broker,controller\n - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER\n - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093\n - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092\n - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT\n - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=1@kafka:9093\n - KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true\n ports:\n - "9092:9092"`);
+         services.push(` rabbitmq:\n image: rabbitmq:3-management\n ports:\n - "5672:5672"\n - "15672:15672"`);
+         services.push(` nats:\n image: nats:2.10\n ports:\n - "4222:4222"`);
+     }
+     if (config.extraModules.includes("cache-redis") && !config.dataModules.includes("redis")) {
+         services.push(` redis:\n image: redis:7\n ports:\n - "6379:6379"`);
+     }
+     if (config.extraModules.includes("observability")) {
+         services.push(` otel-collector:\n image: otel/opentelemetry-collector:0.108.0\n ports:\n - "4317:4317"\n - "4318:4318"`);
+         services.push(` prometheus:\n image: prom/prometheus:v2.54.1\n ports:\n - "9090:9090"`);
+         services.push(` grafana:\n image: grafana/grafana:11.1.5\n ports:\n - "3000:3000"`);
+     }
      return services.join("\n\n");
  }
  async function writeGeneratedReadme(config) {
package/dist/templates/base/infra/scripts/check.sh CHANGED
@@ -55,4 +55,194 @@ if [[ -f apps/python-ai/app/main.py ]]; then
    fi
  fi

+ if [[ -f contracts/proto/greeter.proto ]]; then
+   if command -v protoc >/dev/null 2>&1; then
+     echo "[check] protoc available for grpc-service"
+   else
+     echo "[check] protoc missing, grpc-service code generation unavailable"
+   fi
+ fi
+
+ if [[ -f infra/mq/docker-compose.yml ]]; then
+   if command -v docker >/dev/null 2>&1; then
+     docker compose -f infra/mq/docker-compose.yml config >/dev/null
+   else
+     echo "[check] docker missing, skip mq compose validation"
+   fi
+ fi
+
+ if [[ -f apps/mq-worker/package.json ]]; then
+   if command -v node >/dev/null 2>&1; then
+     node --check apps/mq-worker/scripts/producer.js
+     node --check apps/mq-worker/scripts/consumer.js
+     node --check apps/mq-worker/scripts/roundtrip.js
+   else
+     echo "[check] node missing, skip mq-worker syntax check"
+   fi
+ fi
+
+ if [[ -f infra/observability/docker-compose.yml ]]; then
+   if command -v docker >/dev/null 2>&1; then
+     docker compose -f infra/observability/docker-compose.yml config >/dev/null
+   else
+     echo "[check] docker missing, skip observability compose validation"
+   fi
+ fi
+
+ RUNTIME_SMOKE="${CHECK_RUNTIME_SMOKE:-1}"
+ if [[ "$RUNTIME_SMOKE" == "1" ]]; then
+   echo "[check] runtime smoke enabled (set CHECK_RUNTIME_SMOKE=0 to disable)"
+
+   declare -a PIDS=()
+   TMP_NATS_CONTAINER=""
+   cleanup() {
+     for pid in "${PIDS[@]}"; do
+       if kill -0 "$pid" >/dev/null 2>&1; then
+         kill "$pid" >/dev/null 2>&1 || true
+         wait "$pid" >/dev/null 2>&1 || true
+       fi
+     done
+     if [[ -n "$TMP_NATS_CONTAINER" ]] && command -v docker >/dev/null 2>&1; then
+       docker rm -f "$TMP_NATS_CONTAINER" >/dev/null 2>&1 || true
+     fi
+   }
+   trap cleanup EXIT
+
+   wait_http() {
+     local url="$1"
+     local retries="$2"
+     local delay="$3"
+     for _ in $(seq 1 "$retries"); do
+       if curl -sf "$url" >/dev/null 2>&1; then
+         return 0
+       fi
+       sleep "$delay"
+     done
+     return 1
+   }
+
+   wait_tcp() {
+     local host="$1"
+     local port="$2"
+     local retries="$3"
+     local delay="$4"
+     if ! command -v nc >/dev/null 2>&1; then
+       return 1
+     fi
+     for _ in $(seq 1 "$retries"); do
+       if nc -z "$host" "$port" >/dev/null 2>&1; then
+         return 0
+       fi
+       sleep "$delay"
+     done
+     return 1
+   }
+
+   if [[ -f apps/api/go.mod ]] && command -v go >/dev/null 2>&1; then
+     (
+       cd apps/api
+       APP_PORT=18080 go run ./cmd/server
+     ) >/tmp/scaffold-check-api.log 2>&1 &
+     PIDS+=("$!")
+     if wait_http "http://127.0.0.1:18080/health" 25 1; then
+       echo "[check] runtime ok: apps/api /health"
+     else
+       echo "[check] runtime warn: apps/api health probe failed"
+       tail -n 30 /tmp/scaffold-check-api.log || true
+     fi
+   fi
+
+   if [[ -f apps/gateway-bff/server.js ]] && command -v node >/dev/null 2>&1; then
+     (
+       cd apps/gateway-bff
+       PORT=13001 node server.js
+     ) >/tmp/scaffold-check-bff.log 2>&1 &
+     PIDS+=("$!")
+     if wait_http "http://127.0.0.1:13001/health" 20 1; then
+       echo "[check] runtime ok: apps/gateway-bff /health"
+     else
+       echo "[check] runtime warn: gateway-bff health probe failed"
+       tail -n 30 /tmp/scaffold-check-bff.log || true
+     fi
+   fi
+
+   if [[ -f apps/auth-center/server.js ]] && command -v node >/dev/null 2>&1; then
+     (
+       cd apps/auth-center
+       PORT=18081 node server.js
+     ) >/tmp/scaffold-check-auth.log 2>&1 &
+     PIDS+=("$!")
+     if wait_http "http://127.0.0.1:18081/health" 20 1; then
+       echo "[check] runtime ok: apps/auth-center /health"
+     else
+       echo "[check] runtime warn: auth-center health probe failed"
+       tail -n 30 /tmp/scaffold-check-auth.log || true
+     fi
+   fi
+
+   if [[ -f apps/python-ai/app/main.py ]] && command -v python3 >/dev/null 2>&1; then
+     if python3 -c "import uvicorn" >/dev/null 2>&1; then
+       (
+         cd apps/python-ai
+         python3 -m uvicorn app.main:app --host 127.0.0.1 --port 18090
+       ) >/tmp/scaffold-check-python-ai.log 2>&1 &
+       PIDS+=("$!")
+       if wait_http "http://127.0.0.1:18090/health" 20 1; then
+         echo "[check] runtime ok: apps/python-ai /health"
+       else
+         echo "[check] runtime warn: python-ai health probe failed"
+         tail -n 30 /tmp/scaffold-check-python-ai.log || true
+       fi
+     else
+       echo "[check] runtime skip: python-ai needs uvicorn"
+     fi
+   fi
+
+   if [[ -f apps/grpc-service/cmd/server/main.go ]] && command -v go >/dev/null 2>&1; then
+     (
+       cd apps/grpc-service
+       go run ./cmd/server
+     ) >/tmp/scaffold-check-grpc.log 2>&1 &
+     PIDS+=("$!")
+     if wait_tcp "127.0.0.1" "9090" 20 1; then
+       echo "[check] runtime ok: apps/grpc-service :9090"
+     else
+       echo "[check] runtime warn: grpc-service port probe failed"
+       tail -n 30 /tmp/scaffold-check-grpc.log || true
+     fi
+   fi
+
+   if [[ -f apps/mq-worker/package.json ]] && command -v npm >/dev/null 2>&1; then
+     MQ_NATS_READY="0"
+     if ! wait_tcp "127.0.0.1" "4222" 2 1; then
+       if command -v docker >/dev/null 2>&1 && docker info >/dev/null 2>&1; then
+         TMP_NATS_CONTAINER="scaffold-mq-check-$(date +%s)"
+         if docker run -d --rm --name "$TMP_NATS_CONTAINER" -p 4222:4222 nats:2.10 >/dev/null 2>&1; then
+           echo "[check] runtime info: started temporary NATS container ($TMP_NATS_CONTAINER)"
+           wait_tcp "127.0.0.1" "4222" 10 1 || true
+         fi
+       fi
+     fi
+
+     if wait_tcp "127.0.0.1" "4222" 2 1; then
+       MQ_NATS_READY="1"
+       if [[ -d apps/mq-worker/node_modules ]]; then
+         (cd apps/mq-worker && npm run -s roundtrip) || echo "[check] runtime warn: mq-worker roundtrip failed"
+       else
+         echo "[check] runtime skip: mq-worker dependencies not installed"
+       fi
+     fi
+
+     if [[ "$MQ_NATS_READY" != "1" ]]; then
+       if command -v docker >/dev/null 2>&1 && docker info >/dev/null 2>&1; then
+         echo "[check] runtime skip: NATS port 4222 unavailable"
+       else
+         echo "[check] runtime skip: NATS 4222 unavailable and Docker daemon not ready"
+       fi
+     else
+       echo "[check] runtime ok: mq-worker roundtrip completed"
+     fi
+   fi
+ fi
+
  echo "[check] done"
package/dist/templates/base/infra/scripts/dev.sh CHANGED
@@ -30,3 +30,19 @@ fi
  if [[ -f apps/python-ai/app/main.py ]]; then
    echo "[dev] python-ai available at apps/python-ai"
  fi
+
+ if [[ -f apps/auth-center/server.js ]]; then
+   echo "[dev] auth-center available at apps/auth-center"
+ fi
+
+ if [[ -f infra/mq/docker-compose.yml ]]; then
+   echo "[dev] mq stack available at infra/mq/docker-compose.yml"
+ fi
+
+ if [[ -f apps/mq-worker/package.json ]]; then
+   echo "[dev] mq-worker available at apps/mq-worker"
+ fi
+
+ if [[ -f infra/observability/docker-compose.yml ]]; then
+   echo "[dev] observability stack available at infra/observability/docker-compose.yml"
+ fi
package/dist/templates/modules/auth-center/apps/auth-center/README.md CHANGED
@@ -1,5 +1,9 @@
  # Auth Center Module

- - Purpose: centralize login/token/authorization
- - Includes JWT login placeholder endpoint
- - Add OAuth2 client configs before production
+ - Endpoints:
+   - `POST /auth/login`
+   - `POST /auth/refresh`
+   - `GET /auth/verify`
+   - `GET /health`
+ - Includes JWT access/refresh token minimal flow.
+ - Configure `JWT_SECRET` and `JWT_REFRESH_SECRET` for production.
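
A hedged end-to-end sketch of the endpoints listed above (assumes the server is running on its default `:8081` and Node 18+ for global `fetch`; not part of the template itself):

```js
// Hypothetical client walkthrough: login -> verify -> refresh, against the default :8081.
const base = "http://127.0.0.1:8081";

// 1. Login returns an access/refresh token pair.
const login = await (await fetch(`${base}/auth/login`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ username: "demo" }),
})).json();
const { accessToken, refreshToken } = login.data;

// 2. Verify the access token via the Authorization header.
const verify = await (await fetch(`${base}/auth/verify`, {
  headers: { Authorization: `Bearer ${accessToken}` },
})).json();
console.log("verify subject:", verify.data.subject);

// 3. Exchange the refresh token for a new access token.
const refreshed = await (await fetch(`${base}/auth/refresh`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ refreshToken }),
})).json();
console.log("new access token present:", Boolean(refreshed.data.accessToken));
```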
package/dist/templates/modules/auth-center/apps/auth-center/server.js CHANGED
@@ -5,17 +5,80 @@ const app = express();
  app.use(express.json());

  const secret = process.env.JWT_SECRET || "change-me";
+ const refreshSecret = process.env.JWT_REFRESH_SECRET || "change-refresh-me";
+
+ function signAccessToken(subject) {
+   return jwt.sign({ sub: subject, role: "user" }, secret, { expiresIn: "1h" });
+ }
+
+ function signRefreshToken(subject) {
+   return jwt.sign({ sub: subject, type: "refresh" }, refreshSecret, { expiresIn: "7d" });
+ }

  app.post("/auth/login", (req, res) => {
    const username = req.body?.username || "demo";
-   const token = jwt.sign({ sub: username, role: "user" }, secret, { expiresIn: "1h" });
-   res.json({ code: 0, message: "success", data: { accessToken: token }, traceId: req.header("X-Trace-Id") || "auth-trace", timestamp: new Date().toISOString() });
+   const accessToken = signAccessToken(username);
+   const refreshToken = signRefreshToken(username);
+
+   res.json({
+     code: 0,
+     message: "success",
+     data: { accessToken, refreshToken, tokenType: "Bearer", expiresIn: 3600 },
+     traceId: req.header("X-Trace-Id") || "auth-trace",
+     timestamp: new Date().toISOString(),
+   });
+ });
+
+ app.post("/auth/refresh", (req, res) => {
+   const token = req.body?.refreshToken;
+   if (!token) {
+     return res.status(400).json({ code: 10001, message: "refreshToken required", data: null, traceId: req.header("X-Trace-Id") || "auth-trace", timestamp: new Date().toISOString() });
+   }
+
+   try {
+     const decoded = jwt.verify(token, refreshSecret);
+     const accessToken = signAccessToken(decoded.sub);
+     return res.json({ code: 0, message: "success", data: { accessToken, tokenType: "Bearer", expiresIn: 3600 }, traceId: req.header("X-Trace-Id") || "auth-trace", timestamp: new Date().toISOString() });
+   } catch (_e) {
+     return res.status(401).json({ code: 10002, message: "invalid refresh token", data: null, traceId: req.header("X-Trace-Id") || "auth-trace", timestamp: new Date().toISOString() });
+   }
+ });
+
+ app.get("/auth/verify", (req, res) => {
+   const auth = req.header("Authorization") || "";
+   const token = auth.startsWith("Bearer ") ? auth.slice(7) : "";
+   if (!token) {
+     return res.status(401).json({ code: 10003, message: "missing bearer token", data: null, traceId: req.header("X-Trace-Id") || "auth-trace", timestamp: new Date().toISOString() });
+   }
+
+   try {
+     const decoded = jwt.verify(token, secret);
+     return res.json({ code: 0, message: "success", data: { subject: decoded.sub }, traceId: req.header("X-Trace-Id") || "auth-trace", timestamp: new Date().toISOString() });
+   } catch (_e) {
+     return res.status(401).json({ code: 10004, message: "invalid access token", data: null, traceId: req.header("X-Trace-Id") || "auth-trace", timestamp: new Date().toISOString() });
+   }
+ });
+
+ app.get("/auth/me", (req, res) => {
+   const auth = req.header("Authorization") || "";
+   const token = auth.startsWith("Bearer ") ? auth.slice(7) : "";
+   if (!token) {
+     return res.status(401).json({ code: 10003, message: "missing bearer token", data: null, traceId: req.header("X-Trace-Id") || "auth-trace", timestamp: new Date().toISOString() });
+   }
+
+   try {
+     const decoded = jwt.verify(token, secret);
+     return res.json({ code: 0, message: "success", data: { subject: decoded.sub, role: decoded.role }, traceId: req.header("X-Trace-Id") || "auth-trace", timestamp: new Date().toISOString() });
+   } catch (_e) {
+     return res.status(401).json({ code: 10004, message: "invalid access token", data: null, traceId: req.header("X-Trace-Id") || "auth-trace", timestamp: new Date().toISOString() });
+   }
  });

  app.get("/health", (req, res) => {
    res.json({ code: 0, message: "success", data: { status: "ok" }, traceId: req.header("X-Trace-Id") || "auth-trace", timestamp: new Date().toISOString() });
  });

- app.listen(8081, () => {
-   console.log("auth-center listening on :8081");
+ const port = Number(process.env.PORT || 8081);
+ app.listen(port, () => {
+   console.log(`auth-center listening on :${port}`);
  });
package/dist/templates/modules/cache-redis/infra/cache/examples/cache-client.js ADDED
@@ -0,0 +1,8 @@
+ // Minimal cache client wrapper example
+ // Replace with your runtime client (ioredis/go-redis/spring-data-redis).
+
+ function buildCacheKey(domain, entity, id) {
+   return `app:${domain}:${entity}:${id}`;
+ }
+
+ module.exports = { buildCacheKey };
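
A hedged sketch of how `buildCacheKey` might be paired with a real client; `ioredis` is only one of the options the wrapper's own comment suggests, and is an assumption here, not a dependency the template declares:

```js
// Hypothetical wiring of buildCacheKey to ioredis (NOT declared by the template).
const Redis = require("ioredis");
const { buildCacheKey } = require("./cache-client"); // assumed colocated path

const redis = new Redis(process.env.REDIS_URL || "redis://127.0.0.1:6379");

async function cacheUserProfile(userId, profile) {
  // Follows the documented convention: app:user:profile:{userId}
  const key = buildCacheKey("user", "profile", userId);
  await redis.set(key, JSON.stringify(profile), "EX", 300); // 5 min TTL
  return key;
}

cacheUserProfile("42", { name: "demo" }).then((key) => {
  console.log("cached under", key); // app:user:profile:42
  redis.quit();
});
```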
package/dist/templates/modules/cache-redis/infra/cache/examples/redis-key-convention.md ADDED
@@ -0,0 +1,5 @@
+ # Redis Key Convention
+
+ - user profile: `app:user:profile:{userId}`
+ - auth session: `app:auth:session:{sessionId}`
+ - rate limit: `app:rate:{route}:{userId}`
package/dist/templates/modules/gateway-bff/apps/gateway-bff/server.js CHANGED
@@ -3,15 +3,72 @@ const express = require("express");
  const app = express();
  app.use(express.json());

+ const API_BASE = process.env.API_BASE || "http://127.0.0.1:8080";
+ const port = Number(process.env.PORT || 3001);
+
+ const rateCounter = new Map();
+ setInterval(() => rateCounter.clear(), 60 * 1000);
+
+ function traceId(req) {
+   return req.header("X-Trace-Id") || "bff-trace";
+ }
+
+ function withMeta(req, data) {
+   return {
+     code: 0,
+     message: "success",
+     data,
+     traceId: traceId(req),
+     timestamp: new Date().toISOString(),
+   };
+ }
+
+ function authPassthrough(req, res, next) {
+   const token = req.header("Authorization");
+   if (!token) {
+     return res.status(401).json({ code: 10003, message: "missing Authorization header", data: null, traceId: traceId(req), timestamp: new Date().toISOString() });
+   }
+   return next();
+ }
+
+ function rateLimit(req, res, next) {
+   const key = `${req.ip}:${req.path}`;
+   const count = (rateCounter.get(key) || 0) + 1;
+   rateCounter.set(key, count);
+   if (count > 120) {
+     return res.status(429).json({ code: 10029, message: "rate limit exceeded", data: null, traceId: traceId(req), timestamp: new Date().toISOString() });
+   }
+   return next();
+ }
+
  app.get("/health", (_req, res) => {
    res.json({ code: 0, message: "success", data: { status: "ok" }, traceId: "bff-trace", timestamp: new Date().toISOString() });
  });

  app.get("/bff/ping", (req, res) => {
-   const traceId = req.header("X-Trace-Id") || "bff-trace";
-   res.json({ code: 0, message: "success", data: { message: "bff pong" }, traceId, timestamp: new Date().toISOString() });
+   res.json(withMeta(req, { message: "bff pong" }));
+ });
+
+ app.get("/bff/aggregate", rateLimit, authPassthrough, async (req, res) => {
+   try {
+     const [healthResp, pingResp] = await Promise.all([
+       fetch(`${API_BASE}/health`, { headers: { "X-Trace-Id": traceId(req) } }),
+       fetch(`${API_BASE}/api/v1/ping`, { headers: { "X-Trace-Id": traceId(req) } }),
+     ]);
+
+     const [healthData, pingData] = await Promise.all([healthResp.json(), pingResp.json()]);
+     return res.json(withMeta(req, { health: healthData, ping: pingData }));
+   } catch (error) {
+     return res.status(502).json({
+       code: 10502,
+       message: error instanceof Error ? error.message : "upstream call failed",
+       data: null,
+       traceId: traceId(req),
+       timestamp: new Date().toISOString(),
+     });
+   }
  });

- app.listen(3001, () => {
-   console.log("gateway-bff listening on :3001");
+ app.listen(port, () => {
+   console.log(`gateway-bff listening on :${port}`);
  });
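
A hedged client sketch for the new `/bff/aggregate` route (assumes gateway-bff on its default `:3001`; note the `authPassthrough` middleware only checks that an `Authorization` header is present, it does not validate the token):

```js
// Hypothetical call against the new aggregate route; not part of the template.
const resp = await fetch("http://127.0.0.1:3001/bff/aggregate", {
  headers: {
    Authorization: "Bearer any-token", // missing header -> 401 from authPassthrough
    "X-Trace-Id": "demo-trace",
  },
});

const body = await resp.json();
if (resp.status === 429) console.log("rate limited:", body.message); // >120 hits/min per ip+path
if (resp.status === 502) console.log("upstream failed:", body.message);
if (resp.ok) console.log("aggregated keys:", Object.keys(body.data)); // ["health", "ping"]
```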
package/dist/templates/modules/grpc-service/apps/grpc-service/README.md CHANGED
@@ -1,5 +1,6 @@
  # gRPC Service Module

- - Purpose: stable multi-language internal contracts
- - Includes proto sample and generation script
- - Requires: protoc + language plugins
+ - Runtime server: `go run ./cmd/server`
+ - Contract: `contracts/proto/greeter.proto`
+ - Generation: `bash infra/scripts/gen-proto.sh`
+ - Requires `protoc`, `protoc-gen-go`, `protoc-gen-go-grpc`
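
The server (next hunk) registers the standard `grpc.health.v1` health service on `:9090`. As a quick reachability check without any proto tooling, a hedged Node sketch mirroring the `wait_tcp` probe that `infra/scripts/check.sh` uses:

```js
// Hypothetical TCP reachability probe for the gRPC server's default :9090 port.
// For a real health check, use a gRPC client against grpc.health.v1.Health instead.
import net from "node:net";

const socket = net.connect({ host: "127.0.0.1", port: 9090, timeout: 2000 });
socket.on("connect", () => {
  console.log("grpc-service reachable on :9090");
  socket.end();
});
socket.on("timeout", () => {
  console.log("grpc-service probe timed out");
  socket.destroy();
});
socket.on("error", (err) => console.log("grpc-service unreachable:", err.message));
```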
package/dist/templates/modules/grpc-service/apps/grpc-service/cmd/server/main.go ADDED
@@ -0,0 +1,27 @@
+ package main
+
+ import (
+     "log"
+     "net"
+
+     "google.golang.org/grpc"
+     "google.golang.org/grpc/health"
+     grpcHealth "google.golang.org/grpc/health/grpc_health_v1"
+ )
+
+ func main() {
+     lis, err := net.Listen("tcp", ":9090")
+     if err != nil {
+         log.Fatal(err)
+     }
+
+     srv := grpc.NewServer()
+     healthSrv := health.NewServer()
+     healthSrv.SetServingStatus("grpc-service", grpcHealth.HealthCheckResponse_SERVING)
+     grpcHealth.RegisterHealthServer(srv, healthSrv)
+
+     log.Println("grpc-service listening on :9090")
+     if err := srv.Serve(lis); err != nil {
+         log.Fatal(err)
+     }
+ }
package/dist/templates/modules/grpc-service/apps/grpc-service/go.mod ADDED
@@ -0,0 +1,8 @@
+ module {{PROJECT_NAME}}/apps/grpc-service
+
+ go 1.22
+
+ require (
+     google.golang.org/grpc v1.66.0
+     google.golang.org/protobuf v1.34.2
+ )
package/dist/templates/modules/mq/apps/mq-worker/README.md ADDED
@@ -0,0 +1,6 @@
+ # MQ Worker
+
+ - Consumer: `npm run consume`
+ - Producer: `npm run produce`
+ - Roundtrip test: `npm run roundtrip`
+ - Default NATS URL: `nats://127.0.0.1:4222`
package/dist/templates/modules/mq/apps/mq-worker/package.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "name": "{{PROJECT_NAME}}-mq-worker",
+   "private": true,
+   "version": "0.1.0",
+   "type": "module",
+   "scripts": {
+     "consume": "node scripts/consumer.js",
+     "produce": "node scripts/producer.js",
+     "roundtrip": "node scripts/roundtrip.js",
+     "build": "node -e \"console.log('mq-worker build placeholder')\"",
+     "test": "node scripts/roundtrip.js"
+   },
+   "dependencies": {
+     "nats": "^2.29.1"
+   }
+ }
package/dist/templates/modules/mq/apps/mq-worker/scripts/consumer.js ADDED
@@ -0,0 +1,11 @@
+ import { connect, StringCodec } from "nats";
+
+ const nc = await connect({ servers: process.env.NATS_URL || "nats://127.0.0.1:4222" });
+ const sc = StringCodec();
+
+ const sub = nc.subscribe("app.task.created");
+ console.log("[mq-worker] listening on subject app.task.created");
+
+ for await (const msg of sub) {
+   console.log("[mq-worker] consumed", sc.decode(msg.data));
+ }
package/dist/templates/modules/mq/apps/mq-worker/scripts/producer.js ADDED
@@ -0,0 +1,15 @@
+ import { connect, StringCodec } from "nats";
+
+ const nc = await connect({ servers: process.env.NATS_URL || "nats://127.0.0.1:4222" });
+ const sc = StringCodec();
+
+ const payload = {
+   id: `evt-${Date.now()}`,
+   type: "app.task.created",
+   timestamp: new Date().toISOString(),
+ };
+
+ nc.publish("app.task.created", sc.encode(JSON.stringify(payload)));
+ console.log("[mq-worker] produced", payload);
+
+ await nc.drain();
package/dist/templates/modules/mq/apps/mq-worker/scripts/roundtrip.js ADDED
@@ -0,0 +1,29 @@
+ import { connect, StringCodec } from "nats";
+
+ const nc = await connect({ servers: process.env.NATS_URL || "nats://127.0.0.1:4222" });
+ const sc = StringCodec();
+
+ const subject = "app.task.created";
+ const sub = nc.subscribe(subject, { max: 1 });
+
+ const payload = {
+   id: `evt-${Date.now()}`,
+   type: subject,
+   timestamp: new Date().toISOString(),
+ };
+
+ nc.publish(subject, sc.encode(JSON.stringify(payload)));
+
+ const timeout = setTimeout(async () => {
+   console.error("[mq-worker] roundtrip timeout");
+   await nc.drain();
+   process.exit(1);
+ }, 3000);
+
+ for await (const msg of sub) {
+   const parsed = JSON.parse(sc.decode(msg.data));
+   clearTimeout(timeout);
+   console.log("[mq-worker] roundtrip ok", parsed.id);
+   await nc.drain();
+   process.exit(0);
+ }
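
The roundtrip script publishes, then waits on a `{ max: 1 }` subscription and fails via a 3-second timer. A hedged alternative sketch (not in the template) using the same `nats` package's built-in request-reply, which handles the timeout bookkeeping for you:

```js
// Hypothetical request-reply variant of the roundtrip check; not shipped in the package.
import { connect, StringCodec } from "nats";

const nc = await connect({ servers: process.env.NATS_URL || "nats://127.0.0.1:4222" });
const sc = StringCodec();

// Responder: echoes one request back on its reply subject, then auto-unsubscribes.
const sub = nc.subscribe("app.task.echo", { max: 1 });
(async () => {
  for await (const msg of sub) msg.respond(msg.data);
})();

// Requester: the promise rejects if no reply arrives within `timeout` ms.
const reply = await nc.request("app.task.echo", sc.encode("ping"), { timeout: 3000 });
console.log("[mq-worker] reply:", sc.decode(reply.data));
await nc.drain();
```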
package/dist/templates/modules/mq/infra/mq/README.md CHANGED
@@ -1,5 +1,6 @@
  # MQ Module

  - Purpose: async collaboration between Go/Java/Python services
- - Includes placeholders for Kafka, RabbitMQ, NATS
+ - Local stack: Kafka + RabbitMQ + NATS (`infra/mq/docker-compose.yml`)
+ - Runnable worker example: `apps/mq-worker`
  - Default capabilities: producer/consumer samples, retry + idempotency + DLQ placeholders
package/dist/templates/modules/mq/infra/mq/docker-compose.yml ADDED
@@ -0,0 +1,25 @@
+ services:
+   kafka:
+     image: bitnami/kafka:3.7
+     environment:
+       - KAFKA_CFG_NODE_ID=1
+       - KAFKA_CFG_PROCESS_ROLES=broker,controller
+       - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER
+       - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093
+       - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092
+       - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT
+       - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=1@kafka:9093
+       - KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true
+     ports:
+       - "9092:9092"
+
+   rabbitmq:
+     image: rabbitmq:3-management
+     ports:
+       - "5672:5672"
+       - "15672:15672"
+
+   nats:
+     image: nats:2.10
+     ports:
+       - "4222:4222"
package/dist/templates/modules/mq/infra/mq/scripts/consumer-sample.sh ADDED
@@ -0,0 +1,5 @@
+ #!/usr/bin/env bash
+ set -euo pipefail
+
+ echo "[mq] consumer sample placeholder"
+ echo "Consume event: app.task.created"
package/dist/templates/modules/mq/infra/mq/scripts/producer-sample.sh ADDED
@@ -0,0 +1,5 @@
+ #!/usr/bin/env bash
+ set -euo pipefail
+
+ echo "[mq] producer sample placeholder"
+ echo "Publish event: app.task.created"
package/dist/templates/modules/observability/infra/observability/docker-compose.yml ADDED
@@ -0,0 +1,21 @@
+ services:
+   otel-collector:
+     image: otel/opentelemetry-collector:0.108.0
+     command: ["--config=/etc/otel-collector.yaml"]
+     volumes:
+       - ./otel-collector.yaml:/etc/otel-collector.yaml
+     ports:
+       - "4317:4317"
+       - "4318:4318"
+
+   prometheus:
+     image: prom/prometheus:v2.54.1
+     volumes:
+       - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
+     ports:
+       - "9090:9090"
+
+   grafana:
+     image: grafana/grafana:11.1.5
+     ports:
+       - "3000:3000"
package/dist/templates/modules/observability/infra/observability/scripts/down.sh ADDED
@@ -0,0 +1,4 @@
+ #!/usr/bin/env bash
+ set -euo pipefail
+
+ docker compose -f infra/observability/docker-compose.yml down
package/dist/templates/modules/observability/infra/observability/scripts/up.sh ADDED
@@ -0,0 +1,4 @@
+ #!/usr/bin/env bash
+ set -euo pipefail
+
+ docker compose -f infra/observability/docker-compose.yml up -d
package/dist/templates/modules/python-worker/apps/worker-python/tasks/sample_task.py CHANGED
@@ -1,6 +1,17 @@
+ import random
+ from datetime import datetime, timezone
+
+
  def run_sample_task(payload: dict) -> dict:
+     if payload.get("force_fail"):
+         raise RuntimeError("forced failure")
+
+     if not payload.get("disable_random") and random.randint(1, 8) == 1:
+         raise RuntimeError("simulated intermittent failure")
+
      return {
          "status": "ok",
          "traceId": payload.get("traceId", "generated-trace-id"),
+         "timestamp": datetime.now(timezone.utc).isoformat(),
          "data": payload,
      }
package/dist/templates/modules/python-worker/apps/worker-python/tests/test_worker.py CHANGED
@@ -5,7 +5,7 @@ from tasks.sample_task import run_sample_task

  class WorkerTestCase(unittest.TestCase):
      def test_sample_task(self) -> None:
-         output = run_sample_task({"x": 1})
+         output = run_sample_task({"x": 1, "disable_random": True})
          self.assertEqual(output["status"], "ok")


package/dist/templates/modules/python-worker/apps/worker-python/worker.py CHANGED
@@ -1,9 +1,51 @@
+ from datetime import datetime, timezone
  from tasks.sample_task import run_sample_task


+ class WorkerState:
+     def __init__(self) -> None:
+         self.jobs = {}
+         self.dead_letters = {}
+
+
+ STATE = WorkerState()
+
+
+ def process_job(job_id: str, payload: dict, max_retry: int = 2) -> None:
+     for attempt in range(max_retry + 1):
+         try:
+             STATE.jobs[job_id] = {
+                 "id": job_id,
+                 "attempt": attempt + 1,
+                 "status": "running",
+                 "updatedAt": datetime.now(timezone.utc).isoformat(),
+             }
+             result = run_sample_task(payload)
+             STATE.jobs[job_id] = {
+                 "id": job_id,
+                 "attempt": attempt + 1,
+                 "status": "success",
+                 "result": result,
+                 "updatedAt": datetime.now(timezone.utc).isoformat(),
+             }
+             print(f"[worker-python] success: {STATE.jobs[job_id]}")
+             return
+         except Exception as exc:
+             if attempt >= max_retry:
+                 STATE.dead_letters[job_id] = {
+                     "id": job_id,
+                     "status": "dead-letter",
+                     "error": str(exc),
+                     "updatedAt": datetime.now(timezone.utc).isoformat(),
+                 }
+                 print(f"[worker-python] dead-letter: {STATE.dead_letters[job_id]}")
+                 return
+
+
  def main() -> None:
-     result = run_sample_task({"input": "demo"})
-     print(f"worker result: {result}")
+     process_job("job-1", {"input": "demo"})
+     print(f"[worker-python] jobs={STATE.jobs}")
+     print(f"[worker-python] dead_letters={STATE.dead_letters}")


  if __name__ == "__main__":
package/dist/templates/modules/worker-go/apps/worker-go/cmd/worker/main.go CHANGED
@@ -1,6 +1,7 @@
  package main

  import (
+     "encoding/json"
      "log"
      "time"

@@ -11,6 +12,11 @@ func main() {
      log.Println("worker-go started")
      for {
          tasks.RunHeartbeatTask()
+
+         jobs, _ := json.Marshal(tasks.ListJobs())
+         dlq, _ := json.Marshal(tasks.ListDeadLetters())
+         log.Printf("worker-go state jobs=%s dead_letters=%s", string(jobs), string(dlq))
+
          time.Sleep(10 * time.Second)
      }
  }
package/dist/templates/modules/worker-go/apps/worker-go/internal/tasks/heartbeat.go CHANGED
@@ -1,7 +1,112 @@
  package tasks

- import "log"
+ import (
+     "errors"
+     "fmt"
+     "log"
+     "sync"
+     "time"
+ )
+
+ type JobStatus string
+
+ const (
+     StatusPending    JobStatus = "pending"
+     StatusRunning    JobStatus = "running"
+     StatusSuccess    JobStatus = "success"
+     StatusFailed     JobStatus = "failed"
+     StatusDeadLetter JobStatus = "dead-letter"
+ )
+
+ type Job struct {
+     ID        string
+     Name      string
+     Attempts  int
+     MaxRetry  int
+     Status    JobStatus
+     UpdatedAt time.Time
+ }
+
+ var (
+     mu         sync.Mutex
+     registry   = map[string]*Job{}
+     deadLetter = map[string]*Job{}
+ )
+
+ func RegisterJob(name string, maxRetry int) string {
+     mu.Lock()
+     defer mu.Unlock()
+     id := fmt.Sprintf("job-%d", time.Now().UnixNano())
+     registry[id] = &Job{ID: id, Name: name, MaxRetry: maxRetry, Status: StatusPending, UpdatedAt: time.Now()}
+     return id
+ }
+
+ func RunJob(id string, fn func() error) {
+     mu.Lock()
+     job, ok := registry[id]
+     mu.Unlock()
+     if !ok {
+         log.Printf("job not found: %s", id)
+         return
+     }
+
+     for attempt := 1; attempt <= job.MaxRetry+1; attempt++ {
+         updateStatus(job, StatusRunning)
+         err := fn()
+         if err == nil {
+             job.Attempts = attempt
+             updateStatus(job, StatusSuccess)
+             log.Printf("job=%s success attempts=%d", job.ID, attempt)
+             return
+         }
+
+         job.Attempts = attempt
+         log.Printf("job=%s failed attempts=%d err=%v", job.ID, attempt, err)
+         if attempt > job.MaxRetry {
+             updateStatus(job, StatusDeadLetter)
+             mu.Lock()
+             deadLetter[job.ID] = job
+             mu.Unlock()
+             return
+         }
+         time.Sleep(500 * time.Millisecond)
+     }
+ }
+
+ func updateStatus(job *Job, status JobStatus) {
+     mu.Lock()
+     defer mu.Unlock()
+     job.Status = status
+     job.UpdatedAt = time.Now()
+ }
+
+ func ListJobs() []Job {
+     mu.Lock()
+     defer mu.Unlock()
+     items := make([]Job, 0, len(registry))
+     for _, job := range registry {
+         items = append(items, *job)
+     }
+     return items
+ }
+
+ func ListDeadLetters() []Job {
+     mu.Lock()
+     defer mu.Unlock()
+     items := make([]Job, 0, len(deadLetter))
+     for _, job := range deadLetter {
+         items = append(items, *job)
+     }
+     return items
+ }

  func RunHeartbeatTask() {
-     log.Println("worker-go heartbeat task executed")
+     jobID := RegisterJob("heartbeat", 2)
+     RunJob(jobID, func() error {
+         if time.Now().Unix()%5 == 0 {
+             return errors.New("simulated intermittent failure")
+         }
+         log.Println("worker-go heartbeat task executed")
+         return nil
+     })
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "polyforge-cli",
-   "version": "0.1.0",
+   "version": "0.1.1",
    "description": "PolyForge hybrid full-stack scaffold CLI for go-gin and springboot",
    "main": "dist/index.js",
    "files": [