claude-flow-novice 2.14.36 → 2.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/.claude/commands/cfn-loop-cli.md +491 -456
  2. package/.claude/commands/switch-api.md +33 -12
  3. package/.claude/skills/cfn-agent-spawning/get-agent-provider-env.sh +107 -0
  4. package/.claude/skills/cfn-agent-spawning/parse-agent-provider.sh +59 -0
  5. package/.claude/skills/cfn-docker-agent-spawning/spawn-agent.sh +24 -6
  6. package/.claude/skills/cfn-loop-orchestration/helpers/spawn-agents.sh +18 -9
  7. package/.claude/skills/cfn-redis-coordination/invoke-waiting-mode.sh +220 -220
  8. package/claude-assets/agents/cfn-dev-team/developers/backend-developer.md +5 -0
  9. package/claude-assets/agents/custom/claude-code-expert.md +151 -2
  10. package/claude-assets/agents/docker-coordinators/cfn-docker-v3-coordinator.md +43 -3
  11. package/claude-assets/commands/cfn-loop-cli.md +491 -456
  12. package/claude-assets/commands/switch-api.md +33 -12
  13. package/claude-assets/skills/cfn-agent-spawning/get-agent-provider-env.sh +107 -0
  14. package/claude-assets/skills/cfn-agent-spawning/parse-agent-provider.sh +59 -0
  15. package/claude-assets/skills/cfn-docker-agent-spawning/spawn-agent.sh +24 -6
  16. package/claude-assets/skills/cfn-error-logging/SKILL.md +339 -0
  17. package/claude-assets/skills/cfn-error-logging/cleanup-error-logs.sh +334 -0
  18. package/claude-assets/skills/cfn-error-logging/integrate-cli.sh +232 -0
  19. package/claude-assets/skills/cfn-error-logging/integrate-docker.sh +294 -0
  20. package/claude-assets/skills/cfn-error-logging/invoke-error-logging.sh +839 -0
  21. package/claude-assets/skills/cfn-error-logging/test-error-logging.sh +475 -0
  22. package/claude-assets/skills/cfn-loop-orchestration/helpers/spawn-agents.sh +18 -9
  23. package/claude-assets/skills/cfn-process-instrumentation/instrument-process.sh +5 -3
  24. package/claude-assets/skills/cfn-redis-coordination/invoke-waiting-mode.sh +220 -220
  25. package/claude-assets/skills/cfn-task-mode-sanitize/task-mode-env-sanitizer.sh +21 -9
  26. package/claude-assets/skills/cfn-validation-runner-instrumentation/wrapped-executor.sh +3 -1
  27. package/dist/hello.js +27 -3
  28. package/dist/hello.js.map +1 -1
  29. package/dist/server.js +194 -0
  30. package/dist/server.js.map +1 -0
  31. package/dist/server.test.js +207 -0
  32. package/dist/server.test.js.map +1 -0
  33. package/package.json +2 -1
  34. package/scripts/switch-api.sh +140 -12
package/claude-assets/skills/cfn-error-logging/test-error-logging.sh (new file)
@@ -0,0 +1,475 @@
+ #!/bin/bash
+
+ ##############################################################################
+ # CFN Error Logging - Test Script
+ # Version: 1.0.0
+ #
+ # Comprehensive test suite for CFN error logging functionality
+ # Tests error capture, report generation, and cleanup operations
+ #
+ # Usage: ./test-error-logging.sh [--full] [--quick] [--component <name>]
+ ##############################################################################
+
+ set -euo pipefail
+
+ # Test configuration
+ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+ ERROR_LOGGING_SCRIPT="$SCRIPT_DIR/invoke-error-logging.sh"
+ CLEANUP_SCRIPT="$SCRIPT_DIR/cleanup-error-logs.sh"
+ CLI_INTEGRATION="$SCRIPT_DIR/integrate-cli.sh"
+ DOCKER_INTEGRATION="$SCRIPT_DIR/integrate-docker.sh"
+
+ # Test data
+ TEST_TASK_ID="cfn-test-$(date +%s%N | tail -c 7)"
+ TEST_LOG_BASE_DIR="/tmp/cfn_error_logs"
+
+ # Test flags
+ QUICK_TEST=false
+ FULL_TEST=false
+ COMPONENT_TEST=""
+
+ # Colors for output
+ RED='\033[0;31m'
+ GREEN='\033[0;32m'
+ YELLOW='\033[1;33m'
+ BLUE='\033[0;34m'
+ NC='\033[0m' # No Color
+
+ # Parse arguments
+ while [[ $# -gt 0 ]]; do
+     case $1 in
+         --quick)
+             QUICK_TEST=true
+             shift
+             ;;
+         --full)
+             FULL_TEST=true
+             shift
+             ;;
+         --component)
+             COMPONENT_TEST="$2"
+             shift 2
+             ;;
+         --help|-h)
+             cat << EOF
+ CFN Error Logging - Test Script
+
+ Usage: $0 [OPTIONS]
+
+ Options:
+   --quick              Run quick tests only (skip slow operations)
+   --full               Run full comprehensive test suite
+   --component <name>   Test specific component only
+   --help, -h           Show this help message
+
+ Components:
+   invoke    Test main error logging script
+   cli       Test CLI integration
+   docker    Test Docker integration
+   cleanup   Test cleanup functionality
+   reports   Test report generation
+
+ Examples:
+   $0 --quick              # Quick validation tests
+   $0 --full               # Full comprehensive tests
+   $0 --component invoke   # Test main script only
+   $0 --component cleanup  # Test cleanup functionality
+ EOF
+             exit 0
+             ;;
+         *)
+             echo "❌ Unknown option: $1"
+             echo "Use --help for usage information"
+             exit 1
+             ;;
+     esac
+ done
+
+ # Test utilities
+ test_log() {
+     echo -e "${BLUE}[TEST]${NC} $*"
+ }
+
+ pass_log() {
+     echo -e "${GREEN}[PASS]${NC} $*"
+ }
+
+ fail_log() {
+     echo -e "${RED}[FAIL]${NC} $*"
+ }
+
+ warn_log() {
+     echo -e "${YELLOW}[WARN]${NC} $*"
+ }
+
+ info_log() {
+     echo -e "${BLUE}[INFO]${NC} $*"
+ }
+
+ # Test result tracking
+ TESTS_RUN=0
+ TESTS_PASSED=0
+ TESTS_FAILED=0
+
+ run_test() {
+     local test_name="$1"
+     local test_command="$2"
+
+     TESTS_RUN=$((TESTS_RUN + 1))
+     test_log "Running: $test_name"
+
+     if eval "$test_command" >/dev/null 2>&1; then
+         pass_log "$test_name"
+         TESTS_PASSED=$((TESTS_PASSED + 1))
+         return 0
+     else
+         fail_log "$test_name"
+         TESTS_FAILED=$((TESTS_FAILED + 1))
+         return 1
+     fi
+ }
+
+ # Setup test environment
+ setup_test_env() {
+     info_log "Setting up test environment..."
+
+     # Create test directory
+     mkdir -p "$TEST_LOG_BASE_DIR"
+
+     # Verify scripts exist and are executable
+     for script in "$ERROR_LOGGING_SCRIPT" "$CLEANUP_SCRIPT" "$CLI_INTEGRATION" "$DOCKER_INTEGRATION"; do
+         if [ ! -f "$script" ]; then
+             fail_log "Script not found: $script"
+             exit 1
+         fi
+         chmod +x "$script"
+     done
+
+     # Set up test task ID
+     export TASK_ID="$TEST_TASK_ID"
+
+     pass_log "Test environment setup complete"
+ }
+
+ # Cleanup test environment
+ cleanup_test_env() {
+     info_log "Cleaning up test environment..."
+
+     # Remove test-specific logs
+     if [ -n "$TEST_TASK_ID" ]; then
+         find "$TEST_LOG_BASE_DIR" -name "*${TEST_TASK_ID}*" -delete 2>/dev/null || true
+     fi
+
+     pass_log "Test environment cleaned up"
+ }
+
+ # Test 1: Script existence and permissions
+ test_script_permissions() {
+     run_test "Error logging script exists and is executable" "[ -x '$ERROR_LOGGING_SCRIPT' ]"
+     run_test "Cleanup script exists and is executable" "[ -x '$CLEANUP_SCRIPT' ]"
+     run_test "CLI integration script exists and is executable" "[ -x '$CLI_INTEGRATION' ]"
+     run_test "Docker integration script exists and is executable" "[ -x '$DOCKER_INTEGRATION' ]"
+ }
+
+ # Test 2: Help functionality
+ test_help_functionality() {
+     run_test "Error logging script shows help" "$ERROR_LOGGING_SCRIPT --help"
+     run_test "Cleanup script shows help" "$CLEANUP_SCRIPT --help"
+ }
+
+ # Test 3: Dependency validation
+ test_dependency_validation() {
+     run_test "Validate dependencies check" "$ERROR_LOGGING_SCRIPT --action validate"
+
+     # Test with missing jq (simulate)
+     local original_path="$PATH"
+     PATH="/nonexistent:$PATH"
+     run_test "Handles missing dependencies gracefully" "$ERROR_LOGGING_SCRIPT --action diagnostics || true"
+     PATH="$original_path"
+ }
+
+ # Test 4: Error capture functionality
+ test_error_capture() {
+     local test_task_id="capture-test-$(date +%s)"
+
+     run_test "Capture CLI error" "$ERROR_LOGGING_SCRIPT --action capture --task-id '$test_task_id' --error-type 'cli' --error-message 'Test CLI error' --exit-code 1"
+
+     # Verify error log was created
+     run_test "Error log file created" "ls '$TEST_LOG_BASE_DIR/cfn-error-$test_task_id'* 2>/dev/null | grep -q ."
+
+     # Test with context
+     local context_task_id="context-test-$(date +%s)"
+     local context_json='{"test": true, "component": "test-suite"}'
+
+     run_test "Capture error with context" "$ERROR_LOGGING_SCRIPT --action capture --task-id '$context_task_id' --error-type 'orchestrator' --error-message 'Test with context' --context '$context_json'"
+ }
+
+ # Test 5: Report generation
+ test_report_generation() {
+     local report_task_id="report-test-$(date +%s)"
+
+     # First capture an error
+     "$ERROR_LOGGING_SCRIPT" --action capture --task-id "$report_task_id" --error-type "agent-spawn" --error-message "Test error for report" >/dev/null 2>&1 || true
+
+     run_test "Generate Markdown report" "$ERROR_LOGGING_SCRIPT --action report --task-id '$report_task_id' --format markdown"
+     run_test "Generate JSON report" "$ERROR_LOGGING_SCRIPT --action report --task-id '$report_task_id' --format json"
+
+     # Test report file creation
+     run_test "Markdown report file created" "ls '$TEST_LOG_BASE_DIR/reports/cfn-report-$report_task_id'*'.md' 2>/dev/null | grep -q ."
+     run_test "JSON report generation works" "$ERROR_LOGGING_SCRIPT --action report --task-id '$report_task_id' --format json"
+ }
+
+ # Test 6: CLI integration
+ test_cli_integration() {
+     # Source the CLI integration
+     source "$CLI_INTEGRATION"
+
+     run_test "CLI integration loads without errors" "true"
+
+     # Test function availability
+     run_test "cfn_capture_error function defined" "type cfn_capture_error >/dev/null"
+     run_test "cfn_generate_report function defined" "type cfn_generate_report >/dev/null"
+     run_test "cfn_cli_wrapper function defined" "type cfn_cli_wrapper >/dev/null"
+
+     # Test error capture
+     local cli_test_id="cli-integration-$(date +%s)"
+     run_test "CLI error capture works" "cfn_capture_error '$cli_test_id' 'cli-test' 'CLI integration test' 1"
+ }
+
+ # Test 7: Docker integration
+ test_docker_integration() {
+     # Source the Docker integration
+     source "$DOCKER_INTEGRATION"
+
+     run_test "Docker integration loads without errors" "true"
+
+     # Test function availability
+     run_test "cfn_capture_docker_error function defined" "type cfn_capture_docker_error >/dev/null"
+     run_test "cfn_docker_wrapper function defined" "type cfn_docker_wrapper >/dev/null"
+     run_test "cfn_docker_cleanup function defined" "type cfn_docker_cleanup >/dev/null"
+
+     # Test Docker environment detection
+     run_test "Docker environment detection works" "is_docker_environment || true"
+ }
+
+
+ # Test 8: Cleanup functionality
+ test_cleanup_functionality() {
+     if [ "$QUICK_TEST" = true ]; then
+         warn_log "Skipping cleanup tests in quick mode"
+         return 0
+     fi
+
+     # Create some test files
+     local test_dir="$TEST_LOG_BASE_DIR/test-cleanup"
+     mkdir -p "$test_dir"
+
+     # Create old test files (using touch to set old timestamp)
+     local old_date
+     old_date=$(date -d "10 days ago" +%Y-%m-%d 2>/dev/null || date -v-10d +%Y-%m-%d)
+
+     touch -d "$old_date" "$test_dir/old-error.json" 2>/dev/null || touch -t "${old_date//-}" "$test_dir/old-error.json" 2>/dev/null || touch "$test_dir/old-error.json"
+     touch -d "$old_date" "$test_dir/old-report.md" 2>/dev/null || touch -t "${old_date//-}" "$test_dir/old-report.md" 2>/dev/null || touch "$test_dir/old-report.md"
+
+     # Create recent test files
+     echo '{"test": "recent"}' > "$test_dir/recent-error.json"
+     echo "# Recent Report" > "$test_dir/recent-report.md"
+
+     # Test dry-run cleanup
+     run_test "Cleanup dry-run works" "$CLEANUP_SCRIPT --dry-run --retention-days 5"
+
+     # Test actual cleanup
+     run_test "Cleanup removes old files" "$CLEANUP_SCRIPT --force --retention-days 5"
+
+     # Verify recent files still exist
+     run_test "Recent files preserved after cleanup" "[ -f '$test_dir/recent-error.json' ] && [ -f '$test_dir/recent-report.md' ]"
+
+     # Cleanup test directory
+     rm -rf "$test_dir"
+ }
+
+ # Test 9: System diagnostics
+ test_system_diagnostics() {
+     run_test "System diagnostics execute" "$ERROR_LOGGING_SCRIPT --action diagnostics"
+
+     # Test diagnostics output format
+     local diagnostics_output
+     diagnostics_output=$("$ERROR_LOGGING_SCRIPT" --action diagnostics 2>&1)
+
+     run_test "Diagnostics include system info" "echo '$diagnostics_output' | grep -q 'System Information'"
+     run_test "Diagnostics include resource info" "echo '$diagnostics_output' | grep -q 'Resources'"
+ }
+
+ # Test 10: List functionality
+ test_list_functionality() {
+     # Ensure we have some logs to list
+     "$ERROR_LOGGING_SCRIPT" --action capture --task-id "list-test-$(date +%s)" --error-type "test" --error-message "Test for list functionality" >/dev/null 2>&1 || true
+
+     run_test "List recent errors" "$ERROR_LOGGING_SCRIPT --action list --format table"
+     run_test "List errors as JSON" "$ERROR_LOGGING_SCRIPT --action list --format json"
+ }
+
+ # Test 11: Error handling
+ test_error_handling() {
+     run_test "Handles invalid action gracefully" "$ERROR_LOGGING_SCRIPT --action invalid-action || true"
+     run_test "Handles missing task ID gracefully" "$ERROR_LOGGING_SCRIPT --action capture || true"
+     run_test "Handles invalid JSON context gracefully" "$ERROR_LOGGING_SCRIPT --action capture --task-id 'test' --context 'invalid-json' || true"
+ }
+
+ # Test 12: Performance test (only in full mode)
+ test_performance() {
+     if [ "$FULL_TEST" != true ]; then
+         warn_log "Skipping performance tests (use --full for comprehensive testing)"
+         return 0
+     fi
+
+     info_log "Running performance tests..."
+
+     local start_time
+     start_time=$(date +%s)
+
+     # Capture multiple errors rapidly
+     for i in {1..10}; do
+         "$ERROR_LOGGING_SCRIPT" --action capture --task-id "perf-test-$i" --error-type "performance" --error-message "Performance test $i" >/dev/null 2>&1 || true
+     done
+
+     local end_time
+     end_time=$(date +%s)
+     local duration=$((end_time - start_time))
+
+     run_test "Multiple error captures complete within 30 seconds" "[ $duration -lt 30 ]"
+
+     # Test report generation performance
+     start_time=$(date +%s)
+
+     for i in {1..5}; do
+         "$ERROR_LOGGING_SCRIPT" --action report --task-id "perf-test-$i" --format markdown >/dev/null 2>&1 || true
+     done
+
+     end_time=$(date +%s)
+     duration=$((end_time - start_time))
+
+     run_test "Multiple report generations complete within 20 seconds" "[ $duration -lt 20 ]"
+ }
+
+ # Run specific component test
+ run_component_test() {
+     case "$COMPONENT_TEST" in
+         invoke)
+             test_help_functionality
+             test_dependency_validation
+             test_error_capture
+             test_report_generation
+             test_list_functionality
+             test_error_handling
+             ;;
+         cli)
+             test_cli_integration
+             ;;
+         docker)
+             test_docker_integration
+             ;;
+         cleanup)
+             test_cleanup_functionality
+             ;;
+         reports)
+             test_report_generation
+             ;;
+         *)
+             fail_log "Unknown component: $COMPONENT_TEST"
+             echo "Available components: invoke, cli, docker, cleanup, reports"
+             exit 1
+             ;;
+     esac
+ }
+
+ # Run all tests
+ run_all_tests() {
+     info_log "Starting CFN Error Logging test suite..."
+     info_log "Test Task ID: $TEST_TASK_ID"
+
+     setup_test_env
+
+     # Run core tests
+     test_script_permissions
+     test_help_functionality
+     test_dependency_validation
+     test_error_capture
+     test_report_generation
+     test_cli_integration
+     test_docker_integration
+     test_system_diagnostics
+     test_list_functionality
+     test_error_handling
+
+     # Run conditional tests
+     if [ "$COMPONENT_TEST" = "" ]; then
+         test_cleanup_functionality
+         if [ "$FULL_TEST" = true ]; then
+             test_performance
+         fi
+     fi
+
+     cleanup_test_env
+ }
+
+ # Show test results
+ show_test_results() {
+     echo ""
+     echo "=================================="
+     echo "🏁 CFN Error Logging Test Results"
+     echo "=================================="
+     echo "Tests run: $TESTS_RUN"
+     echo -e "Passed: ${GREEN}$TESTS_PASSED${NC}"
+     echo -e "Failed: ${RED}$TESTS_FAILED${NC}"
+
+     if [ "$TESTS_FAILED" -eq 0 ]; then
+         echo -e "${GREEN}✅ All tests passed!${NC}"
+         echo ""
+         echo "The CFN Error Logging skill is working correctly."
+         echo "You can now use it to capture and diagnose CFN Loop failures."
+     else
+         echo -e "${RED}❌ Some tests failed.${NC}"
+         echo ""
+         echo "Please review the failures above and fix any issues."
+         echo "Common issues:"
+         echo " - Missing dependencies (jq, bc, etc.)"
+         echo " - Permission issues"
+         echo " - Script syntax errors"
+     fi
+
+     echo ""
+     echo "Next steps:"
+     echo "1. Test with real CFN Loop failures"
+     echo "2. Integrate with your CLI and Docker workflows"
+     echo "3. Configure automated cleanup schedules"
+
+     # Return appropriate exit code
+     if [ "$TESTS_FAILED" -eq 0 ]; then
+         return 0
+     else
+         return 1
+     fi
+ }
+
+ # Main execution
+ main() {
+     echo "CFN Error Logging - Test Suite"
+     echo "================================"
+
+     if [ -n "$COMPONENT_TEST" ]; then
+         info_log "Testing component: $COMPONENT_TEST"
+         setup_test_env
+         run_component_test
+         cleanup_test_env
+     else
+         run_all_tests
+     fi
+
+     show_test_results
+ }
+
+ # Handle script interruption
+ trap 'echo -e "\n${YELLOW}Test interrupted${NC}"; cleanup_test_env; exit 1' INT TERM
+
+ # Run main function
+ main "$@"
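The integration tests above pin down the call shapes the new skill expects: integrate-cli.sh is sourced and then provides cfn_capture_error (task-id, error-type, message, exit-code) and cfn_generate_report, while invoke-error-logging.sh is driven through --action capture/report/list/diagnostics. A minimal sketch of wiring that into a custom script follows; it is inferred only from the invocations exercised by this test file, and SKILL_DIR plus the guarded command are placeholders, not paths or commands shipped by the package.

#!/bin/bash
# Hedged sketch based solely on calls made by test-error-logging.sh above.
SKILL_DIR="/path/to/your/.claude/skills/cfn-error-logging"   # assumption: wherever the skill is installed
TASK_ID="my-task-$(date +%s)"

# test_cli_integration sources this file and then expects these helpers to exist.
source "$SKILL_DIR/integrate-cli.sh"

some_cfn_step() { false; }   # stand-in for the real command you want to guard
some_cfn_step
status=$?

if [ "$status" -ne 0 ]; then
    # Same argument order the suite tests: task-id, error-type, message, exit-code.
    cfn_capture_error "$TASK_ID" "cli" "CFN step failed" "$status"
    # Mirror test_report_generation: render a Markdown report for the captured error.
    "$SKILL_DIR/invoke-error-logging.sh" --action report --task-id "$TASK_ID" --format markdown
fi

The cfn_cli_wrapper function the suite also checks for presumably packages this capture-on-failure pattern, but its signature is not visible in this diff.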
skills/cfn-loop-orchestration/helpers/spawn-agents.sh (changed under both package/.claude and package/claude-assets)
@@ -190,15 +190,24 @@ spawn_agents_with_context() {
  ((injection_success_count++))
  fi

- # Spawn agent in background with enriched context
- npx claude-flow-novice agent "$safe_agent_type" \
-     --task-id "$safe_task_id" \
-     --agent-id "$safe_agent_id" \
-     --iteration "$iteration" \
-     --context "$context_to_use" &
-
- # Store PID for monitoring
- AGENT_PID=$!
+ # Set provider environment variables for this agent (custom routing support)
+ # Spawn in subshell to isolate provider env vars per agent
+ (
+     source .claude/skills/cfn-agent-spawning/get-agent-provider-env.sh "$safe_agent_type"
+
+     # Spawn agent in background with enriched context and custom provider
+     npx claude-flow-novice agent "$safe_agent_type" \
+         --task-id "$safe_task_id" \
+         --agent-id "$safe_agent_id" \
+         --iteration "$iteration" \
+         --context "$context_to_use" &
+
+     AGENT_PID=$!
+     echo $AGENT_PID
+ )
+
+ # Capture PID from subshell output
+ AGENT_PID=$(jobs -p | tail -1)
  "$REDIS_COORD_SKILL/store-context.sh" \
      --task-id "$task_id" \
      --key "${UNIQUE_AGENT_ID}:pid" \
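The new block above wraps each spawn in a backgrounded subshell so whatever get-agent-provider-env.sh exports stays scoped to that single agent rather than leaking into the parent shell or sibling agents. Below is a minimal standalone sketch of that scoping idea; the variable name, URL, agent types, and sleep are illustrative stand-ins, and it reads $! directly after backgrounding the subshell instead of reproducing the hunk's jobs -p capture.

#!/bin/bash
# Sketch of per-agent environment isolation via backgrounded subshells.
# AGENT_PROVIDER_URL and the sleep are stand-ins, not values from the package.

for agent_type in backend-developer tester; do
    (
        # Exported only inside this subshell and its children.
        export AGENT_PROVIDER_URL="https://provider.example/${agent_type}"
        sleep 2   # stand-in for: npx claude-flow-novice agent "$agent_type" ...
    ) &
    echo "spawned $agent_type as PID $!"   # $! is the subshell's PID, visible to the parent
done

# The export never reached the parent shell:
echo "parent sees AGENT_PROVIDER_URL='${AGENT_PROVIDER_URL:-}'"
wait   # block until both agent subshells exit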
package/claude-assets/skills/cfn-process-instrumentation/instrument-process.sh
@@ -19,9 +19,11 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
  PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

  # Default limits
- DEFAULT_MEMORY_LIMIT="2G"
- DEFAULT_CPU_LIMIT="80%"
- DEFAULT_TIMEOUT="600"
+ # Note: DEFAULT_TIMEOUT may be set by wrapped-executor.sh (300)
+ # Only set if not already defined
+ : "${DEFAULT_MEMORY_LIMIT:="2G"}"
+ : "${DEFAULT_CPU_LIMIT:="80%"}"
+ : "${DEFAULT_TIMEOUT:="600"}"

  # Process tracking
  AGENT_ID="${AGENT_ID:-$(hostname)-$$}"
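The : "${VAR:=value}" lines above assign a default only when the variable is unset or empty, which is why a caller such as wrapped-executor.sh can provide DEFAULT_TIMEOUT=300 (per the note in the hunk) without this script clobbering it. A tiny self-contained illustration of the idiom, with arbitrary values:

#!/bin/bash
# Demonstrates the set-only-if-unset-or-empty idiom from the hunk above.
set -u   # the ':=' form assigns, so it is safe even under nounset

DEFAULT_TIMEOUT=300                  # simulate the value wrapped-executor.sh would provide

: "${DEFAULT_MEMORY_LIMIT:="2G"}"    # unset       -> takes the default
: "${DEFAULT_TIMEOUT:="600"}"        # already 300 -> left untouched

echo "memory=$DEFAULT_MEMORY_LIMIT timeout=$DEFAULT_TIMEOUT"   # prints: memory=2G timeout=300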