claude-flow-novice 2.18.24 → 2.18.26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,85 @@
+ # Prometheus Configuration for Hybrid AI Infrastructure
+ # Version: 2.0.0
+
+ global:
+   scrape_interval: 15s
+   evaluation_interval: 15s
+   external_labels:
+     cluster: 'hybrid-ai-infrastructure'
+     environment: 'production'
+
+ # Alertmanager configuration (optional)
+ alerting:
+   alertmanagers:
+     - static_configs:
+         - targets: []
+           # - alertmanager:9093
+
+ # Rule files (optional)
+ rule_files:
+   # - "alerts/*.yml"
+
+ # Scrape configurations
+ scrape_configs:
+   # Prometheus self-monitoring
+   - job_name: 'prometheus'
+     static_configs:
+       - targets: ['localhost:9090']
+         labels:
+           service: 'prometheus'
+
+   # Redis monitoring
+   - job_name: 'redis'
+     static_configs:
+       - targets: ['redis:6379']
+         labels:
+           service: 'redis'
+           role: 'coordination'
+
+   # PostgreSQL monitoring (requires postgres_exporter)
+   - job_name: 'postgresql'
+     static_configs:
+       - targets: ['postgresql:5432']
+         labels:
+           service: 'postgresql'
+           role: 'storage'
+
+   # Grafana monitoring
+   - job_name: 'grafana'
+     static_configs:
+       - targets: ['grafana:3000']
+         labels:
+           service: 'grafana'
+           role: 'visualization'
+
+   # Coordinator monitoring (requires agent health endpoints)
+   - job_name: 'coordinators'
+     scrape_interval: 30s
+     static_configs:
+       - targets:
+           - 'coordinator_marketing:8080'
+           - 'coordinator_engineering:8080'
+           - 'coordinator_sales:8080'
+           - 'coordinator_support:8080'
+           - 'coordinator_finance:8080'
+         labels:
+           role: 'coordinator'
+           provider: 'claude-max'
+
+   # Worker monitoring (dynamic service discovery recommended for production)
+   - job_name: 'workers'
+     scrape_interval: 60s
+     dns_sd_configs:
+       - names:
+           - 'worker.hybrid_network'
+         type: 'A'
+         port: 8080
+     relabel_configs:
+       - source_labels: [__meta_dns_name]
+         target_label: instance
+       - source_labels: []
+         target_label: role
+         replacement: 'worker'
+       - source_labels: []
+         target_label: provider
+         replacement: 'z-ai'
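To sanity-check that the targets declared above are actually being scraped (as the comments note, the postgresql job needs postgres_exporter, and the redis job likewise only exposes /metrics via an exporter), the standard Prometheus HTTP API can be queried for the `up` metric. A minimal Node.js sketch, assuming Prometheus is reachable at localhost:9090 and Node 18+ for the built-in fetch; this is an illustrative addition, not part of the package:

// check-targets.js — minimal sketch; PROM_URL is an assumption, adjust to your deployment.
const PROM_URL = process.env.PROM_URL || 'http://localhost:9090';

async function checkTargets() {
  // `up` is 1 for every target Prometheus scraped successfully on its last attempt, 0 otherwise.
  const res = await fetch(`${PROM_URL}/api/v1/query?query=${encodeURIComponent('up')}`);
  if (!res.ok) throw new Error(`Prometheus query failed: HTTP ${res.status}`);
  const body = await res.json();

  for (const sample of body.data.result) {
    const { job, instance } = sample.metric;
    const healthy = sample.value[1] === '1';
    console.log(`${job} ${instance}: ${healthy ? 'up' : 'DOWN'}`);
  }
}

checkTargets().catch((err) => {
  console.error('Target check failed:', err);
  process.exit(1);
});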
@@ -0,0 +1,162 @@
+ # Promtail Configuration
+ # Task P2-2.3: Centralized Logging with ELK/Loki Stack
+ # Version: 2.9.0
+ #
+ # Promtail is an agent which ships the contents of local logs to a private Loki instance
+ # or Grafana Cloud. It is usually deployed to every machine that has applications which
+ # need to be monitored.
+
+ server:
+   http_listen_port: 9080
+   grpc_listen_port: 0
+   log_level: info
+   log_format: json
+
+ positions:
+   filename: /tmp/positions.yaml
+   sync_period: 10s
+   sync_interval: 10s
+
+ scrape_configs:
+   # Scrape logs from systemd journal
+   - job_name: journal
+     journal:
+       max_age: 24h
+       labels:
+         job: systemd-journal
+         source: journal
+     relabel_configs:
+       - source_labels: ['__journal__systemd_unit']
+         target_label: 'unit'
+       - source_labels: ['__journal_hostname']
+         target_label: 'hostname'
+       - source_labels: ['__journal_priority']
+         target_label: 'severity'
+
+   # Scrape Docker container logs
+   - job_name: docker
+     docker:
+       host: unix:///var/run/docker.sock
+       labels:
+         job: docker-containers
+         source: docker
+     relabel_configs:
+       - source_labels: ['__meta_docker_container_name']
+         target_label: 'container_name'
+       - source_labels: ['__meta_docker_container_id']
+         target_label: 'container_id'
+       - source_labels: ['__meta_docker_container_image_name']
+         target_label: 'image_name'
+       - source_labels: ['__meta_docker_container_network_mode']
+         target_label: 'network_mode'
+       - source_labels: ['__meta_docker_container_log_stream']
+         target_label: 'stream'
+
+   # Scrape application logs from /var/log
+   - job_name: varlog
+     static_configs:
+       - targets:
+           - localhost
+         labels:
+           job: varlogs
+           source: filesystem
+     pipeline_stages:
+       - multiline:
+           line_start_pattern: '^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}'
+       - json:
+           expressions:
+             timestamp: timestamp
+             level: level
+             message: message
+             context: context
+             correlationId: correlationId
+             taskId: taskId
+             agentId: agentId
+             traceId: traceId
+           on_error: keep
+       - labels:
+           timestamp: ''
+           level: ''
+           message: ''
+           context: ''
+           correlationId: ''
+           taskId: ''
+           agentId: ''
+           traceId: ''
+       - drop:
+           expression: '.*TLS.*'
+           on_error: keep
+     file_sd_configs:
+       - files:
+           - /var/log/cfn/**/*.log
+         refresh_interval: 30s
+
+   # Scrape CFN-specific logs
+   - job_name: cfn-logs
+     static_configs:
+       - targets:
+           - localhost
+         labels:
+           job: cfn-application
+           source: cfn
+     pipeline_stages:
+       - multiline:
+           line_start_pattern: '^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}'
+       - json:
+           expressions:
+             timestamp: timestamp
+             level: level
+             message: message
+             context: context
+             correlationId: correlationId
+             taskId: taskId
+             agentId: agentId
+           on_error: keep
+       - labels:
+           timestamp: ''
+           level: ''
+           message: ''
+           context: ''
+           correlationId: ''
+           taskId: ''
+           agentId: ''
+       - timestamp:
+           format: RFC3339Nano
+           source: timestamp
+           on_error: keep
+       - output:
+           source: message
+     file_sd_configs:
+       - files:
+           - /var/log/cfn/*.log
+         refresh_interval: 30s
+
+ clients:
+   - url: http://loki:3100/loki/api/v1/push
+     batchwait: 1s
+     batchsize: 1048576 # 1MB batch size
+     backoff_config:
+       min_backoff: 100ms
+       max_backoff: 10s
+       max_retries: 3
+     timeout: 10s
+     external_labels:
+       environment: production
+       cluster: cfn
+
+ # Limits configuration
+ limits_config:
+   # Maximum size of a log entry
+   max_entry_limit_bytes: 262144 # 256KB
+   # Maximum number of log entries per second
+   max_streams: 1000
+   max_global_streams_matched_per_user: 10000
+
+ # Target discovery settings
+ target_config:
+   # How often to sync targets
+   sync_period: 10s
+
+ # Tracing (optional)
+ # tracing:
+ #   enabled: false
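Once Promtail ships logs with the labels extracted by the pipelines above (job, level, correlationId, taskId, agentId), they can be pulled back out of Loki with a LogQL selector. A minimal Node.js sketch against Loki's query_range endpoint, assuming Loki is reachable at the same http://loki:3100 address used in the clients block; this is an illustrative addition, not part of the package:

// query-loki.js — minimal sketch; LOKI_URL and the label values are assumptions.
const LOKI_URL = process.env.LOKI_URL || 'http://loki:3100';

async function recentCfnErrors(minutes = 15) {
  // `job` and `level` are attached by the cfn-logs pipeline_stages above.
  const query = '{job="cfn-application", level="error"}';
  const end = new Date();
  const start = new Date(end.getTime() - minutes * 60 * 1000);
  const params = new URLSearchParams({
    query,
    start: start.toISOString(), // Loki accepts RFC3339 timestamps for the range
    end: end.toISOString(),
    limit: '100',
  });

  const res = await fetch(`${LOKI_URL}/loki/api/v1/query_range?${params}`);
  if (!res.ok) throw new Error(`Loki query failed: HTTP ${res.status}`);
  const body = await res.json();

  for (const stream of body.data.result) {
    for (const [ts, line] of stream.values) {
      // Loki returns per-entry timestamps as nanosecond strings.
      console.log(new Date(Number(ts) / 1e6).toISOString(), line);
    }
  }
}

recentCfnErrors().catch((err) => console.error('Loki query failed:', err));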
@@ -0,0 +1,33 @@
+ # Redis Production Configuration
+
+ # Network
+ bind 0.0.0.0
+ port 6379
+ protected-mode no
+
+ # General
+ daemonize no
+ pidfile /var/run/redis_6379.pid
+ loglevel notice
+ logfile ""
+
+ # Snapshotting
+ save 900 1
+ save 300 10
+ save 60 10000
+
+ # Memory
+ maxmemory 256mb
+ maxmemory-policy allkeys-lru
+
+ # Persistence
+ appendonly yes
+ appendfsync everysec
+
+ # Performance
+ tcp-keepalive 300
+ timeout 0
+
+ # Client connections
+ tcp-backlog 511
+ databases 16
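The settings above (maxmemory, the allkeys-lru eviction policy, AOF persistence) can be confirmed at runtime with CONFIG GET. A minimal Node.js sketch using the node-redis v4 client; the connection URL is an assumption, and this is an illustrative addition, not part of the package:

// verify-redis-config.js — minimal sketch; assumes the redis.conf above is loaded by an
// instance reachable at REDIS_URL (default redis://localhost:6379).
const { createClient } = require('redis');

async function verifyRedisConfig() {
  const client = createClient({ url: process.env.REDIS_URL || 'redis://localhost:6379' });
  client.on('error', (err) => console.error('Redis error:', err));
  await client.connect();

  // CONFIG GET replies with [name, value] pairs; compare against the file above.
  for (const param of ['maxmemory', 'maxmemory-policy', 'appendonly', 'appendfsync']) {
    const [, value] = await client.sendCommand(['CONFIG', 'GET', param]);
    console.log(`${param} = ${value}`);
  }

  await client.quit();
}

verifyRedisConfig().catch((err) => {
  console.error('Config check failed:', err);
  process.exit(1);
});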
@@ -0,0 +1,115 @@
+ // Redis Configuration
+
+ const { createClient } = require('redis');
+
+ const redisConfig = {
+   // Primary connection settings
+   primary: {
+     url: process.env.REDIS_URL || 'redis://localhost:6379',
+     username: process.env.REDIS_USERNAME || '',
+     password: process.env.REDIS_PASSWORD || '',
+
+     // Connection options
+     connectionOptions: {
+       // Connection timeout in milliseconds
+       connectTimeout: 5000,
+
+       // Retry strategy for connection failures
+       retryStrategy: (retries) => {
+         if (retries > 3) {
+           console.error('Max Redis connection retries exceeded');
+           return new Error('Redis connection failed');
+         }
+         // Exponential backoff
+         return Math.min(retries * 100, 3000);
+       },
+
+       // Socket connection settings
+       socket: {
+         keepAlive: true,
+         connectTimeout: 5000,
+         reconnectStrategy: (retries) => {
+           if (retries > 3) return new Error('Max reconnect attempts');
+           return Math.min(retries * 100, 3000);
+         }
+       }
+     },
+
+     // Logging configuration
+     logging: {
+       enabled: process.env.REDIS_LOGGING === 'true' || false,
+       level: process.env.REDIS_LOG_LEVEL || 'error'
+     }
+   },
+
+   // Backup/Fallback Redis configurations
+   fallback: [
+     {
+       url: process.env.REDIS_FALLBACK_1 || '',
+       username: process.env.REDIS_FALLBACK_USERNAME_1 || '',
+       password: process.env.REDIS_FALLBACK_PASSWORD_1 || ''
+     }
+   ],
+
+   // Redis availability check configuration
+   healthCheck: {
+     interval: 30000, // Check every 30 seconds
+     timeout: 5000,   // 5-second timeout for health checks
+     retries: 3       // Number of retries before marking unavailable
+   },
+
+   // Optional advanced configurations
+   advanced: {
+     clusterMode: process.env.REDIS_CLUSTER_MODE === 'true' || false,
+     sentinelMode: process.env.REDIS_SENTINEL_MODE === 'true' || false
+   }
+ };
+
+ // Async Redis client creation with comprehensive error handling
+ async function createRedisClient(config = redisConfig.primary) {
+   try {
+     const client = createClient({
+       url: config.url,
+       username: config.username,
+       password: config.password,
+       ...config.connectionOptions
+     });
+
+     // Event handlers for robust connection management
+     client.on('error', (err) => {
+       console.error('Redis Client Error:', err);
+     });
+
+     client.on('connect', () => {
+       console.log('Redis client connected successfully');
+     });
+
+     client.on('reconnecting', () => {
+       console.log('Redis client attempting to reconnect');
+     });
+
+     await client.connect();
+
+     return client;
+   } catch (error) {
+     console.error('Failed to create Redis client:', error);
+     throw error;
+   }
+ }
+
+ // Redis availability check
+ async function checkRedisAvailability(client) {
+   try {
+     await client.ping();
+     return true;
+   } catch (error) {
+     console.error('Redis availability check failed:', error);
+     return false;
+   }
+ }
+
+ module.exports = {
+   redisConfig,
+   createRedisClient,
+   checkRedisAvailability
+ };
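A short usage sketch for the exports above, wiring the primary and fallback entries together. The `./redis-config` require path is an assumption (the diff does not show file names), and this is an illustrative addition, not part of the package:

// usage sketch — falls back to the first configured fallback instance when the primary fails.
const { redisConfig, createRedisClient, checkRedisAvailability } = require('./redis-config');

async function connectWithFallback() {
  try {
    const client = await createRedisClient(redisConfig.primary);
    if (await checkRedisAvailability(client)) return client;
    await client.quit();
  } catch (err) {
    console.error('Primary Redis unavailable, trying fallback:', err.message);
  }

  // Only meaningful when REDIS_FALLBACK_1 is set; otherwise the array holds an empty URL.
  const fallback = redisConfig.fallback.find((entry) => entry.url);
  if (!fallback) throw new Error('No reachable Redis instance configured');
  return createRedisClient(fallback);
}

connectWithFallback()
  .then(() => console.log('Connected to Redis'))
  .catch((err) => {
    console.error('Redis connection failed:', err);
    process.exit(1);
  });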