claude-flow-novice 2.15.1 → 2.15.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/cfn-extras/agents/google-sheets-specialist.md +614 -0
- package/.claude/commands/cfn/create-handoff.md +224 -0
- package/.claude/hooks/cfn-BACKUP_USAGE.md +243 -243
- package/.claude/hooks/cfn-invoke-security-validation.sh +69 -69
- package/.claude/hooks/cfn-post-edit-cfn-retrospective.sh +78 -78
- package/.claude/hooks/cfn-post-edit.config.json +44 -44
- package/.claude/skills/agent-lifecycle/SKILL.md +60 -0
- package/.claude/skills/agent-lifecycle/execute-lifecycle-hook.sh +573 -0
- package/.claude/skills/agent-lifecycle/simple-audit.sh +31 -0
- package/.claude/skills/cfn-hybrid-routing/check-dependencies.sh +51 -51
- package/.claude/skills/cfn-loop-validation/orchestrate-cfn-loop.sh +252 -252
- package/.claude/skills/cfn-redis-coordination/agent-recovery.sh +74 -74
- package/.claude/skills/cfn-redis-coordination/get-context.sh +112 -112
- package/.claude/skills/cfn-transparency-middleware/middleware-config.sh +28 -28
- package/.claude/skills/cfn-transparency-middleware/performance-benchmark.sh +78 -78
- package/.claude/skills/cfn-transparency-middleware/test-integration.sh +161 -161
- package/.claude/skills/cfn-transparency-middleware/test-transparency-skill.sh +367 -367
- package/.claude/skills/cfn-transparency-middleware/tests/input-validation.sh +92 -92
- package/.claude/skills/cfn-transparency-middleware/wrap-agent.sh +131 -131
- package/claude-assets/agents/cfn-dev-team/coordinators/handoff-coordinator.md +662 -0
- package/claude-assets/agents/cfn-dev-team/dev-ops/docker-specialist.md +29 -0
- package/claude-assets/cfn-extras/agents/google-sheets-specialist.md +614 -0
- package/claude-assets/commands/cfn/create-handoff.md +224 -0
- package/claude-assets/hooks/cfn-BACKUP_USAGE.md +243 -243
- package/claude-assets/hooks/cfn-invoke-security-validation.sh +69 -69
- package/claude-assets/hooks/cfn-post-edit-cfn-retrospective.sh +78 -78
- package/claude-assets/hooks/cfn-post-edit.config.json +44 -44
- package/claude-assets/hooks/cfn-post-execution/memory-cleanup.sh +19 -19
- package/claude-assets/hooks/cfn-pre-execution/memory-check.sh +19 -19
- package/claude-assets/skills/agent-lifecycle/execute-lifecycle-hook.sh +572 -572
- package/claude-assets/skills/agent-lifecycle/simple-audit.sh +30 -30
- package/claude-assets/skills/cfn-automatic-memory-persistence/persist-agent-output.sh +48 -48
- package/claude-assets/skills/cfn-automatic-memory-persistence/query-agent-history.sh +34 -34
- package/claude-assets/skills/cfn-deliverable-validation/confidence-calculator.sh +261 -261
- package/claude-assets/skills/cfn-expert-update/update-expert.sh +345 -345
- package/claude-assets/skills/cfn-hybrid-routing/check-dependencies.sh +51 -51
- package/claude-assets/skills/cfn-intervention-detector/detect-intervention.sh +110 -110
- package/claude-assets/skills/cfn-intervention-orchestrator/execute-intervention.sh +58 -58
- package/claude-assets/skills/cfn-loop-validation/orchestrate-cfn-loop.sh +252 -252
- package/claude-assets/skills/cfn-loop2-output-processing/process-validator-output.sh +275 -275
- package/claude-assets/skills/cfn-memory-management/check-memory.sh +159 -159
- package/claude-assets/skills/cfn-memory-management/cleanup-memory.sh +196 -196
- package/claude-assets/skills/cfn-node-heap-sizer/task-mode-heap-limiter.sh +325 -325
- package/claude-assets/skills/cfn-playbook-auto-update/auto-update-playbook.sh +85 -85
- package/claude-assets/skills/cfn-redis-coordination/agent-recovery.sh +74 -74
- package/claude-assets/skills/cfn-redis-coordination/get-context.sh +112 -112
- package/claude-assets/skills/cfn-scope-simplifier/simplify-scope.sh +67 -67
- package/claude-assets/skills/cfn-specialist-injection/recommend-specialist.sh +56 -56
- package/claude-assets/skills/cfn-standardized-error-handling/capture-agent-error.sh +86 -86
- package/claude-assets/skills/cfn-standardized-error-handling/test-error-handling.sh +165 -165
- package/claude-assets/skills/cfn-task-config-init/initialize-config.sh +264 -264
- package/claude-assets/skills/cfn-task-decomposition/task-decomposer.sh +278 -278
- package/claude-assets/skills/cfn-transparency-middleware/middleware-config.sh +28 -28
- package/claude-assets/skills/cfn-transparency-middleware/performance-benchmark.sh +78 -78
- package/claude-assets/skills/cfn-transparency-middleware/test-integration.sh +161 -161
- package/claude-assets/skills/cfn-transparency-middleware/test-transparency-skill.sh +367 -367
- package/claude-assets/skills/cfn-transparency-middleware/tests/input-validation.sh +92 -92
- package/claude-assets/skills/cfn-transparency-middleware/wrap-agent.sh +131 -131
- package/claude-assets/skills/docker-build/SKILL.md +96 -203
- package/claude-assets/skills/docker-build/build.sh +73 -73
- package/claude-assets/skills/integration/agent-handoff.sh +494 -0
- package/claude-assets/skills/integration/file-operations.sh +414 -0
- package/claude-assets/skills/workflow-codification/APPROVAL_WORKFLOW.md +806 -0
- package/claude-assets/skills/workflow-codification/COST_TRACKING.md +637 -0
- package/claude-assets/skills/workflow-codification/EDGE_CASE_TRACKING.md +404 -0
- package/claude-assets/skills/workflow-codification/README_PHASE4.md +457 -0
- package/claude-assets/skills/workflow-codification/SKILL.md +110 -0
- package/claude-assets/skills/workflow-codification/analyze-patterns.sh +899 -0
- package/claude-assets/skills/workflow-codification/approval-workflow.sh +514 -0
- package/claude-assets/skills/workflow-codification/generate-skill-update.sh +525 -0
- package/claude-assets/skills/workflow-codification/review-skill.sh +643 -0
- package/claude-assets/skills/workflow-codification/templates/email-notification.txt +114 -0
- package/claude-assets/skills/workflow-codification/templates/slack-notification.md +85 -0
- package/claude-assets/skills/workflow-codification/test-integration.sh +281 -0
- package/claude-assets/skills/workflow-codification/track-cost-savings.sh +445 -0
- package/claude-assets/skills/workflow-codification/track-edge-case.sh +323 -0
- package/dist/agents/agent-loader.js +165 -146
- package/dist/agents/agent-loader.js.map +1 -1
- package/dist/cli/config-manager.js +91 -109
- package/dist/cli/config-manager.js.map +1 -1
- package/dist/integration/DatabaseHandoff.js +507 -0
- package/dist/integration/DatabaseHandoff.js.map +1 -0
- package/dist/integration/StandardAdapter.js +291 -0
- package/dist/integration/StandardAdapter.js.map +1 -0
- package/dist/lib/agent-output-parser.js +518 -0
- package/dist/lib/agent-output-parser.js.map +1 -0
- package/dist/lib/agent-output-validator.js +950 -0
- package/dist/lib/agent-output-validator.js.map +1 -0
- package/dist/lib/artifact-registry.js +443 -0
- package/dist/lib/artifact-registry.js.map +1 -0
- package/dist/lib/config-validator.js +687 -0
- package/dist/lib/config-validator.js.map +1 -0
- package/dist/types/agent-output.js +44 -0
- package/dist/types/agent-output.js.map +1 -0
- package/dist/types/config.js +28 -0
- package/dist/types/config.js.map +1 -0
- package/package.json +2 -1
- package/scripts/artifact-cleanup.sh +392 -0
- package/scripts/build-linux.sh +78 -0
- package/scripts/deploy-production.sh +355 -355
- package/scripts/docker-playwright-fix.sh +311 -311
- package/scripts/docker-rebuild-all-agents.sh +127 -127
- package/scripts/memory-leak-prevention.sh +305 -305
- package/scripts/migrate-artifacts.sh +563 -0
- package/scripts/migrate-yaml-to-json.sh +465 -0
- package/scripts/run-marketing-tests.sh +42 -42
- package/scripts/update_paths.sh +46 -46
|
@@ -0,0 +1,899 @@
|
|
|
1
|
+
#!/bin/bash
#
# Pattern Analyzer for Workflow Codification System
# Analyzes ACE reflections to detect repeated workflow patterns suitable for codification
# Usage: ./analyze-patterns.sh [OPTIONS]

set -euo pipefail

# ============================================================================
# CONFIGURATION
# ============================================================================
# Database connection defaults; each may be overridden via CFN_DB_* env vars.
# NOTE(review): SCRIPT_DIR is not referenced anywhere in the visible portion
# of this script — confirm it is used elsewhere before removing.
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly DEFAULT_DB_HOST="${CFN_DB_HOST:-localhost}"
readonly DEFAULT_DB_PORT="${CFN_DB_PORT:-5432}"
readonly DEFAULT_DB_NAME="${CFN_DB_NAME:-cfn_workflow}"
readonly DEFAULT_DB_USER="${CFN_DB_USER:-cfn_user}"
# Pattern-detection thresholds; see usage() for their semantics.
readonly DEFAULT_TIME_WINDOW_DAYS=90
readonly DEFAULT_MIN_OCCURRENCES=5
readonly DEFAULT_MIN_SIMILARITY=0.85
readonly DEFAULT_MIN_CONFIDENCE=0.90
readonly OUTPUT_DIR="/tmp/workflow-patterns"

# Colors for output (ANSI escape sequences; NC resets to the default color).
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
|
|
29
|
+
|
|
30
|
+
# ============================================================================
|
|
31
|
+
# LOGGING FUNCTIONS
|
|
32
|
+
# ============================================================================
|
|
33
|
+
# Emit a timestamped informational message on stderr.
log() {
  local ts
  ts=$(date '+%Y-%m-%d %H:%M:%S')
  printf '%b\n' "${BLUE}[${ts}]${NC} $*" >&2
}
|
|
36
|
+
|
|
37
|
+
# Emit a success message on stderr.
log_success() {
  printf '%b\n' "${GREEN}[SUCCESS]${NC} $*" >&2
}
|
|
40
|
+
|
|
41
|
+
# Emit an error message on stderr.
log_error() {
  printf '%b\n' "${RED}[ERROR]${NC} $*" >&2
}
|
|
44
|
+
|
|
45
|
+
# Emit a warning message on stderr.
log_warning() {
  printf '%b\n' "${YELLOW}[WARNING]${NC} $*" >&2
}
|
|
48
|
+
|
|
49
|
+
# ============================================================================
|
|
50
|
+
# USAGE FUNCTION
|
|
51
|
+
# ============================================================================
|
|
52
|
+
# Print the CLI help text to stdout. The heredoc delimiter is unquoted so
# that $0 expands to the invoked script path.
usage() {
  cat << EOF
Pattern Analyzer for Workflow Codification System

Analyzes ACE reflections from PostgreSQL to detect repeated workflow patterns
suitable for codification into executable skills.

Usage: $0 [OPTIONS]

Options:
  --db-host HOST        PostgreSQL host (default: localhost)
  --db-port PORT        PostgreSQL port (default: 5432)
  --db-name NAME        Database name (default: cfn_workflow)
  --db-user USER        Database user (default: cfn_user)
  --db-password PASS    Database password (env: CFN_DB_PASSWORD)
  --time-window DAYS    Analysis time window in days (default: 90)
  --min-occurrences N   Minimum pattern occurrences (default: 5)
  --min-similarity N    Minimum similarity score 0.0-1.0 (default: 0.85)
  --min-confidence N    Minimum confidence score 0.0-1.0 (default: 0.90)
  --output-dir DIR      Output directory for reports (default: /tmp/workflow-patterns)
  --output-format FMT   Output format: json|summary|both (default: both)
  --insert-db           Insert results into workflow_patterns table
  --verbose             Enable verbose logging
  --help                Show this help message

Environment Variables:
  CFN_DB_HOST           PostgreSQL host
  CFN_DB_PORT           PostgreSQL port
  CFN_DB_NAME           Database name
  CFN_DB_USER           Database user
  CFN_DB_PASSWORD       Database password

Examples:
  # Basic analysis with default settings
  $0

  # Custom thresholds with database insertion
  $0 --min-occurrences 10 --min-similarity 0.90 --insert-db

  # Verbose mode with custom output directory
  $0 --verbose --output-dir ./patterns --output-format json

  # Production environment with environment variables
  export CFN_DB_PASSWORD=secret
  $0 --db-host db.production.com --insert-db

Output:
  - Pattern report in JSON format
  - Summary statistics
  - Optional database insertion into workflow_patterns table

EOF
}
|
|
105
|
+
|
|
106
|
+
# ============================================================================
|
|
107
|
+
# PARAMETER VALIDATION
|
|
108
|
+
# ============================================================================
|
|
109
|
+
# Validate that a value is a float within [min, max].
# Arguments:
#   $1 - value to check
#   $2 - human-readable name used in error messages
#   $3 - lower bound (default: 0.0)
#   $4 - upper bound (default: 1.0)
# Returns: 0 if valid; 1 otherwise (error reported via log_error).
validate_float() {
  local value="$1"
  local name="$2"
  local min="${3:-0.0}"
  local max="${4:-1.0}"

  # Accept "1", "1.5", and ".5" forms. The original regex
  # (^[0-9]+\.?[0-9]*$) rejected leading-dot floats such as ".85".
  if ! [[ "$value" =~ ^([0-9]+\.?[0-9]*|\.[0-9]+)$ ]]; then
    log_error "$name must be a valid number: $value"
    return 1
  fi

  # Range check via awk instead of bc: awk is POSIX-mandated and present on
  # minimal systems where bc often is not.
  if ! awk -v v="$value" -v lo="$min" -v hi="$max" 'BEGIN { exit !(v >= lo && v <= hi) }'; then
    log_error "$name must be between $min and $max: $value"
    return 1
  fi

  return 0
}
|
|
125
|
+
|
|
126
|
+
# Validate that a value is a non-negative integer of at least a minimum.
# Arguments:
#   $1 - value to check
#   $2 - human-readable name used in error messages
#   $3 - minimum allowed value (default: 1)
# Returns: 0 if valid; 1 otherwise (error reported via log_error).
validate_integer() {
  local value="$1" name="$2" min="${3:-1}"

  # Digits only — a case pattern avoids spawning a regex match.
  case "$value" in
    ''|*[!0-9]*)
      log_error "$name must be a valid integer: $value"
      return 1
      ;;
  esac

  if (( value < min )); then
    log_error "$name must be at least $min: $value"
    return 1
  fi

  return 0
}
|
|
141
|
+
|
|
142
|
+
# ============================================================================
|
|
143
|
+
# DATABASE FUNCTIONS
|
|
144
|
+
# ============================================================================
|
|
145
|
+
# Verify that PostgreSQL is reachable with the given credentials.
# Arguments:
#   $1 - host, $2 - port, $3 - database name, $4 - user,
#   $5 - password (optional; exported as PGPASSWORD when non-empty)
# Returns: 0 when a trivial query succeeds; 1 otherwise.
check_postgresql_connection() {
  local host="$1" port="$2" dbname="$3" user="$4" password="${5:-}"

  log "Checking PostgreSQL connection to $host:$port/$dbname"

  # The PostgreSQL client must be installed for anything else to work.
  if ! command -v psql > /dev/null 2>&1; then
    log_error "psql command not found. Please install PostgreSQL client."
    return 1
  fi

  local conn_str="host=$host port=$port dbname=$dbname user=$user"
  # psql picks the password up from the environment.
  [[ -z "$password" ]] || export PGPASSWORD="$password"

  # Probe connectivity with a trivial query.
  if ! psql "$conn_str" -c "SELECT 1" > /dev/null 2>&1; then
    log_error "Cannot connect to PostgreSQL at $host:$port/$dbname"
    return 1
  fi

  log_success "PostgreSQL connection successful"
  return 0
}
|
|
175
|
+
|
|
176
|
+
# Run a SQL query and print the result as comma-separated rows.
# Arguments:
#   $1 - host, $2 - port, $3 - database name, $4 - user,
#   $5 - password (exported as PGPASSWORD when non-empty), $6 - SQL text
# Outputs: unaligned, tuples-only rows with "," as the field separator.
# Returns: 0 on success; 1 when psql fails.
execute_query() {
  local host="$1" port="$2" dbname="$3" user="$4" password="$5" query="$6"

  local conn_str="host=$host port=$port dbname=$dbname user=$user"
  [[ -z "$password" ]] || export PGPASSWORD="$password"

  # -t: tuples only, -A: unaligned, -F",": comma field separator.
  if ! psql "$conn_str" -t -A -F"," -c "$query" 2>/dev/null; then
    log_error "Query execution failed"
    return 1
  fi
}
|
|
195
|
+
|
|
196
|
+
# ============================================================================
|
|
197
|
+
# PATTERN ANALYSIS FUNCTIONS
|
|
198
|
+
# ============================================================================
|
|
199
|
+
|
|
200
|
+
# Generate normalized workflow signature from steps
|
|
201
|
+
# Generate normalized workflow signature from steps
# Arguments:
#   $1 - JSON array of workflow step strings
# Outputs: the signature on stdout (steps joined by " → "), or "unknown"
#          when jq fails or is unavailable.
generate_workflow_signature() {
  local workflow_steps="$1"

  # Extract commands from workflow steps (ignore parameters)
  # Normalize whitespace and case
  # Join with delimiter
  # Each step is trimmed, whitespace-collapsed, and truncated to its first
  # three tokens so differing arguments still map to the same signature.
  # NOTE(review): despite the comment above, no case normalization is
  # actually performed here — confirm whether that is intended.
  echo "$workflow_steps" | jq -r '
    if type == "array" then
      [.[] |
        gsub("^\\s+|\\s+$"; "") |
        gsub("\\s+"; " ") |
        split(" ")[0:3] |
        join(" ")
      ] |
      join(" → ")
    else
      empty
    end' 2>/dev/null || echo "unknown"
}
|
|
220
|
+
|
|
221
|
+
# Calculate Jaccard similarity between two sets
|
|
222
|
+
# Calculate Jaccard similarity between two sets
# Arguments:
#   $1 - JSON array of steps (set A)
#   $2 - JSON array of steps (set B)
# Outputs: |A ∩ B| / |A ∪ B| on stdout (0 when the union is empty).
# NOTE(review): if jq fails (bad JSON, jq missing), the error is suppressed
# and an EMPTY string is echoed, which will break downstream bc arithmetic
# in calculate_similarity_score — consider falling back to "0".
calculate_jaccard_similarity() {
  local steps_a="$1"
  local steps_b="$2"

  # Use jq to calculate set intersection and union.
  # Intersection is computed via double set-difference: A - (A - B).
  local similarity
  similarity=$(jq -n \
    --argjson a "$steps_a" \
    --argjson b "$steps_b" \
    '
    ($a | unique) as $set_a |
    ($b | unique) as $set_b |
    ($set_a + $set_b | unique) as $union |
    ($set_a - ($set_a - $set_b)) as $intersection |
    if ($union | length) > 0 then
      ($intersection | length) / ($union | length)
    else
      0
    end
    ' 2>/dev/null)

  echo "$similarity"
}
|
|
245
|
+
|
|
246
|
+
# Calculate average pairwise similarity across reflection group
|
|
247
|
+
# Calculate average pairwise similarity across reflection group.
# Arguments:
#   $1 - JSON array of reflection objects, each with a .workflow_steps array
# Outputs: average Jaccard similarity (scale=3) on stdout; "1.0" for groups
#          of fewer than two reflections.
calculate_similarity_score() {
  local reflections_json="$1"

  local count
  count=$(echo "$reflections_json" | jq 'length')

  # A single reflection is trivially identical to itself.
  if (( count < 2 )); then
    echo "1.0"
    return
  fi

  local total_similarity=0
  local comparisons=0

  # Accumulate similarity over every unordered pair (i, j).
  for (( i=0; i<count-1; i++ )); do
    for (( j=i+1; j<count; j++ )); do
      local steps_a
      local steps_b

      steps_a=$(echo "$reflections_json" | jq -c ".[$i].workflow_steps")
      steps_b=$(echo "$reflections_json" | jq -c ".[$j].workflow_steps")

      local similarity
      similarity=$(calculate_jaccard_similarity "$steps_a" "$steps_b")

      total_similarity=$(echo "$total_similarity + $similarity" | bc -l)
      # BUG FIX: the original used ((comparisons++)). A post-increment from 0
      # evaluates to 0, so the (( )) returns a non-zero status and the whole
      # script aborts here under `set -euo pipefail`. Plain assignment is safe.
      comparisons=$((comparisons + 1))
    done
  done

  # Average over all comparisons.
  if (( comparisons > 0 )); then
    echo "scale=3; $total_similarity / $comparisons" | bc -l
  else
    echo "0.0"
  fi
}
|
|
286
|
+
|
|
287
|
+
# Check if workflow is deterministic
|
|
288
|
+
# Check if workflow is deterministic
# Arguments:
#   $1 - JSON array of reflection objects (.workflow_steps, .output)
# Outputs: "true" or "false" on stdout.
check_deterministic() {
  local reflections_json="$1"

  # Heuristic 1: Check for non-deterministic patterns in workflow steps
  # (randomness, time/uuid generation, or network access in any step).
  # NOTE(review): if jq errors, has_nondeterministic is empty — not "true" —
  # so this check silently passes; confirm that is acceptable.
  local has_nondeterministic
  has_nondeterministic=$(echo "$reflections_json" | jq -r '
    [.[] | .workflow_steps[] | select(
      test("random|timestamp|date|uuid|\\$\\(date|Math\\.random|rand\\(") or
      test("api\\..*\\.com|http://|https://") or
      test("curl |wget |fetch\\(")
    )] | length > 0
  ' 2>/dev/null)

  if [[ "$has_nondeterministic" == "true" ]]; then
    echo "false"
    return
  fi

  # Heuristic 2: Check output variance across the group's .output values.
  local unique_outputs
  local total_outputs

  unique_outputs=$(echo "$reflections_json" | jq '[.[].output] | unique | length' 2>/dev/null || echo "0")
  total_outputs=$(echo "$reflections_json" | jq 'length' 2>/dev/null || echo "1")

  # If more than 30% output variance, likely not deterministic.
  # On bc failure the variance defaults to 1.0, i.e. "not deterministic" —
  # a conservative fallback.
  local variance
  variance=$(echo "scale=3; $unique_outputs / $total_outputs" | bc -l 2>/dev/null || echo "1.0")

  if (( $(echo "$variance > 0.3" | bc -l) )); then
    echo "false"
  else
    echo "true"
  fi
}
|
|
323
|
+
|
|
324
|
+
# Estimate monthly cost savings from codifying workflow
|
|
325
|
+
# Estimate monthly cost savings from codifying workflow.
# Arguments:
#   $1 - number of occurrences observed in the analysis window
#   $2 - window length in days (default: 90)
# Outputs: estimated monthly USD savings, two decimals with a leading zero
#          (e.g. "0.10"), on stdout.
estimate_cost_savings() {
  local occurrence_count="$1"
  local days_in_window="${2:-90}"

  # Cost-model constants.
  local ai_input_tokens=5000
  local ai_output_tokens=2000
  local token_cost_per_million=0.50  # $0.50 per 1M tokens (Z.ai pricing)
  local script_cost=0.0001           # Negligible per-run script cost

  # One awk invocation replaces four `local v=$(echo … | bc -l)` pipelines:
  # - `local v=$(cmd)` masks the command's exit status (bc failures went
  #   unnoticed under set -e);
  # - awk is POSIX-mandated, while bc is frequently absent on minimal hosts;
  # - printf "%.2f" always emits a leading zero ("0.10"), whereas bc printed
  #   ".10", which downstream `jq tonumber` can reject as invalid JSON.
  awk -v count="$occurrence_count" \
      -v days="$days_in_window" \
      -v in_tok="$ai_input_tokens" \
      -v out_tok="$ai_output_tokens" \
      -v rate="$token_cost_per_million" \
      -v script_cost="$script_cost" \
      'BEGIN {
         ai_cost = ((in_tok + out_tok) / 1000000) * rate
         per_execution = ai_cost - script_cost
         monthly_executions = (count / days) * 30
         printf "%.2f\n", monthly_executions * per_execution
       }'
}
|
|
349
|
+
|
|
350
|
+
# Calculate priority based on multiple factors
|
|
351
|
+
# Calculate priority based on multiple factors.
# Arguments:
#   $1 - occurrence count (integer)
#   $2 - estimated monthly savings in USD (float)
#   $3 - number of distinct teams affected (integer)
#   $4 - average confidence score 0.0-1.0 (float)
# Outputs: "high", "medium", or "low" on stdout.
calculate_priority() {
  local occurrence_count="$1"
  local estimated_savings="$2"
  local teams_count="$3"
  local confidence_score="$4"

  # Float >= comparison. Uses awk (POSIX-mandated) instead of the original
  # `(( $(echo "a >= b" | bc -l) ))`, which errors out under set -e when bc
  # is not installed.
  _float_gte() { awk -v a="$1" -v b="$2" 'BEGIN { exit !(a >= b) }'; }

  local score=0

  # Factor 1: Occurrence count (weight: 40%)
  if (( occurrence_count >= 20 )); then
    score=$((score + 40))
  elif (( occurrence_count >= 10 )); then
    score=$((score + 25))
  else
    score=$((score + 10))
  fi

  # Factor 2: Cost savings (weight: 30%)
  if _float_gte "$estimated_savings" 50; then
    score=$((score + 30))
  elif _float_gte "$estimated_savings" 20; then
    score=$((score + 20))
  else
    score=$((score + 10))
  fi

  # Factor 3: Teams affected (weight: 20%)
  if (( teams_count >= 3 )); then
    score=$((score + 20))
  elif (( teams_count >= 2 )); then
    score=$((score + 12))
  else
    score=$((score + 5))
  fi

  # Factor 4: Confidence score (weight: 10%)
  if _float_gte "$confidence_score" 0.90; then
    score=$((score + 10))
  elif _float_gte "$confidence_score" 0.80; then
    score=$((score + 6))
  else
    score=$((score + 3))
  fi

  # Map the weighted score to a priority bucket.
  if (( score >= 75 )); then
    echo "high"
  elif (( score >= 50 )); then
    echo "medium"
  else
    echo "low"
  fi
}
|
|
404
|
+
|
|
405
|
+
# ============================================================================
|
|
406
|
+
# MAIN PATTERN ANALYSIS FUNCTION
|
|
407
|
+
# ============================================================================
|
|
408
|
+
analyze_workflow_patterns() {
|
|
409
|
+
local db_host="$1"
|
|
410
|
+
local db_port="$2"
|
|
411
|
+
local db_name="$3"
|
|
412
|
+
local db_user="$4"
|
|
413
|
+
local db_password="$5"
|
|
414
|
+
local time_window="$6"
|
|
415
|
+
local min_occurrences="$7"
|
|
416
|
+
local min_similarity="$8"
|
|
417
|
+
local min_confidence="$9"
|
|
418
|
+
local insert_db="${10}"
|
|
419
|
+
local verbose="${11}"
|
|
420
|
+
|
|
421
|
+
log "Starting workflow pattern analysis"
|
|
422
|
+
log "Parameters: time_window=${time_window}d, min_occurrences=${min_occurrences}, min_similarity=${min_similarity}, min_confidence=${min_confidence}"
|
|
423
|
+
|
|
424
|
+
# Create output directory
|
|
425
|
+
mkdir -p "$OUTPUT_DIR"
|
|
426
|
+
|
|
427
|
+
# STEP 1: Query ACE reflections from last N days
|
|
428
|
+
log "Querying ACE reflections from last ${time_window} days"
|
|
429
|
+
|
|
430
|
+
local query="
|
|
431
|
+
SELECT
|
|
432
|
+
cr.id,
|
|
433
|
+
cr.task_id,
|
|
434
|
+
cr.team_id,
|
|
435
|
+
cr.content,
|
|
436
|
+
cr.workflow_steps::text,
|
|
437
|
+
cr.confidence,
|
|
438
|
+
cr.created_at::text,
|
|
439
|
+
COALESCE(cr.metadata->>'tags', '[]') as tags,
|
|
440
|
+
COALESCE(cr.metadata->>'domain', 'general') as domain,
|
|
441
|
+
COALESCE(cr.metadata->>'output', '') as output
|
|
442
|
+
FROM context_reflections cr
|
|
443
|
+
WHERE
|
|
444
|
+
cr.created_at > NOW() - INTERVAL '${time_window} days' AND
|
|
445
|
+
cr.confidence >= 0.75 AND
|
|
446
|
+
jsonb_array_length(COALESCE(cr.workflow_steps, '[]'::jsonb)) >= 2
|
|
447
|
+
ORDER BY cr.created_at DESC
|
|
448
|
+
"
|
|
449
|
+
|
|
450
|
+
local reflections_raw
|
|
451
|
+
reflections_raw=$(execute_query "$db_host" "$db_port" "$db_name" "$db_user" "$db_password" "$query")
|
|
452
|
+
|
|
453
|
+
if [[ -z "$reflections_raw" ]]; then
|
|
454
|
+
log_warning "No reflections found in the specified time window"
|
|
455
|
+
echo '{"patterns": [], "metadata": {"total_reflections": 0, "patterns_found": 0}}'
|
|
456
|
+
return 0
|
|
457
|
+
fi
|
|
458
|
+
|
|
459
|
+
# Convert CSV to JSON
|
|
460
|
+
local reflections_json
|
|
461
|
+
reflections_json=$(echo "$reflections_raw" | awk -F',' '
|
|
462
|
+
BEGIN { printf "[" }
|
|
463
|
+
{
|
|
464
|
+
if (NR > 1) printf ","
|
|
465
|
+
printf "{\"id\":\"%s\",\"task_id\":\"%s\",\"team_id\":\"%s\",\"content\":\"%s\",\"workflow_steps\":%s,\"confidence\":%s,\"created_at\":\"%s\",\"tags\":%s,\"domain\":\"%s\",\"output\":\"%s\"}",
|
|
466
|
+
$1, $2, $3, $4, $5, $6, $7, $8, $9, $10
|
|
467
|
+
}
|
|
468
|
+
END { printf "]" }
|
|
469
|
+
' 2>/dev/null)
|
|
470
|
+
|
|
471
|
+
local total_reflections
|
|
472
|
+
total_reflections=$(echo "$reflections_json" | jq 'length' 2>/dev/null || echo "0")
|
|
473
|
+
|
|
474
|
+
log "Retrieved $total_reflections reflections"
|
|
475
|
+
|
|
476
|
+
if [[ "$verbose" == "true" ]]; then
|
|
477
|
+
log "Sample reflection: $(echo "$reflections_json" | jq '.[0]' 2>/dev/null)"
|
|
478
|
+
fi
|
|
479
|
+
|
|
480
|
+
# STEP 2: Group reflections by workflow signature
|
|
481
|
+
log "Grouping reflections by workflow similarity"
|
|
482
|
+
|
|
483
|
+
# Create associative array for workflow groups
|
|
484
|
+
declare -A workflow_groups
|
|
485
|
+
local signatures=()
|
|
486
|
+
|
|
487
|
+
# Process each reflection
|
|
488
|
+
for idx in $(seq 0 $((total_reflections - 1))); do
|
|
489
|
+
local reflection
|
|
490
|
+
reflection=$(echo "$reflections_json" | jq -c ".[$idx]")
|
|
491
|
+
|
|
492
|
+
local workflow_steps
|
|
493
|
+
workflow_steps=$(echo "$reflection" | jq -c '.workflow_steps')
|
|
494
|
+
|
|
495
|
+
local signature
|
|
496
|
+
signature=$(generate_workflow_signature "$workflow_steps")
|
|
497
|
+
|
|
498
|
+
# Add to group
|
|
499
|
+
if [[ -z "${workflow_groups[$signature]:-}" ]]; then
|
|
500
|
+
workflow_groups["$signature"]="$reflection"
|
|
501
|
+
signatures+=("$signature")
|
|
502
|
+
else
|
|
503
|
+
workflow_groups["$signature"]=$(echo "[${workflow_groups[$signature]},$reflection]" | jq -c '.')
|
|
504
|
+
fi
|
|
505
|
+
done
|
|
506
|
+
|
|
507
|
+
log "Found ${#signatures[@]} unique workflow signatures"
|
|
508
|
+
|
|
509
|
+
# STEP 3: Filter groups with >= min_occurrences
|
|
510
|
+
log "Filtering patterns with >= $min_occurrences occurrences"
|
|
511
|
+
|
|
512
|
+
local candidate_patterns=()
|
|
513
|
+
|
|
514
|
+
for signature in "${signatures[@]}"; do
|
|
515
|
+
local group="${workflow_groups[$signature]}"
|
|
516
|
+
local group_count
|
|
517
|
+
|
|
518
|
+
# Handle both single reflection and array
|
|
519
|
+
if echo "$group" | jq -e 'type == "array"' &>/dev/null; then
|
|
520
|
+
group_count=$(echo "$group" | jq 'length')
|
|
521
|
+
else
|
|
522
|
+
group="[$group]"
|
|
523
|
+
group_count=1
|
|
524
|
+
fi
|
|
525
|
+
|
|
526
|
+
if (( group_count >= min_occurrences )); then
|
|
527
|
+
# Calculate similarity score
|
|
528
|
+
local similarity
|
|
529
|
+
similarity=$(calculate_similarity_score "$group")
|
|
530
|
+
|
|
531
|
+
# Calculate average confidence
|
|
532
|
+
local avg_confidence
|
|
533
|
+
avg_confidence=$(echo "$group" | jq '[.[].confidence] | add / length')
|
|
534
|
+
|
|
535
|
+
# Check if deterministic
|
|
536
|
+
local is_deterministic
|
|
537
|
+
is_deterministic=$(check_deterministic "$group")
|
|
538
|
+
|
|
539
|
+
if [[ "$verbose" == "true" ]]; then
|
|
540
|
+
log "Pattern: $signature - occurrences=$group_count, similarity=$similarity, confidence=$avg_confidence, deterministic=$is_deterministic"
|
|
541
|
+
fi
|
|
542
|
+
|
|
543
|
+
# Apply filters
|
|
544
|
+
if (( $(echo "$similarity >= $min_similarity" | bc -l) )) && \
|
|
545
|
+
(( $(echo "$avg_confidence >= $min_confidence" | bc -l) )) && \
|
|
546
|
+
[[ "$is_deterministic" == "true" ]]; then
|
|
547
|
+
|
|
548
|
+
# Extract common workflow steps
|
|
549
|
+
local common_steps
|
|
550
|
+
common_steps=$(echo "$group" | jq '[.[0].workflow_steps]' | jq -c '.[0]')
|
|
551
|
+
|
|
552
|
+
# Extract unique teams
|
|
553
|
+
local teams_affected
|
|
554
|
+
teams_affected=$(echo "$group" | jq -r '[.[].team_id] | unique | join(",")')
|
|
555
|
+
local teams_count
|
|
556
|
+
teams_count=$(echo "$group" | jq '[.[].team_id] | unique | length')
|
|
557
|
+
|
|
558
|
+
# Estimate cost savings
|
|
559
|
+
local estimated_savings
|
|
560
|
+
estimated_savings=$(estimate_cost_savings "$group_count" "$time_window")
|
|
561
|
+
|
|
562
|
+
# Calculate priority
|
|
563
|
+
local priority
|
|
564
|
+
priority=$(calculate_priority "$group_count" "$estimated_savings" "$teams_count" "$avg_confidence")
|
|
565
|
+
|
|
566
|
+
# Create pattern object
|
|
567
|
+
local pattern
|
|
568
|
+
pattern=$(jq -n \
|
|
569
|
+
--arg signature "$signature" \
|
|
570
|
+
--argjson steps "$common_steps" \
|
|
571
|
+
--arg count "$group_count" \
|
|
572
|
+
--arg teams "$teams_affected" \
|
|
573
|
+
--arg similarity "$similarity" \
|
|
574
|
+
--arg confidence "$avg_confidence" \
|
|
575
|
+
--arg deterministic "$is_deterministic" \
|
|
576
|
+
--arg savings "$estimated_savings" \
|
|
577
|
+
--arg priority "$priority" \
|
|
578
|
+
'{
|
|
579
|
+
pattern_name: $signature,
|
|
580
|
+
workflow_steps: $steps,
|
|
581
|
+
occurrence_count: ($count | tonumber),
|
|
582
|
+
teams_affected: ($teams | split(",")),
|
|
583
|
+
similarity_score: ($similarity | tonumber),
|
|
584
|
+
confidence_score: ($confidence | tonumber),
|
|
585
|
+
deterministic: ($deterministic == "true"),
|
|
586
|
+
estimated_savings_usd: ($savings | tonumber),
|
|
587
|
+
priority: $priority,
|
|
588
|
+
status: "detected"
|
|
589
|
+
}')
|
|
590
|
+
|
|
591
|
+
candidate_patterns+=("$pattern")
|
|
592
|
+
|
|
593
|
+
log_success "Pattern detected: $signature (priority: $priority, savings: \$$estimated_savings/month)"
|
|
594
|
+
fi
|
|
595
|
+
fi
|
|
596
|
+
done
|
|
597
|
+
|
|
598
|
+
log "Found ${#candidate_patterns[@]} candidate patterns after filtering"
|
|
599
|
+
|
|
600
|
+
# STEP 4: Sort patterns by priority
|
|
601
|
+
local patterns_json="[]"
|
|
602
|
+
for pattern in "${candidate_patterns[@]}"; do
|
|
603
|
+
patterns_json=$(echo "$patterns_json" | jq --argjson p "$pattern" '. + [$p]')
|
|
604
|
+
done
|
|
605
|
+
|
|
606
|
+
# Sort by priority (high → medium → low) and estimated savings
|
|
607
|
+
patterns_json=$(echo "$patterns_json" | jq '
|
|
608
|
+
sort_by(
|
|
609
|
+
if .priority == "high" then 0
|
|
610
|
+
elif .priority == "medium" then 1
|
|
611
|
+
else 2 end,
|
|
612
|
+
-.estimated_savings_usd
|
|
613
|
+
)')
|
|
614
|
+
|
|
615
|
+
# STEP 5: Generate output
|
|
616
|
+
local timestamp
|
|
617
|
+
timestamp=$(date -u +%Y-%m-%dT%H:%M:%SZ)
|
|
618
|
+
|
|
619
|
+
local report
|
|
620
|
+
report=$(jq -n \
|
|
621
|
+
--argjson patterns "$patterns_json" \
|
|
622
|
+
--arg timestamp "$timestamp" \
|
|
623
|
+
--arg total_reflections "$total_reflections" \
|
|
624
|
+
--arg patterns_found "${#candidate_patterns[@]}" \
|
|
625
|
+
--arg time_window "$time_window" \
|
|
626
|
+
--arg min_occurrences "$min_occurrences" \
|
|
627
|
+
--arg min_similarity "$min_similarity" \
|
|
628
|
+
--arg min_confidence "$min_confidence" \
|
|
629
|
+
'{
|
|
630
|
+
metadata: {
|
|
631
|
+
analysis_timestamp: $timestamp,
|
|
632
|
+
time_window_days: ($time_window | tonumber),
|
|
633
|
+
total_reflections_analyzed: ($total_reflections | tonumber),
|
|
634
|
+
patterns_found: ($patterns_found | tonumber),
|
|
635
|
+
filters: {
|
|
636
|
+
min_occurrences: ($min_occurrences | tonumber),
|
|
637
|
+
min_similarity: ($min_similarity | tonumber),
|
|
638
|
+
min_confidence: ($min_confidence | tonumber)
|
|
639
|
+
}
|
|
640
|
+
},
|
|
641
|
+
patterns: $patterns
|
|
642
|
+
}')
|
|
643
|
+
|
|
644
|
+
# Save to file
|
|
645
|
+
local output_file="${OUTPUT_DIR}/pattern-analysis-${timestamp}.json"
|
|
646
|
+
echo "$report" > "$output_file"
|
|
647
|
+
log_success "Pattern analysis report saved to: $output_file"
|
|
648
|
+
|
|
649
|
+
# STEP 6: Insert into database if requested
|
|
650
|
+
if [[ "$insert_db" == "true" ]]; then
|
|
651
|
+
log "Inserting patterns into workflow_patterns table"
|
|
652
|
+
|
|
653
|
+
for idx in $(seq 0 $((${#candidate_patterns[@]} - 1))); do
|
|
654
|
+
local pattern="${candidate_patterns[$idx]}"
|
|
655
|
+
|
|
656
|
+
local pattern_name
|
|
657
|
+
local workflow_steps
|
|
658
|
+
local occurrence_count
|
|
659
|
+
local teams_affected
|
|
660
|
+
local similarity_score
|
|
661
|
+
local confidence_score
|
|
662
|
+
local deterministic
|
|
663
|
+
local estimated_savings
|
|
664
|
+
local priority
|
|
665
|
+
|
|
666
|
+
pattern_name=$(echo "$pattern" | jq -r '.pattern_name')
|
|
667
|
+
workflow_steps=$(echo "$pattern" | jq -c '.workflow_steps')
|
|
668
|
+
occurrence_count=$(echo "$pattern" | jq -r '.occurrence_count')
|
|
669
|
+
teams_affected=$(echo "$pattern" | jq -r '.teams_affected | join(",")')
|
|
670
|
+
similarity_score=$(echo "$pattern" | jq -r '.similarity_score')
|
|
671
|
+
confidence_score=$(echo "$pattern" | jq -r '.confidence_score')
|
|
672
|
+
deterministic=$(echo "$pattern" | jq -r '.deterministic')
|
|
673
|
+
estimated_savings=$(echo "$pattern" | jq -r '.estimated_savings_usd')
|
|
674
|
+
priority=$(echo "$pattern" | jq -r '.priority')
|
|
675
|
+
|
|
676
|
+
# Check if pattern already exists
|
|
677
|
+
local exists_query="
|
|
678
|
+
SELECT id FROM workflow_patterns
|
|
679
|
+
WHERE pattern_name = '$pattern_name'
|
|
680
|
+
"
|
|
681
|
+
|
|
682
|
+
local existing_id
|
|
683
|
+
existing_id=$(execute_query "$db_host" "$db_port" "$db_name" "$db_user" "$db_password" "$exists_query" | head -1)
|
|
684
|
+
|
|
685
|
+
if [[ -z "$existing_id" ]]; then
|
|
686
|
+
# Insert new pattern
|
|
687
|
+
local insert_query="
|
|
688
|
+
INSERT INTO workflow_patterns (
|
|
689
|
+
pattern_name,
|
|
690
|
+
workflow_steps,
|
|
691
|
+
occurrence_count,
|
|
692
|
+
teams_affected,
|
|
693
|
+
similarity_score,
|
|
694
|
+
deterministic,
|
|
695
|
+
confidence_score,
|
|
696
|
+
estimated_savings_usd,
|
|
697
|
+
priority,
|
|
698
|
+
status,
|
|
699
|
+
created_at
|
|
700
|
+
) VALUES (
|
|
701
|
+
'$pattern_name',
|
|
702
|
+
'$workflow_steps'::jsonb,
|
|
703
|
+
$occurrence_count,
|
|
704
|
+
ARRAY['${teams_affected//,/\',\'}'],
|
|
705
|
+
$similarity_score,
|
|
706
|
+
$deterministic,
|
|
707
|
+
$confidence_score,
|
|
708
|
+
$estimated_savings,
|
|
709
|
+
'$priority',
|
|
710
|
+
'detected',
|
|
711
|
+
NOW()
|
|
712
|
+
)
|
|
713
|
+
"
|
|
714
|
+
|
|
715
|
+
if execute_query "$db_host" "$db_port" "$db_name" "$db_user" "$db_password" "$insert_query" &>/dev/null; then
|
|
716
|
+
log_success "Inserted pattern: $pattern_name"
|
|
717
|
+
else
|
|
718
|
+
log_error "Failed to insert pattern: $pattern_name"
|
|
719
|
+
fi
|
|
720
|
+
else
|
|
721
|
+
log "Pattern already exists in database: $pattern_name (id: $existing_id)"
|
|
722
|
+
fi
|
|
723
|
+
done
|
|
724
|
+
fi
|
|
725
|
+
|
|
726
|
+
# Return JSON report
|
|
727
|
+
echo "$report"
|
|
728
|
+
}
|
|
729
|
+
|
|
730
|
+
# ============================================================================
|
|
731
|
+
# MAIN EXECUTION
|
|
732
|
+
# ============================================================================
|
|
733
|
+
#######################################
# CLI entry point: parse options, validate them, run the workflow pattern
# analysis, and emit the report in the requested format.
# Globals:
#   DEFAULT_DB_HOST/PORT/NAME/USER, DEFAULT_TIME_WINDOW_DAYS,
#   DEFAULT_MIN_OCCURRENCES/SIMILARITY/CONFIDENCE (read)
#   CFN_DB_PASSWORD (read) - default database password
#   OUTPUT_DIR (written when --output-dir is given)
# Arguments:
#   CLI flags; see usage().
# Outputs:
#   JSON report to stdout and/or a human-readable summary; in "both" mode
#   the summary goes to stderr so stdout stays machine-parseable.
# Returns:
#   0 on success; exits 1 on invalid arguments or connection failure.
#######################################
main() {
    # Defaults (overridable via CLI flags / environment).
    local db_host="$DEFAULT_DB_HOST"
    local db_port="$DEFAULT_DB_PORT"
    local db_name="$DEFAULT_DB_NAME"
    local db_user="$DEFAULT_DB_USER"
    local db_password="${CFN_DB_PASSWORD:-}"
    local time_window="$DEFAULT_TIME_WINDOW_DAYS"
    local min_occurrences="$DEFAULT_MIN_OCCURRENCES"
    local min_similarity="$DEFAULT_MIN_SIMILARITY"
    local min_confidence="$DEFAULT_MIN_CONFIDENCE"
    local output_format="both"
    local insert_db="false"
    local verbose="false"

    # Parse command line arguments.
    while [[ $# -gt 0 ]]; do
        case "$1" in
            --db-host|--db-port|--db-name|--db-user|--db-password|--time-window|--min-occurrences|--min-similarity|--min-confidence|--output-dir|--output-format)
                # FIX: the original did `shift 2` without checking that a
                # value was present; with the flag last on the command line
                # this fails opaquely under `set -u`. Fail fast instead.
                if [[ $# -lt 2 ]]; then
                    log_error "Option $1 requires a value"
                    usage
                    exit 1
                fi
                case "$1" in
                    --db-host)         db_host="$2" ;;
                    --db-port)         db_port="$2" ;;
                    --db-name)         db_name="$2" ;;
                    --db-user)         db_user="$2" ;;
                    --db-password)     db_password="$2" ;;
                    --time-window)     time_window="$2" ;;
                    --min-occurrences) min_occurrences="$2" ;;
                    --min-similarity)  min_similarity="$2" ;;
                    --min-confidence)  min_confidence="$2" ;;
                    --output-dir)      OUTPUT_DIR="$2" ;;
                    --output-format)   output_format="$2" ;;
                esac
                shift 2
                ;;
            --insert-db)
                insert_db="true"
                shift
                ;;
            --verbose)
                verbose="true"
                shift
                ;;
            --help)
                usage
                exit 0
                ;;
            *)
                log_error "Unknown option: $1"
                usage
                exit 1
                ;;
        esac
    done

    # Validate numeric parameters.
    validate_integer "$time_window" "time-window" 1 || exit 1
    validate_integer "$min_occurrences" "min-occurrences" 1 || exit 1
    validate_float "$min_similarity" "min-similarity" 0.0 1.0 || exit 1
    validate_float "$min_confidence" "min-confidence" 0.0 1.0 || exit 1

    # FIX: validate the output format BEFORE doing any expensive database
    # work; the original only rejected a bad format after the full analysis
    # had already run. Same message, same exit code, just earlier.
    case "$output_format" in
        json|summary|both) ;;
        *)
            log_error "Invalid output format: $output_format (must be json|summary|both)"
            exit 1
            ;;
    esac

    # Verify database connectivity before running the analysis.
    if ! check_postgresql_connection "$db_host" "$db_port" "$db_name" "$db_user" "$db_password"; then
        log_error "PostgreSQL connection check failed"
        exit 1
    fi

    # Run pattern analysis (prints the full JSON report on stdout).
    local report
    report=$(analyze_workflow_patterns \
        "$db_host" \
        "$db_port" \
        "$db_name" \
        "$db_user" \
        "$db_password" \
        "$time_window" \
        "$min_occurrences" \
        "$min_similarity" \
        "$min_confidence" \
        "$insert_db" \
        "$verbose")

    # Emit results in the requested format.
    case "$output_format" in
        json)
            echo "$report" | jq '.'
            ;;
        summary)
            echo "$report" | jq -r '
                "Pattern Analysis Summary",
                "========================",
                "",
                "Analysis Timestamp: " + .metadata.analysis_timestamp,
                "Time Window: " + (.metadata.time_window_days | tostring) + " days",
                "Total Reflections Analyzed: " + (.metadata.total_reflections_analyzed | tostring),
                "Patterns Found: " + (.metadata.patterns_found | tostring),
                "",
                "Filters:",
                "  Min Occurrences: " + (.metadata.filters.min_occurrences | tostring),
                "  Min Similarity: " + (.metadata.filters.min_similarity | tostring),
                "  Min Confidence: " + (.metadata.filters.min_confidence | tostring),
                "",
                "Patterns by Priority:",
                "  High: " + ([.patterns[] | select(.priority == "high")] | length | tostring),
                "  Medium: " + ([.patterns[] | select(.priority == "medium")] | length | tostring),
                "  Low: " + ([.patterns[] | select(.priority == "low")] | length | tostring),
                "",
                "Top 5 Patterns:",
                (.patterns[:5] | to_entries | .[] | "  " + (.key + 1 | tostring) + ". " + .value.pattern_name + " (priority: " + .value.priority + ", savings: $" + (.value.estimated_savings_usd | tostring) + "/month)")
            '
            ;;
        both)
            # Summary to stderr so stdout stays pure JSON for piping.
            echo "$report" | jq -r '
                "Pattern Analysis Summary",
                "========================",
                "",
                "Analysis Timestamp: " + .metadata.analysis_timestamp,
                "Total Patterns Found: " + (.metadata.patterns_found | tostring),
                "High Priority: " + ([.patterns[] | select(.priority == "high")] | length | tostring),
                "Medium Priority: " + ([.patterns[] | select(.priority == "medium")] | length | tostring),
                "Low Priority: " + ([.patterns[] | select(.priority == "low")] | length | tostring)
            ' >&2
            echo "" >&2
            # JSON to stdout.
            echo "$report" | jq '.'
            ;;
    esac

    log_success "Pattern analysis completed successfully"
}
|
|
895
|
+
|
|
896
|
+
# Run main function if script is executed directly
|
|
897
|
+
# Run the CLI entry point only when this file is executed directly;
# when sourced (e.g. by tests), just expose the functions with no side effects.
[[ "${BASH_SOURCE[0]}" != "${0}" ]] || main "$@"
|