create-merlin-brain 3.6.3 → 3.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,284 @@
1
+ #!/usr/bin/env bash
2
+ #
3
+ # ╔═══════════════════════════════════════════════════════════════════════════╗
4
+ # ║ BLEND ENGINE — Stage 3: Structured Handoffs ║
5
+ # ║ Clean JSON pipeline context instead of raw output dumping ║
6
+ # ╚═══════════════════════════════════════════════════════════════════════════╝
7
+ #
8
+ # Replace raw text output passing between agents with structured JSON:
9
+ # - decisions_made: what the agent decided and why
10
+ # - files_changed: what files were created/modified/deleted
11
+ # - open_questions: unresolved items for the next agent
12
+ # - key_constraints: constraints the next agent must respect
13
+ # - exit_status: success/partial/failed
14
+ #
15
+ # This gives the next agent clean context instead of 10K of noise.
16
+ #
17
+ # Requires: blend.sh loaded first
18
+
19
# Color escape sequences — defaults only; blend.sh may already have set
# these. RED and MAGENTA are added because functions in this file
# reference them (handoff_append's error path, blend_pipeline's banner);
# without defaults they would expand empty when blend.sh is absent.
: "${RESET:=\033[0m}"
: "${BOLD:=\033[1m}"
: "${DIM:=\033[2m}"
: "${RED:=\033[31m}"
: "${GREEN:=\033[32m}"
: "${YELLOW:=\033[33m}"
: "${MAGENTA:=\033[35m}"
: "${CYAN:=\033[36m}"
26
+
27
+ # ═══════════════════════════════════════════════════════════════════════════════
28
+ # Handoff File Management
29
+ # ═══════════════════════════════════════════════════════════════════════════════
30
+
31
# Initialise a fresh, empty handoff structure for a blend session.
#   $1 - session directory (default: /tmp/merlin-blend-<pid>)
# Prints the path of the created handoff.json on stdout.
handoff_create() {
  local dir="${1:-/tmp/merlin-blend-$$}"
  local out="${dir}/handoff.json"

  mkdir -p "$dir"

  # Quoted delimiter: the JSON skeleton is written verbatim.
  cat > "$out" << 'EMPTY_HANDOFF'
{
  "version": 1,
  "steps": [],
  "accumulated_context": {
    "decisions": [],
    "files_changed": [],
    "constraints": [],
    "open_questions": []
  }
}
EMPTY_HANDOFF

  echo "$out"
}
52
+
53
# Append one step's outcome to the handoff chain.
# Usage: handoff_append <handoff_file> <step_name> <raw_output>
# Extracts decisions / files changed / open questions from the raw agent
# output with lightweight grep-based parsing, derives a
# success/partial/failed status, and merges a step entry into the
# handoff JSON. Returns 1 if the handoff file does not exist.
handoff_append() {
  local handoff_file="$1"
  local step_name="$2"
  local raw_output="$3"

  if [ ! -f "$handoff_file" ]; then
    echo -e "${RED}Handoff file not found: $handoff_file${RESET}" >&2
    return 1
  fi

  local decisions files_changed open_questions status

  # Decisions: lines starting with "Decision:", "Decided:" or an arrow.
  decisions=$(echo "$raw_output" | grep -iE '^\s*(decision|decided|→|⟶):' | head -10 | sed 's/^[^:]*: *//' | python3 -c "
import sys, json
lines = [l.strip() for l in sys.stdin if l.strip()]
print(json.dumps(lines))
" 2>/dev/null || echo "[]")

  # Files changed: "<verb> <path>" phrases anywhere in the output.
  files_changed=$(echo "$raw_output" | grep -oE '(created|modified|deleted|updated|wrote|edited)\s+`?[a-zA-Z0-9_./-]+`?' | head -20 | python3 -c "
import sys, json, re
files = []
for line in sys.stdin:
    match = re.search(r'(created|modified|deleted|updated|wrote|edited)\s+\`?([a-zA-Z0-9_./-]+)\`?', line.strip())
    if match:
        files.append({'action': match.group(1), 'path': match.group(2)})
print(json.dumps(files))
" 2>/dev/null || echo "[]")

  # Open questions: "?", TODO, QUESTION and similar markers.
  open_questions=$(echo "$raw_output" | grep -iE '(\?|TODO|QUESTION|open question|needs decision)' | head -5 | python3 -c "
import sys, json
lines = [l.strip() for l in sys.stdin if l.strip()]
print(json.dumps(lines))
" 2>/dev/null || echo "[]")

  # Status heuristic from the raw output text.
  if echo "$raw_output" | grep -qi "error\|failed\|FAIL"; then
    status="failed"
  elif echo "$raw_output" | grep -qi "partial\|incomplete\|skipped"; then
    status="partial"
  else
    status="success"
  fi

  # Merge the step entry into the handoff JSON. All data is passed via
  # the environment (not interpolated into the script text), so quotes,
  # backslashes or triple-quotes in agent output cannot break or inject
  # into the embedded Python.
  HANDOFF_FILE="$handoff_file" STEP_NAME="$step_name" STEP_STATUS="$status" \
  STEP_DECISIONS="$decisions" STEP_FILES="$files_changed" \
  STEP_QUESTIONS="$open_questions" RAW_LEN="${#raw_output}" \
  python3 << 'PYEOF'
import json, os
from datetime import datetime, timezone

handoff_file = os.environ["HANDOFF_FILE"]
step_name = os.environ["STEP_NAME"]
status = os.environ["STEP_STATUS"]

with open(handoff_file, 'r') as f:
    handoff = json.load(f)

def _loads(name):
    """Parse a JSON list from the environment; tolerate bad input."""
    try:
        return json.loads(os.environ.get(name, "[]"))
    except Exception:
        return []

decisions = _loads("STEP_DECISIONS")
files_changed = _loads("STEP_FILES")
open_questions = _loads("STEP_QUESTIONS")

step = {
    "name": step_name,
    "timestamp": datetime.now(timezone.utc).isoformat(),
    "status": status,
    "decisions": decisions,
    "files_changed": files_changed,
    "open_questions": open_questions,
    "output_length": int(os.environ.get("RAW_LEN", "0")),
}

handoff["steps"].append(step)

# Accumulate context for the next agent.
ctx = handoff["accumulated_context"]
ctx["decisions"].extend(decisions)
ctx["files_changed"].extend(files_changed)
ctx["open_questions"] = open_questions  # only the latest step's questions
if decisions:
    ctx["constraints"].extend(
        f"Step '{step_name}' decided: {d}" for d in decisions[:3]
    )

with open(handoff_file, 'w') as f:
    json.dump(handoff, f, indent=2)

print(f"Handoff updated: {step_name} ({status})")
PYEOF
}
154
+
155
# Render the accumulated handoff context as a clean, readable markdown
# summary (not raw JSON) for injection into the next agent's prompt.
#   $1 - handoff file path
# Prints empty output when the file is missing or no steps exist yet.
handoff_format_for_agent() {
  local handoff_file="$1"

  if [ ! -f "$handoff_file" ]; then
    echo ""
    return
  fi

  # The path is handed over via the environment so special characters
  # in it cannot break the embedded Python (the previous version
  # interpolated "$handoff_file" directly into the script text).
  HANDOFF_FILE="$handoff_file" python3 << 'PYEOF'
import json, os

with open(os.environ["HANDOFF_FILE"], 'r') as f:
    handoff = json.load(f)

ctx = handoff.get("accumulated_context", {})
steps = handoff.get("steps", [])

if not steps:
    print("")
    raise SystemExit

output = "## Pipeline Context (from previous steps)\n\n"

# Per-step status summary.
output += "### Completed Steps\n"
for s in steps:
    icon = "✓" if s["status"] == "success" else "⚠" if s["status"] == "partial" else "✗"
    output += f"- {icon} **{s['name']}** ({s['status']})\n"
output += "\n"

# Key decisions the next agent must not contradict.
decisions = ctx.get("decisions", [])
if decisions:
    output += "### Decisions Made (DO NOT contradict these)\n"
    for d in decisions[-10:]:  # Last 10
        output += f"- {d}\n"
    output += "\n"

# Files already touched by earlier steps.
files = ctx.get("files_changed", [])
if files:
    output += "### Files Already Changed (check before modifying)\n"
    for f in files[-20:]:  # Last 20
        if isinstance(f, dict):
            output += f"- {f.get('action', '?')}: `{f.get('path', '?')}`\n"
        else:
            output += f"- {f}\n"
    output += "\n"

# Constraints accumulated from prior decisions.
constraints = ctx.get("constraints", [])
if constraints:
    output += "### Constraints (MUST respect)\n"
    for c in constraints[-5:]:
        output += f"- {c}\n"
    output += "\n"

# Open questions from the most recent step.
questions = ctx.get("open_questions", [])
if questions:
    output += "### Open Questions (resolve if relevant to your task)\n"
    for q in questions:
        output += f"- {q}\n"
    output += "\n"

print(output)
PYEOF
}
225
+
226
+ # ═══════════════════════════════════════════════════════════════════════════════
227
+ # Pipeline: Execute a sequence of blended agents with structured handoffs
228
+ # ═══════════════════════════════════════════════════════════════════════════════
229
+
230
# Run a sequence of blended agent tasks, threading structured handoff
# context from each completed step into the next one's prompt.
# Usage: blend_pipeline <session_dir> "step1_task" "step2_task" ...
# Prints the final step's output on stdout; stops early if a step fails.
blend_pipeline() {
  local session_dir="$1"
  shift
  local tasks=("$@")

  mkdir -p "$session_dir"

  # A single handoff file accumulates context across the whole run.
  local handoff_file
  handoff_file=$(handoff_create "$session_dir")

  local total=${#tasks[@]}
  echo -e "${MAGENTA}${BOLD} Pipeline: $total steps${RESET}" >&2

  local final_output=""
  local idx
  for idx in "${!tasks[@]}"; do
    local task="${tasks[$idx]}"
    local step_num=$((idx + 1))
    local step_name="step-${step_num}"

    echo -e "${CYAN} [${step_num}/${total}]${RESET} ${task:0:60}..." >&2

    # Clean summary of everything prior steps decided and changed.
    local pipeline_context
    pipeline_context=$(handoff_format_for_agent "$handoff_file")

    local full_context=""
    if [ -n "$pipeline_context" ]; then
      full_context="$pipeline_context"
    fi

    # Run the blended agent for this step.
    local step_output
    step_output=$(blend_and_spawn "$task" "$full_context" "${session_dir}/${step_name}")
    local step_exit=$?

    # Record the outcome so the next step sees it.
    handoff_append "$handoff_file" "$step_name" "$step_output"

    final_output="$step_output"
    if [ "$step_exit" -ne 0 ]; then
      echo -e "${RED} ✗ Step $step_num failed${RESET}" >&2
      break
    fi

    echo -e "${GREEN} ✓ Step $step_num complete${RESET}" >&2
  done

  echo "$final_output"
}
@@ -0,0 +1,337 @@
1
+ #!/usr/bin/env bash
2
+ #
3
+ # ╔═══════════════════════════════════════════════════════════════════════════╗
4
+ # ║ BLEND ENGINE — Stage 4: Blend Learning ║
5
+ # ║ Track which blend combinations succeed, adjust scoring over time ║
6
+ # ╚═══════════════════════════════════════════════════════════════════════════╝
7
+ #
8
+ # Every blend execution produces a signal: did it work?
9
+ # This module captures those signals and uses them to boost or penalize
10
+ # agent combinations in future scoring.
11
+ #
12
+ # Data storage:
13
+ # Global: ~/.merlin/blend-learn.jsonl (universal patterns)
14
+ # Project: .merlin-loop/blend-learn.jsonl (project-specific patterns)
15
+ #
16
+ # Signal types:
17
+ # - exit_code: 0 = success, non-zero = failure
18
+ # - verification_verdict: PASS / PASS_WITH_WARNINGS / FAIL (from blend-verify.sh)
19
+ # - duration: how long the spawn took (fast = good signal)
20
+ # - agent_combo: which agents were blended
21
+ #
22
+ # Scoring integration:
23
+ # After blend_score_all() runs keyword matching, learning applies a
24
+ # multiplier per agent based on historical success rate:
25
+ # success_rate > 0.8 → +2 bonus
26
+ # success_rate > 0.6 → +1 bonus
27
+ # success_rate < 0.3 → -1 penalty
28
+ # success_rate < 0.1 → -2 penalty
29
+ # Minimum 5 samples required before adjustments kick in.
30
+ #
31
+ # Requires: blend.sh loaded first
32
+
33
# Color escape sequences — defaults only; blend.sh may have set these.
RESET="${RESET:-\033[0m}"
BOLD="${BOLD:-\033[1m}"
DIM="${DIM:-\033[2m}"
GREEN="${GREEN:-\033[32m}"
YELLOW="${YELLOW:-\033[33m}"
RED="${RED:-\033[31m}"
CYAN="${CYAN:-\033[36m}"
MAGENTA="${MAGENTA:-\033[35m}"

# Learning data stores: global (cross-project patterns) and
# project-local (patterns specific to the current repository).
LEARN_GLOBAL_FILE="${HOME}/.merlin/blend-learn.jsonl"
LEARN_PROJECT_FILE="${MERLIN_LOOP_DIR:-.merlin-loop}/blend-learn.jsonl"

# Learning adjusts scores only once an agent has this many samples.
LEARN_MIN_SAMPLES="${LEARN_MIN_SAMPLES:-5}"

# Sliding window: how many of the most recent records to consider.
LEARN_WINDOW="${LEARN_WINDOW:-100}"
52
+
53
+ # ═══════════════════════════════════════════════════════════════════════════════
54
+ # Recording: Capture outcome signals after every blend execution
55
+ # ═══════════════════════════════════════════════════════════════════════════════
56
+
57
# Record one blend execution outcome to the global and project JSONL logs.
# Usage: learn_record <primary_agent> <secondary_agents_csv> <exit_code> <duration_secs> [verdict]
#   verdict (optional): PASS / PASS_WITH_WARNINGS / FAIL from blend-verify.sh
# Returns 1 if the record could not be built.
learn_record() {
  local primary="$1"
  local secondaries="${2:-}"
  local exit_code="${3:-0}"
  local duration="${4:-0}"
  local verdict="${5:-}"

  # Ensure target directories exist.
  mkdir -p "$(dirname "$LEARN_GLOBAL_FILE")"
  mkdir -p "$(dirname "$LEARN_PROJECT_FILE")"

  # Success = spawn exited 0 AND verification did not say FAIL.
  local success="true"
  if [ "$exit_code" -ne 0 ]; then
    success="false"
  fi
  if [ "$verdict" = "FAIL" ]; then
    success="false"
  fi

  # Build the JSON record in Python, passing values through the
  # environment. The previous inline-interpolation approach broke on
  # quotes in agent names AND emitted the shell strings true/false as
  # bare Python identifiers ('true' is a NameError in Python), which
  # made record construction fail every time under 2>/dev/null — so
  # nothing was ever recorded.
  local record
  record=$(REC_PRIMARY="$primary" REC_SECONDARIES="$secondaries" \
           REC_EXIT="$exit_code" REC_DURATION="$duration" \
           REC_VERDICT="$verdict" REC_SUCCESS="$success" \
           REC_PROJECT="$(basename "$(pwd)")" \
           python3 -c '
import json, os
from datetime import datetime, timezone

env = os.environ

def _num(s):
    """Integer if possible, else float, else 0."""
    try:
        return int(s)
    except ValueError:
        try:
            return float(s)
        except ValueError:
            return 0

secondaries = env.get("REC_SECONDARIES", "")
record = {
    "timestamp": datetime.now(timezone.utc).isoformat(),
    "primary": env.get("REC_PRIMARY", ""),
    "secondaries": secondaries.split(",") if secondaries else [],
    "exit_code": _num(env.get("REC_EXIT", "0")),
    "duration_secs": _num(env.get("REC_DURATION", "0")),
    "verdict": env.get("REC_VERDICT") or None,
    "success": env.get("REC_SUCCESS") == "true",
    "project": env.get("REC_PROJECT", ""),
}
print(json.dumps(record))
' 2>/dev/null)

  if [ -z "$record" ]; then
    return 1
  fi

  # Append to both stores (global and project-local).
  echo "$record" >> "$LEARN_GLOBAL_FILE"
  echo "$record" >> "$LEARN_PROJECT_FILE"
}
106
+
107
+ # ═══════════════════════════════════════════════════════════════════════════════
108
+ # Analysis: Compute success rates per agent
109
+ # ═══════════════════════════════════════════════════════════════════════════════
110
+
111
# Compute a score adjustment for an agent from learning history.
#   $1 - agent key
# Prints an integer adjustment (-2, -1, 0, +1, +2) on stdout.
# Project-specific data is consulted first and wins when it yields a
# non-zero boost; otherwise global data is consulted. At least
# LEARN_MIN_SAMPLES relevant records within the last LEARN_WINDOW
# entries are required before any adjustment is made.
learn_get_boost() {
  local agent_key="$1"

  local boost=0
  local learn_file
  for learn_file in "$LEARN_PROJECT_FILE" "$LEARN_GLOBAL_FILE"; do
    [ ! -f "$learn_file" ] && continue

    local result
    # Inputs are passed via the environment (quoted heredoc), so agent
    # keys or file paths with quotes cannot inject into the Python.
    result=$(LEARN_FILE="$learn_file" AGENT_KEY="$agent_key" \
             WINDOW="$LEARN_WINDOW" MIN_SAMPLES="$LEARN_MIN_SAMPLES" \
             python3 << 'PYEOF' 2>/dev/null
import json, os, sys

agent_key = os.environ["AGENT_KEY"]
window = int(os.environ["WINDOW"])
min_samples = int(os.environ["MIN_SAMPLES"])

records = []
try:
    with open(os.environ["LEARN_FILE"], 'r') as f:
        for line in f:
            line = line.strip()
            if line:
                try:
                    records.append(json.loads(line))
                except ValueError:
                    pass
except OSError:
    pass

# Recent window only; agent matches as primary or as a secondary.
relevant = [
    r for r in records[-window:]
    if r.get('primary') == agent_key or agent_key in r.get('secondaries', [])
]

total = len(relevant)
if total < min_samples:
    print(0)
    sys.exit()

rate = sum(1 for r in relevant if r.get('success')) / total

if rate > 0.8:
    print(2)
elif rate > 0.6:
    print(1)
elif rate < 0.1:
    print(-2)
elif rate < 0.3:
    print(-1)
else:
    print(0)
PYEOF
    )
    if [ -n "$result" ] && [ "$result" != "0" ]; then
      boost="$result"
      break  # Project-specific takes priority
    fi
  done

  echo "$boost"
}
178
+
179
+ # ═══════════════════════════════════════════════════════════════════════════════
180
+ # Scoring Integration: Apply learning boosts to scored agents
181
+ # ═══════════════════════════════════════════════════════════════════════════════
182
+
183
# Apply learning boosts to scored agents.
# Reads "score:key:label:file" lines on stdin and writes the same lines
# with per-agent boosts applied, re-sorted by score descending.
learn_apply_weights() {
  # No history at all: pass scores through untouched.
  if [ ! -f "$LEARN_PROJECT_FILE" ] && [ ! -f "$LEARN_GLOBAL_FILE" ]; then
    cat
    return
  fi

  # Buffer all input lines first.
  local entry
  local scored=()
  while IFS= read -r entry; do
    [ -z "$entry" ] && continue
    scored+=("$entry")
  done

  # Re-score each entry using its agent's historical boost.
  local adjusted=()
  for entry in "${scored[@]}"; do
    local score key label file
    IFS=: read -r score key label file <<< "$entry"
    [ -z "$key" ] && continue

    local boost
    boost=$(learn_get_boost "$key")

    if [ "$boost" = "0" ]; then
      adjusted+=("$entry")
    else
      local new_score=$((score + boost))
      [ "$new_score" -lt 0 ] && new_score=0  # never go negative
      adjusted+=("${new_score}:${key}:${label}:${file}")
    fi
  done

  # Emit highest score first.
  printf '%s\n' "${adjusted[@]}" | sort -t: -k1 -nr
}
222
+
223
+ # ═══════════════════════════════════════════════════════════════════════════════
224
+ # Statistics: Show learning insights
225
+ # ═══════════════════════════════════════════════════════════════════════════════
226
+
227
# Print learning statistics: per-agent success rates, average durations,
# and the most frequent blend combinations.
#   $1 - learning file to analyse (default: the project-local file)
learn_stats() {
  local learn_file="${1:-$LEARN_PROJECT_FILE}"

  if [ ! -f "$learn_file" ]; then
    echo -e "${DIM} No learning data yet.${RESET}" >&2
    return
  fi

  # The path is handed over via the environment so unusual characters
  # in it cannot break the embedded Python (the previous version
  # interpolated "$learn_file" into an unquoted heredoc).
  LEARN_FILE="$learn_file" python3 << 'PYEOF'
import json, os
from collections import defaultdict

records = []
with open(os.environ["LEARN_FILE"], 'r') as f:
    for line in f:
        line = line.strip()
        if line:
            try:
                records.append(json.loads(line))
            except ValueError:
                pass

if not records:
    print(" No learning data.")
    raise SystemExit

# Aggregate by primary agent.
stats = defaultdict(lambda: {'total': 0, 'success': 0, 'avg_duration': 0, 'durations': []})

for r in records:
    key = r.get('primary', 'unknown')
    stats[key]['total'] += 1
    if r.get('success'):
        stats[key]['success'] += 1
    dur = r.get('duration_secs', 0)
    if dur > 0:
        stats[key]['durations'].append(dur)

print(f"\n 📊 Blend Learning Stats ({len(records)} total executions)\n")
print(f" {'Agent':<20} {'Success':>8} {'Total':>6} {'Rate':>7} {'Avg Time':>10}")
print(f" {'─'*20} {'─'*8} {'─'*6} {'─'*7} {'─'*10}")

for key in sorted(stats.keys(), key=lambda k: stats[k]['total'], reverse=True):
    s = stats[key]
    rate = s['success'] / s['total'] if s['total'] > 0 else 0
    avg_dur = sum(s['durations']) / len(s['durations']) if s['durations'] else 0

    # Color coding by success rate.
    if rate > 0.8:
        indicator = "✓"
    elif rate < 0.3:
        indicator = "✗"
    else:
        indicator = "○"

    dur_str = f"{avg_dur:.0f}s" if avg_dur > 0 else "—"
    print(f" {indicator} {key:<18} {s['success']:>8} {s['total']:>6} {rate:>6.0%} {dur_str:>10}")

# Most frequent primary+secondaries combinations.
combos = defaultdict(lambda: {'total': 0, 'success': 0})
for r in records:
    combo_key = r.get('primary', '?') + '+' + ','.join(sorted(r.get('secondaries', [])))
    combos[combo_key]['total'] += 1
    if r.get('success'):
        combos[combo_key]['success'] += 1

if combos:
    print(f"\n 🔗 Top Blend Combinations:")
    for combo in sorted(combos.keys(), key=lambda k: combos[k]['total'], reverse=True)[:5]:
        c = combos[combo]
        rate = c['success'] / c['total']
        print(f" {combo:<40} {c['success']}/{c['total']} ({rate:.0%})")
PYEOF
}
302
+
303
+ # ═══════════════════════════════════════════════════════════════════════════════
304
+ # Maintenance: Reset and cleanup
305
+ # ═══════════════════════════════════════════════════════════════════════════════
306
+
307
# Delete the project-local learning data file.
learn_reset_project() {
  rm -f -- "$LEARN_PROJECT_FILE"
  echo -e "${YELLOW} Project learning data reset.${RESET}" >&2
}
312
+
313
# Delete both the project-local and the global learning data files.
learn_reset_all() {
  rm -f -- "$LEARN_PROJECT_FILE" "$LEARN_GLOBAL_FILE"
  echo -e "${YELLOW} All learning data reset.${RESET}" >&2
}
319
+
320
# Trim both learning files down to at most the newest N records.
#   $1 - maximum records to keep (default: 500)
learn_compact() {
  local keep="${1:-500}"
  local learn_file

  for learn_file in "$LEARN_PROJECT_FILE" "$LEARN_GLOBAL_FILE"; do
    [ ! -f "$learn_file" ] && continue

    local total
    total=$(wc -l < "$learn_file" 2>/dev/null || echo "0")

    if [ "$total" -gt "$keep" ]; then
      # Keep only the tail; write via a temp file, then swap in place.
      local scratch="${learn_file}.tmp"
      tail -n "$keep" "$learn_file" > "$scratch"
      mv "$scratch" "$learn_file"
      echo -e "${DIM} Compacted $learn_file: $total → $keep records${RESET}" >&2
    fi
  done
}