@misterhuydo/sentinel 1.2.6 → 1.2.7
- package/.cairn/.hint-lock +1 -1
- package/.cairn/session.json +2 -2
- package/package.json +1 -1
- package/python/sentinel/sentinel_boss.py +151 -5
package/.cairn/.hint-lock CHANGED

@@ -1 +1 @@
-2026-03-23T11:
+2026-03-23T11:43:23.881Z
package/.cairn/session.json CHANGED

@@ -1,6 +1,6 @@
 {
-  "message": "Auto-checkpoint at 2026-03-23T11:
-  "checkpoint_at": "2026-03-23T11:
+  "message": "Auto-checkpoint at 2026-03-23T11:40:37.793Z",
+  "checkpoint_at": "2026-03-23T11:40:37.794Z",
   "active_files": [],
   "notes": [],
   "mtime_snapshot": {}
package/package.json CHANGED

package/python/sentinel/sentinel_boss.py CHANGED
@@ -240,6 +240,29 @@ When to act vs. when to ask:
 - If a tool call will take a moment (search, fetch, pull), prefix your reply with a brief "working" line ending in "..." before the results, e.g. "Searching SSOLWA for TryDig activity..." then the actual output.
 Never just say a working line and stop — always follow it with the results in the same message.
 
+Search reasoning — always do this before calling filter_logs or search_logs:
+1. Interpret intent: what is the user actually looking for? Don't pass the raw message as the query.
+   Examples:
+   - "TryDig errors" → query="TryDig" (component name; look for it in any context)
+   - "payment failures last hour" → query="pay|payment|transaction", since_hours=1
+   - "why is the app crashing" → query="Exception|Error|FAILED|crash", look for stack traces
+   - "login issues today" → query="login|auth|401|403|session", since_hours=24
+   - "slow requests" → query="timeout|slow|latency|took [0-9]+ms|duration"
+   - "startup problems" → query="APPLICATION FAILED|BeanCreation|NoSuchMethod|ClassNotFound"
+   Use | in the regex to cover synonyms and related terms. Keep it focused — not too broad.
+2. Choose since_hours if a time window is implied ("last hour", "today", "this morning").
+3. Pick source if the user mentioned a specific service (SSOLWA, STS, etc.) or server.
+
+After getting filter_logs results, always synthesize — never dump raw output:
+- Lead with 1-2 sentences: total count, affected sources, dominant pattern.
+  e.g. "Found 47 matches across SSOLWA and STS — mostly NullPointerException in DigService (31 hits)."
+- List the top 3-5 patterns with counts in plain language.
+- Call out any notable time clustering (e.g. "spike between 10:23–10:47 UTC").
+- Show 2-3 example lines at most — only the most informative ones.
+- End with a recommendation if the pattern suggests something actionable:
+  e.g. "Looks like a dependency resolution issue — create an issue?" or "Pattern consistent with a null config value at startup."
+- If total_matches=0, say so plainly and suggest what else to try.
+
 Session context — critical rules:
 - Loaded conversation history is prior-session background only. It may be hours or days old.
 - NEVER say "the previous search", "I already fetched", "as I found earlier", or any phrase implying you already did part of the current task — unless a tool result appears in THIS response's tool calls.
@@ -1341,11 +1364,29 @@ async def _run_tool(name: str, inputs: dict, cfg_loader, store, slack_client=Non
 
     if name == "filter_logs":
         import re as _re
+        from collections import Counter as _Counter
         from datetime import datetime, timedelta, timezone as _tz
+
+        # Extract a short grouping key from a log line for pattern analysis
+        _EXC_PAT = _re.compile(r'([A-Z][a-zA-Z]+(?:Exception|Error|Failure|Fault|Warning))')
+        _LVL_PAT = _re.compile(r'\b(ERROR|WARN(?:ING)?|CRITICAL|FATAL|SEVERE)\b', _re.IGNORECASE)
+
+        def _signature(line):
+            exc = _EXC_PAT.search(line)
+            if exc:
+                return exc.group(1)
+            m = _LVL_PAT.search(line)
+            if m:
+                after = line[m.end():].strip()
+                token = after.split()[0].rstrip(':.,') if after.split() else ''
+                if token and len(token) > 2:
+                    return m.group(1).upper() + ' ' + token[:40]
+            return line.strip()[:40]
+
         query_f = inputs.get("query", "")
         source_f = inputs.get("source", "").lower()
         since_hours = inputs.get("since_hours")
-        max_matches = int(inputs.get("max_matches",
+        max_matches = int(inputs.get("max_matches", 300))
         case_flag = 0 if inputs.get("case_sensitive") else _re.IGNORECASE
         try:
             pat = _re.compile(query_f, case_flag)
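
A rough, standalone illustration of what the new _signature helper groups lines by, re-stating the two regexes from the hunk so the sketch runs on its own; the sample log lines are invented:

    import re

    # Re-stated from the hunk above so this runs standalone.
    _EXC_PAT = re.compile(r'([A-Z][a-zA-Z]+(?:Exception|Error|Failure|Fault|Warning))')
    _LVL_PAT = re.compile(r'\b(ERROR|WARN(?:ING)?|CRITICAL|FATAL|SEVERE)\b', re.IGNORECASE)

    def _signature(line):
        # Prefer an exception class name; fall back to level + first token;
        # else use the first 40 characters as the grouping key.
        exc = _EXC_PAT.search(line)
        if exc:
            return exc.group(1)
        m = _LVL_PAT.search(line)
        if m:
            after = line[m.end():].strip()
            token = after.split()[0].rstrip(':.,') if after.split() else ''
            if token and len(token) > 2:
                return m.group(1).upper() + ' ' + token[:40]
        return line.strip()[:40]

    print(_signature("2026-03-23 11:02:14 ERROR DigService threw NullPointerException"))
    # -> NullPointerException   (exception class wins)
    print(_signature("2026-03-23 11:02:15 WARN ConnectionPool exhausted, retrying"))
    # -> WARN ConnectionPool    (level plus the first following token)
    print(_signature("plain line with no level marker"))
    # -> plain line with no level marker   (fallback: first 40 chars)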
@@ -1416,12 +1457,117 @@ async def _run_tool(name: str, inputs: dict, cfg_loader, store, slack_client=Non
             "note": "No matches found in synced logs.",
         })
 
+
+        try:
+            pat = _re.compile(query_f, case_flag)
+        except _re.error as e:
+            return json.dumps({"error": f"Invalid regex: {e}"})
+
+        synced_base = Path("workspace/synced")
+        if not synced_base.exists():
+            return json.dumps({
+                "error": "No synced logs found.",
+                "hint": "Log sync runs every SYNC_INTERVAL_SECONDS (default 300s). "
+                        "If just started, wait a minute then try again.",
+            })
+
+        cutoff = None
+        if since_hours:
+            cutoff = datetime.now(_tz.utc) - timedelta(hours=int(since_hours))
+
+        if source_f:
+            src_dirs = [d for d in sorted(synced_base.iterdir())
+                        if d.is_dir() and source_f in d.name.lower()]
+        else:
+            src_dirs = [d for d in sorted(synced_base.iterdir()) if d.is_dir()]
+
+        if not src_dirs:
+            available = [d.name for d in synced_base.iterdir() if d.is_dir()]
+            return json.dumps({
+                "error": f"No synced source matching '{source_f}'",
+                "available_sources": available,
+            })
+
+        all_matches = []  # list of (source_name, line)
+        sources_hit = set()
+        for src_dir in src_dirs:
+            for log_file in sorted(src_dir.glob("*")):
+                try:
+                    lines = log_file.read_text(encoding="utf-8", errors="replace").splitlines()
+                    for line in lines:
+                        if not pat.search(line):
+                            continue
+                        if cutoff:
+                            from .log_fetcher import _parse_line_ts
+                            ts = _parse_line_ts(line)
+                            if ts and ts < cutoff:
+                                continue
+                        all_matches.append((src_dir.name, line[:300]))
+                        sources_hit.add(src_dir.name)
+                        if len(all_matches) >= max_matches:
+                            break
+                except Exception:
+                    pass
+                if len(all_matches) >= max_matches:
+                    break
+
+        total = len(all_matches)
+        if total == 0:
+            return json.dumps({
+                "query": query_f,
+                "total_matches": 0,
+                "sources_searched": [d.name for d in src_dirs],
+                "note": "No matches found in synced logs.",
+            })
+
+        # Pattern grouping: count occurrences of each error signature
+        sig_counter = _Counter()
+        sig_examples = {}
+        for src, line in all_matches:
+            sig = _signature(line)
+            sig_counter[sig] += 1
+            if sig not in sig_examples:
+                sig_examples[sig] = f"[{src}] {line}"
+
+        top_patterns = [
+            {"pattern": sig, "count": cnt, "example": sig_examples[sig][:250]}
+            for sig, cnt in sig_counter.most_common(10)
+        ]
+
+        # Sample: first unique-signature line from each source
+        sample_lines = []
+        seen_sigs = set()
+        for src, line in all_matches:
+            sig = _signature(line)
+            if sig not in seen_sigs:
+                sample_lines.append(f"[{src}] {line}")
+                seen_sigs.add(sig)
+            if len(sample_lines) >= 10:
+                break
+
+        # Time span
+        time_span = {}
+        try:
+            from .log_fetcher import _parse_line_ts
+            timestamps = [_parse_line_ts(ln) for _, ln in all_matches]
+            timestamps = [t for t in timestamps if t]
+            if timestamps:
+                time_span = {
+                    "earliest": min(timestamps).strftime("%Y-%m-%d %H:%M:%S UTC"),
+                    "latest": max(timestamps).strftime("%Y-%m-%d %H:%M:%S UTC"),
+                }
+        except Exception:
+            pass
+
         return json.dumps({
-            "query":
-            "
-            "
+            "query": query_f,
+            "total_matches": total,
+            "sources_hit": sorted(sources_hit),
             "sources_searched": [d.name for d in src_dirs],
-            "
+            "top_patterns": top_patterns,
+            "sample_lines": sample_lines,
+            "time_span": time_span,
+            "capped": total >= max_matches,
         })
 
     if name == "trigger_poll":
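
For orientation, a sketch of the success payload the reworked filter_logs branch builds before json.dumps serializes it; the keys mirror the final return statement above, while every value here is invented:

    # Illustrative values only; key set taken from the json.dumps call in the hunk.
    result = {
        "query": "Exception|Error|FAILED",
        "total_matches": 47,
        "sources_hit": ["ssolwa", "sts"],
        "sources_searched": ["ssolwa", "sts"],
        "top_patterns": [
            {"pattern": "NullPointerException", "count": 31,
             "example": "[ssolwa] 2026-03-23 11:02:14 ERROR DigService ..."},
        ],
        "sample_lines": ["[ssolwa] 2026-03-23 11:02:14 ERROR DigService ..."],
        "time_span": {"earliest": "2026-03-23 10:23:00 UTC",
                      "latest": "2026-03-23 10:47:12 UTC"},
        "capped": False,  # true only when total hit the max_matches cap
    }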