devloop 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. devloop/__init__.py +3 -0
  2. devloop/agents/__init__.py +33 -0
  3. devloop/agents/agent_health_monitor.py +105 -0
  4. devloop/agents/ci_monitor.py +237 -0
  5. devloop/agents/code_rabbit.py +248 -0
  6. devloop/agents/doc_lifecycle.py +374 -0
  7. devloop/agents/echo.py +24 -0
  8. devloop/agents/file_logger.py +46 -0
  9. devloop/agents/formatter.py +511 -0
  10. devloop/agents/git_commit_assistant.py +421 -0
  11. devloop/agents/linter.py +399 -0
  12. devloop/agents/performance_profiler.py +284 -0
  13. devloop/agents/security_scanner.py +322 -0
  14. devloop/agents/snyk.py +292 -0
  15. devloop/agents/test_runner.py +484 -0
  16. devloop/agents/type_checker.py +242 -0
  17. devloop/cli/__init__.py +1 -0
  18. devloop/cli/commands/__init__.py +1 -0
  19. devloop/cli/commands/custom_agents.py +144 -0
  20. devloop/cli/commands/feedback.py +161 -0
  21. devloop/cli/commands/summary.py +50 -0
  22. devloop/cli/main.py +430 -0
  23. devloop/cli/main_v1.py +144 -0
  24. devloop/collectors/__init__.py +17 -0
  25. devloop/collectors/base.py +55 -0
  26. devloop/collectors/filesystem.py +126 -0
  27. devloop/collectors/git.py +171 -0
  28. devloop/collectors/manager.py +159 -0
  29. devloop/collectors/process.py +221 -0
  30. devloop/collectors/system.py +195 -0
  31. devloop/core/__init__.py +21 -0
  32. devloop/core/agent.py +206 -0
  33. devloop/core/agent_template.py +498 -0
  34. devloop/core/amp_integration.py +166 -0
  35. devloop/core/auto_fix.py +224 -0
  36. devloop/core/config.py +272 -0
  37. devloop/core/context.py +0 -0
  38. devloop/core/context_store.py +530 -0
  39. devloop/core/contextual_feedback.py +311 -0
  40. devloop/core/custom_agent.py +439 -0
  41. devloop/core/debug_trace.py +289 -0
  42. devloop/core/event.py +105 -0
  43. devloop/core/event_store.py +316 -0
  44. devloop/core/feedback.py +311 -0
  45. devloop/core/learning.py +351 -0
  46. devloop/core/manager.py +219 -0
  47. devloop/core/performance.py +433 -0
  48. devloop/core/proactive_feedback.py +302 -0
  49. devloop/core/summary_formatter.py +159 -0
  50. devloop/core/summary_generator.py +275 -0
  51. devloop-0.2.0.dist-info/METADATA +705 -0
  52. devloop-0.2.0.dist-info/RECORD +55 -0
  53. devloop-0.2.0.dist-info/WHEEL +4 -0
  54. devloop-0.2.0.dist-info/entry_points.txt +3 -0
  55. devloop-0.2.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,530 @@
1
+ """Context store for sharing agent findings with coding agents (Claude Code)."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ import json
7
+ import logging
8
+ from dataclasses import asdict, dataclass, field
9
+ from datetime import datetime, UTC
10
+ from enum import Enum
11
+ from pathlib import Path
12
+ from typing import Any, Dict, List, Literal
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+
17
class Severity(str, Enum):
    """Finding severity levels.

    Subclasses ``str`` so members compare equal to their string values and
    can be round-tripped through JSON (see ``ContextStore._write_tier``).
    """

    ERROR = "error"
    WARNING = "warning"
    INFO = "info"
    STYLE = "style"
24
+
25
+
26
class ScopeType(str, Enum):
    """Finding scope types.

    Describes how far a finding's impact reaches; stored on
    ``Finding.scope_type`` and serialized by its string value.
    """

    CURRENT_FILE = "current_file"    # Confined to the file the finding points at
    RELATED_FILES = "related_files"  # Touches files related to the current one
    PROJECT_WIDE = "project_wide"    # Affects the project as a whole
32
+
33
+
34
class Tier(str, Enum):
    """Context tier for progressive disclosure.

    Each tier maps to its own ``<tier>.json`` file on disk and controls
    when a finding is surfaced to the user/LLM.
    """

    IMMEDIATE = "immediate"    # Show now, blocking issues
    RELEVANT = "relevant"      # Mention at task completion
    BACKGROUND = "background"  # Show only on request
    AUTO_FIXED = "auto_fixed"  # Already fixed silently
41
+
42
+
43
@dataclass
class Finding:
    """A single finding from an agent.

    String ``severity``/``scope_type`` values are normalized to their enum
    types on construction; identifying fields and numeric ranges are
    validated, raising ``ValueError`` on bad input.
    """

    # Identity and location
    id: str
    agent: str
    timestamp: str
    file: str
    line: int | None = None
    column: int | None = None

    # Classification
    severity: Severity = Severity.INFO
    blocking: bool = False
    category: str = "general"

    # Human-readable content
    message: str = ""
    detail: str = ""
    suggestion: str = ""

    # Auto-fix metadata
    auto_fixable: bool = False
    fix_command: str | None = None

    # Scope and freshness
    scope_type: ScopeType = ScopeType.CURRENT_FILE
    caused_by_recent_change: bool = False
    is_new: bool = True

    # Disclosure state
    relevance_score: float = 0.5
    disclosure_level: int = 0
    seen_by_user: bool = False

    # Free-form extras
    workflow_hints: Dict[str, bool] = field(default_factory=dict)
    context: Dict[str, Any] = field(default_factory=dict)

    def __post_init__(self):
        """Validate Finding parameters."""
        # Identifying fields must be non-empty strings.
        for name in ("id", "agent", "file"):
            value = getattr(self, name)
            if not (isinstance(value, str) and value):
                raise ValueError(f"{name} must be a non-empty string")

        # Accept plain strings for the enum-typed fields and coerce them.
        if isinstance(self.severity, str):
            self.severity = Severity(self.severity)
        if isinstance(self.scope_type, str):
            self.scope_type = ScopeType(self.scope_type)

        # Positions, when given, cannot be negative.
        for name in ("line", "column"):
            value = getattr(self, name)
            if value is not None and value < 0:
                raise ValueError(f"{name} must be non-negative, got {value}")

        if not 0.0 <= self.relevance_score <= 1.0:
            raise ValueError(
                f"relevance_score must be between 0.0 and 1.0, got {self.relevance_score}"
            )

        if self.disclosure_level < 0:
            raise ValueError(
                f"disclosure_level must be non-negative, got {self.disclosure_level}"
            )
110
+
111
+
112
@dataclass
class UserContext:
    """Context about user's current development state."""

    currently_editing: List[str] = field(default_factory=list)
    recently_modified: List[str] = field(default_factory=list)
    related_files: List[str] = field(default_factory=list)
    phase: Literal["active_coding", "pre_commit", "reviewing"] = "active_coding"
    explicit_request: str | None = None

    def matches_request(self, category: str) -> bool:
        """Return True if *category* appears (case-insensitively) in the
        user's explicit request; False when no request was made."""
        request = self.explicit_request
        return bool(request) and category.lower() in request.lower()
127
+
128
+
129
@dataclass
class ContextIndex:
    """Summary index for quick LLM consumption.

    Mirrors the structure that ``ContextStore._update_index`` writes to
    ``index.json``: one summary mapping per disclosure tier.
    """

    last_updated: str                  # timestamp of the last index write
    check_now: Dict[str, Any]          # immediate-tier summary
    mention_if_relevant: Dict[str, Any]  # relevant-tier summary
    deferred: Dict[str, Any]           # background-tier summary
    auto_fixed: Dict[str, Any] = field(default_factory=dict)  # auto-fixed summary
138
+
139
+
140
class ContextStore:
    """
    Manages context storage for agent findings.

    Organizes findings into tiers for progressive disclosure:
    - immediate: Blocking issues, show immediately
    - relevant: Mention at task completion
    - background: Show only on explicit request
    - auto_fixed: Log of silent fixes

    In-memory state is mirrored to one JSON file per tier plus an
    ``index.json`` summary, all written atomically (temp file + rename).
    """

    def __init__(self, context_dir: Path | str | None = None):
        """
        Initialize context store.

        Args:
            context_dir: Directory for context files. Defaults to .devloop/context
        """
        if context_dir is None:
            context_dir = Path.cwd() / ".devloop" / "context"
        self.context_dir = Path(context_dir)
        # Serializes in-memory mutation and the corresponding disk writes
        # across concurrent asyncio tasks.
        self._lock = asyncio.Lock()
        self._findings: Dict[Tier, List[Finding]] = {tier: [] for tier in Tier}
        logger.info(f"Context store initialized at {self.context_dir}")

    @staticmethod
    def _utc_now_iso() -> str:
        """Return the current UTC time as ISO-8601 with a trailing 'Z'.

        ``datetime.now(UTC).isoformat()`` already embeds a ``+00:00`` offset,
        so the previous ``isoformat() + "Z"`` produced the invalid form
        ``...+00:00Z``; normalize the offset to the conventional ``Z``.
        """
        return datetime.now(UTC).isoformat().replace("+00:00", "Z")

    async def initialize(self) -> None:
        """Create context directory structure if it doesn't exist."""
        try:
            self.context_dir.mkdir(parents=True, exist_ok=True)
            logger.info(f"Context directory ready: {self.context_dir}")
        except Exception as e:
            logger.error(f"Failed to create context directory: {e}")
            raise

    async def add_finding(
        self,
        finding: Finding | Dict[str, Any],
        user_context: UserContext | None = None,
    ) -> None:
        """
        Add a finding to the context store.

        Args:
            finding: Finding object or dict with finding data
            user_context: Optional user context for relevance scoring
        """
        # Convert dict to Finding if needed (Finding.__post_init__ validates)
        if isinstance(finding, dict):
            finding = Finding(**finding)

        # Compute relevance score when we have user context to score against
        if user_context:
            finding.relevance_score = self.compute_relevance(finding, user_context)

        # Assign to tier (done outside the lock; reads only the finding)
        tier = self.assign_tier(finding)

        async with self._lock:
            self._findings[tier].append(finding)
            await self._write_tier(tier)
            await self._update_index()

        logger.debug(
            f"Added finding {finding.id} to {tier.value} (relevance: {finding.relevance_score:.2f})"
        )

    def compute_relevance(self, finding: Finding, user_context: UserContext) -> float:
        """
        Compute relevance score for a finding.

        Returns score between 0.0 and 1.0:
        - 0.0 - 0.3: background
        - 0.4 - 0.7: relevant
        - 0.8 - 1.0: immediate
        """
        score = 0.0

        # File scope (max 0.5)
        if finding.file in user_context.currently_editing:
            score += 0.5
        elif finding.file in user_context.recently_modified:
            score += 0.3
        elif finding.file in user_context.related_files:
            score += 0.2

        # Severity (max 0.4)
        if finding.blocking:
            score += 0.4
        elif finding.severity == Severity.ERROR:
            score += 0.3
        elif finding.severity == Severity.WARNING:
            score += 0.15
        elif finding.severity == Severity.INFO:
            score += 0.05

        # Freshness (max 0.3)
        if finding.is_new and finding.caused_by_recent_change:
            score += 0.3
        elif finding.is_new:
            score += 0.15

        # User intent (max 0.5, can override)
        if user_context.matches_request(finding.category):
            score += 0.5

        # Workflow phase adjustments
        if user_context.phase == "pre_commit":
            score += 0.2
        elif user_context.phase == "active_coding":
            score -= 0.2

        # Clamp to the documented [0.0, 1.0] range: the active_coding
        # penalty could previously drive the score below zero, violating
        # Finding's relevance_score invariant.
        return max(0.0, min(score, 1.0))

    def assign_tier(self, finding: Finding) -> Tier:
        """
        Assign finding to a tier based on relevance and properties.

        Args:
            finding: Finding to assign

        Returns:
            Tier assignment
        """
        # Blockers always immediate
        if finding.blocking:
            return Tier.IMMEDIATE

        # Low-relevance auto-fixable style issues are fixed silently
        if (
            finding.auto_fixable
            and finding.severity == Severity.STYLE
            and finding.relevance_score < 0.5
        ):
            return Tier.AUTO_FIXED

        # Score-based assignment
        if finding.relevance_score >= 0.8:
            return Tier.IMMEDIATE
        elif finding.relevance_score >= 0.4:
            return Tier.RELEVANT
        else:
            return Tier.BACKGROUND

    async def get_findings(
        self, tier: Tier | None = None, file_filter: str | None = None
    ) -> List[Finding]:
        """
        Get findings from the store.

        Args:
            tier: Optional tier filter
            file_filter: Optional file path filter

        Returns:
            List of findings matching filters
        """
        async with self._lock:
            if tier:
                # Copy so callers can't mutate internal state
                findings = self._findings[tier].copy()
            else:
                findings = []
                for tier_findings in self._findings.values():
                    findings.extend(tier_findings)

            if file_filter:
                findings = [f for f in findings if f.file == file_filter]

            return findings

    async def clear_findings(
        self, tier: Tier | None = None, file_filter: str | None = None
    ) -> int:
        """
        Clear findings from the store.

        Args:
            tier: Optional tier filter
            file_filter: Optional file path filter

        Returns:
            Number of findings cleared
        """
        count = 0
        async with self._lock:
            tiers_to_clear = [tier] if tier else list(Tier)

            for t in tiers_to_clear:
                if file_filter:
                    # Keep findings for other files; count what was dropped
                    original_count = len(self._findings[t])
                    self._findings[t] = [
                        f for f in self._findings[t] if f.file != file_filter
                    ]
                    count += original_count - len(self._findings[t])
                else:
                    count += len(self._findings[t])
                    self._findings[t] = []

                await self._write_tier(t)

            await self._update_index()

        logger.info(f"Cleared {count} finding(s)")
        return count

    async def _write_tier(self, tier: Tier) -> None:
        """Write a tier's findings to disk as ``<tier>.json``.

        Caller must hold ``self._lock``. Raises on I/O failure.
        """
        tier_file = self.context_dir / f"{tier.value}.json"

        try:
            # Serialize findings, replacing enum fields with their values
            findings_data = {
                "tier": tier.value,
                "count": len(self._findings[tier]),
                "findings": [
                    {
                        **asdict(f),
                        "severity": f.severity.value,
                        "scope_type": f.scope_type.value,
                    }
                    for f in self._findings[tier]
                ],
            }

            # Write atomically (write to temp, then rename)
            temp_file = tier_file.with_suffix(".tmp")
            temp_file.write_text(json.dumps(findings_data, indent=2))
            temp_file.replace(tier_file)

            logger.debug(f"Wrote {len(self._findings[tier])} findings to {tier_file}")

        except Exception as e:
            logger.error(f"Failed to write tier {tier.value}: {e}")
            raise

    async def _update_index(self) -> None:
        """Update ``index.json`` for quick LLM consumption.

        Caller must hold ``self._lock``. Raises on I/O failure.
        """
        index_file = self.context_dir / "index.json"

        try:
            # Gather per-tier finding lists
            immediate = self._findings[Tier.IMMEDIATE]
            relevant = self._findings[Tier.RELEVANT]
            background = self._findings[Tier.BACKGROUND]
            auto_fixed = self._findings[Tier.AUTO_FIXED]

            # Build index (sorted file list keeps output deterministic)
            index = {
                "last_updated": self._utc_now_iso(),
                "check_now": {
                    "count": len(immediate),
                    "severity_breakdown": self._severity_breakdown(immediate),
                    "files": sorted({f.file for f in immediate}),
                    "preview": self._generate_preview(immediate),
                },
                "mention_if_relevant": {
                    "count": len(relevant),
                    "categories": self._category_breakdown(relevant),
                    "summary": self._generate_summary(relevant),
                },
                "deferred": {
                    "count": len(background),
                    "summary": f"{len(background)} background items",
                },
                "auto_fixed": {
                    "count": len(auto_fixed),
                    "summary": f"{len(auto_fixed)} items auto-fixed",
                },
            }

            # Write atomically
            temp_file = index_file.with_suffix(".tmp")
            temp_file.write_text(json.dumps(index, indent=2))
            temp_file.replace(index_file)

            logger.debug(f"Updated index: {index_file}")

        except Exception as e:
            logger.error(f"Failed to update index: {e}")
            raise

    def _severity_breakdown(self, findings: List[Finding]) -> Dict[str, int]:
        """Count findings by severity value."""
        breakdown: Dict[str, int] = {}
        for f in findings:
            severity = f.severity.value
            breakdown[severity] = breakdown.get(severity, 0) + 1
        return breakdown

    def _category_breakdown(self, findings: List[Finding]) -> Dict[str, int]:
        """Count findings by category."""
        breakdown: Dict[str, int] = {}
        for f in findings:
            breakdown[f.category] = breakdown.get(f.category, 0) + 1
        return breakdown

    def _generate_preview(self, findings: List[Finding]) -> str:
        """Generate a brief preview of findings."""
        if not findings:
            return "No immediate issues"

        if len(findings) == 1:
            f = findings[0]
            return f"{f.severity.value.title()} in {f.file}:{f.line or '?'}"

        # Multiple findings: summarize counts per severity
        severity_counts = self._severity_breakdown(findings)
        parts = [f"{count} {sev}" for sev, count in severity_counts.items()]
        return ", ".join(parts)

    def _generate_summary(self, findings: List[Finding]) -> str:
        """Generate a summary of findings grouped by category."""
        if not findings:
            return "No relevant issues"

        category_counts = self._category_breakdown(findings)
        parts = [
            f"{count} {cat.replace('_', ' ')}" for cat, count in category_counts.items()
        ]
        return ", ".join(parts)

    async def read_index(self) -> Dict[str, Any]:
        """
        Read the index file.

        Returns:
            Index data as dict; a zero-count placeholder when no index
            file exists yet.

        Raises:
            Propagates JSON/I/O errors after logging them.
        """
        index_file = self.context_dir / "index.json"

        try:
            if not index_file.exists():
                return {
                    "last_updated": self._utc_now_iso(),
                    "check_now": {"count": 0, "preview": "No immediate issues"},
                    "mention_if_relevant": {
                        "count": 0,
                        "summary": "No relevant issues",
                    },
                    "deferred": {"count": 0, "summary": "No background items"},
                    "auto_fixed": {"count": 0, "summary": "No auto-fixed items"},
                }

            data = json.loads(index_file.read_text())
            return data

        except Exception as e:
            logger.error(f"Failed to read index: {e}")
            raise

    async def load_from_disk(self) -> None:
        """Load all findings from disk into memory.

        Replaces the in-memory list of any tier whose file parses; a tier
        that fails to load is logged and skipped so the rest still load.
        """
        async with self._lock:
            for tier in Tier:
                tier_file = self.context_dir / f"{tier.value}.json"

                if not tier_file.exists():
                    continue

                try:
                    data = json.loads(tier_file.read_text())
                    findings = []

                    for f_data in data.get("findings", []):
                        # Convert severity and scope_type back to enums
                        if "severity" in f_data:
                            f_data["severity"] = Severity(f_data["severity"])
                        if "scope_type" in f_data:
                            f_data["scope_type"] = ScopeType(f_data["scope_type"])

                        findings.append(Finding(**f_data))

                    self._findings[tier] = findings
                    logger.info(
                        f"Loaded {len(findings)} findings from {tier.value}.json"
                    )

                except Exception as e:
                    logger.error(f"Failed to load {tier_file}: {e}")
                    # Continue with other tiers
527
+
528
+
529
# Global instance
# Module-level singleton created eagerly at import time with the default
# directory (cwd/.devloop/context); shared by all importers of this module.
context_store = ContextStore()