tapps-agents 3.5.38__py3-none-any.whl → 3.5.40__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. tapps_agents/__init__.py +2 -2
  2. tapps_agents/agents/cleanup/__init__.py +7 -0
  3. tapps_agents/agents/cleanup/agent.py +445 -0
  4. tapps_agents/agents/enhancer/agent.py +2 -2
  5. tapps_agents/agents/implementer/agent.py +35 -13
  6. tapps_agents/agents/reviewer/agent.py +43 -10
  7. tapps_agents/agents/reviewer/scoring.py +59 -68
  8. tapps_agents/agents/reviewer/tools/__init__.py +24 -0
  9. tapps_agents/agents/reviewer/tools/ruff_grouping.py +250 -0
  10. tapps_agents/agents/reviewer/tools/scoped_mypy.py +284 -0
  11. tapps_agents/beads/__init__.py +11 -0
  12. tapps_agents/beads/hydration.py +213 -0
  13. tapps_agents/beads/specs.py +206 -0
  14. tapps_agents/cli/commands/cleanup_agent.py +92 -0
  15. tapps_agents/cli/commands/health.py +19 -3
  16. tapps_agents/cli/commands/simple_mode.py +842 -676
  17. tapps_agents/cli/commands/task.py +219 -0
  18. tapps_agents/cli/commands/top_level.py +13 -0
  19. tapps_agents/cli/main.py +15 -2
  20. tapps_agents/cli/parsers/cleanup_agent.py +228 -0
  21. tapps_agents/cli/parsers/top_level.py +1978 -1881
  22. tapps_agents/core/config.py +43 -0
  23. tapps_agents/core/init_project.py +3012 -2896
  24. tapps_agents/epic/markdown_sync.py +105 -0
  25. tapps_agents/epic/orchestrator.py +1 -2
  26. tapps_agents/epic/parser.py +427 -423
  27. tapps_agents/experts/adaptive_domain_detector.py +0 -2
  28. tapps_agents/experts/knowledge/api-design-integration/api-security-patterns.md +15 -15
  29. tapps_agents/experts/knowledge/api-design-integration/external-api-integration.md +19 -44
  30. tapps_agents/health/checks/outcomes.backup_20260204_064058.py +324 -0
  31. tapps_agents/health/checks/outcomes.backup_20260204_064256.py +324 -0
  32. tapps_agents/health/checks/outcomes.backup_20260204_064600.py +324 -0
  33. tapps_agents/health/checks/outcomes.py +134 -46
  34. tapps_agents/health/orchestrator.py +12 -4
  35. tapps_agents/hooks/__init__.py +33 -0
  36. tapps_agents/hooks/config.py +140 -0
  37. tapps_agents/hooks/events.py +135 -0
  38. tapps_agents/hooks/executor.py +128 -0
  39. tapps_agents/hooks/manager.py +143 -0
  40. tapps_agents/session/__init__.py +19 -0
  41. tapps_agents/session/manager.py +256 -0
  42. tapps_agents/simple_mode/code_snippet_handler.py +382 -0
  43. tapps_agents/simple_mode/intent_parser.py +29 -4
  44. tapps_agents/simple_mode/orchestrators/base.py +185 -59
  45. tapps_agents/simple_mode/orchestrators/build_orchestrator.py +2667 -2642
  46. tapps_agents/simple_mode/orchestrators/fix_orchestrator.py +723 -723
  47. tapps_agents/simple_mode/workflow_suggester.py +37 -3
  48. tapps_agents/workflow/agent_handlers/implementer_handler.py +18 -3
  49. tapps_agents/workflow/cursor_executor.py +2196 -2118
  50. tapps_agents/workflow/direct_execution_fallback.py +16 -3
  51. tapps_agents/workflow/enforcer.py +36 -23
  52. tapps_agents/workflow/message_formatter.py +188 -0
  53. tapps_agents/workflow/parallel_executor.py +43 -4
  54. tapps_agents/workflow/parser.py +375 -357
  55. tapps_agents/workflow/rules_generator.py +337 -331
  56. tapps_agents/workflow/skill_invoker.py +9 -3
  57. {tapps_agents-3.5.38.dist-info → tapps_agents-3.5.40.dist-info}/METADATA +9 -5
  58. {tapps_agents-3.5.38.dist-info → tapps_agents-3.5.40.dist-info}/RECORD +62 -53
  59. tapps_agents/agents/analyst/SKILL.md +0 -85
  60. tapps_agents/agents/architect/SKILL.md +0 -80
  61. tapps_agents/agents/debugger/SKILL.md +0 -66
  62. tapps_agents/agents/designer/SKILL.md +0 -78
  63. tapps_agents/agents/documenter/SKILL.md +0 -95
  64. tapps_agents/agents/enhancer/SKILL.md +0 -189
  65. tapps_agents/agents/implementer/SKILL.md +0 -117
  66. tapps_agents/agents/improver/SKILL.md +0 -55
  67. tapps_agents/agents/ops/SKILL.md +0 -64
  68. tapps_agents/agents/orchestrator/SKILL.md +0 -238
  69. tapps_agents/agents/planner/story_template.md +0 -37
  70. tapps_agents/agents/reviewer/templates/quality-dashboard.html.j2 +0 -150
  71. tapps_agents/agents/tester/SKILL.md +0 -71
  72. {tapps_agents-3.5.38.dist-info → tapps_agents-3.5.40.dist-info}/WHEEL +0 -0
  73. {tapps_agents-3.5.38.dist-info → tapps_agents-3.5.40.dist-info}/entry_points.txt +0 -0
  74. {tapps_agents-3.5.38.dist-info → tapps_agents-3.5.40.dist-info}/licenses/LICENSE +0 -0
  75. {tapps_agents-3.5.38.dist-info → tapps_agents-3.5.40.dist-info}/top_level.txt +0 -0
tapps_agents/__init__.py CHANGED
@@ -24,8 +24,8 @@ Example:
  ```
  """
 
- __version__: str = "3.5.38"
+ __version__: str = "3.5.40"
 
  # Also expose as _version_ for compatibility with some import mechanisms
  # This helps with editable installs where __version__ might not be importable
- _version_: str = "3.5.38"
+ _version_: str = "3.5.40"
tapps_agents/agents/cleanup/__init__.py ADDED
@@ -0,0 +1,7 @@
+ """
+ Cleanup Agent - Project structure analysis and cleanup
+ """
+
+ from .agent import CleanupAgent
+
+ __all__ = ["CleanupAgent"]
tapps_agents/agents/cleanup/agent.py ADDED
@@ -0,0 +1,445 @@
+ """
+ Cleanup Agent - Project structure analysis and intelligent cleanup
+
+ This agent helps keep projects clean by:
+ - Analyzing project structure for cleanup opportunities
+ - Detecting duplicate files, outdated docs, and naming inconsistencies
+ - Generating cleanup plans with rationale for each action
+ - Executing cleanup operations safely with backups and rollback
+ """
+
+ from pathlib import Path
+ from typing import Any
+
+ from ...core.agent_base import BaseAgent
+ from ...core.config import ProjectConfig, load_config
+ from ...utils.project_cleanup_agent import (
+     AnalysisReport,
+     CleanupAgent as CleanupAgentUtil,
+     CleanupPlan,
+     ExecutionReport,
+     ProjectAnalyzer,
+ )
+
+
+ class CleanupAgent(BaseAgent):
+     """
+     Cleanup Agent - Project structure analysis and cleanup.
+
+     Permissions: Read, Write, Edit, Glob, Bash
+
+     This agent provides guided project cleanup capabilities:
+     - Analyze project structure (duplicates, outdated files, naming issues)
+     - Generate cleanup plans with user confirmation
+     - Execute cleanup operations safely with backups
+     - Support dry-run mode for previewing changes
+     """
+
+     def __init__(self, config: ProjectConfig | None = None):
+         super().__init__(
+             agent_id="cleanup-agent",
+             agent_name="Cleanup Agent",
+             config=config,
+         )
+         # Use config if provided, otherwise load defaults
+         if config is None:
+             config = load_config()
+         self.config = config
+
+         # Get cleanup agent config
+         cleanup_config = config.agents.cleanup_agent if config and config.agents else None
+         self.dry_run_default = cleanup_config.dry_run_default if cleanup_config else True
+         self.backup_enabled = cleanup_config.backup_enabled if cleanup_config else True
+         self.interactive_mode = cleanup_config.interactive_mode if cleanup_config else True
+
+         # Utility components (lazily initialized)
+         self._util: CleanupAgentUtil | None = None
+         self._analyzer: ProjectAnalyzer | None = None
+
+     def _get_util(self, project_root: Path | None = None) -> CleanupAgentUtil:
+         """Get or create the cleanup utility instance."""
+         root = project_root or self._project_root or Path.cwd()
+         if self._util is None or self._util.project_root != root:
+             self._util = CleanupAgentUtil(root)
+         return self._util
+
+     def _get_analyzer(self, project_root: Path | None = None) -> ProjectAnalyzer:
+         """Get or create the analyzer instance."""
+         root = project_root or self._project_root or Path.cwd()
+         if self._analyzer is None or self._analyzer.project_root != root:
+             self._analyzer = ProjectAnalyzer(root)
+         return self._analyzer
+
+     def get_commands(self) -> list[dict[str, str]]:
+         """Return list of available commands."""
+         commands = super().get_commands()
+         commands.extend(
+             [
+                 {
+                     "command": "*analyze",
+                     "description": "Analyze project structure for cleanup opportunities",
+                 },
+                 {
+                     "command": "*plan",
+                     "description": "Generate cleanup plan from analysis",
+                 },
+                 {
+                     "command": "*execute",
+                     "description": "Execute cleanup plan (dry-run by default)",
+                 },
+                 {
+                     "command": "*run",
+                     "description": "Run full cleanup workflow (analyze, plan, execute)",
+                 },
+             ]
+         )
+         return commands
+
+     async def run(self, command: str, **kwargs) -> dict[str, Any]:
+         """Execute a command."""
+         if command == "analyze":
+             return await self.analyze_command(**kwargs)
+         elif command == "plan":
+             return await self.plan_command(**kwargs)
+         elif command == "execute":
+             return await self.execute_command(**kwargs)
+         elif command == "run":
+             return await self.run_full_cleanup_command(**kwargs)
+         elif command == "help":
+             return self._help()
+         else:
+             return {"error": f"Unknown command: {command}"}
+
+     async def analyze_command(
+         self,
+         path: str | Path | None = None,
+         pattern: str = "*.md",
+         output: str | Path | None = None,
+     ) -> dict[str, Any]:
+         """
+         Analyze project structure for cleanup opportunities.
+
+         Args:
+             path: Path to analyze (defaults to project docs/)
+             pattern: File pattern to match (default: *.md)
+             output: Optional output file for analysis report
+
+         Returns:
+             Analysis report with duplicates, outdated files, naming issues
+         """
+         project_root = self._project_root or Path.cwd()
+         scan_path = Path(path) if path else project_root / "docs"
+
+         if not scan_path.exists():
+             return {
+                 "error": f"Path does not exist: {scan_path}",
+                 "type": "analyze",
+                 "success": False,
+             }
+
+         try:
+             util = self._get_util(project_root)
+             report = await util.run_analysis(scan_path, pattern)
+
+             result = {
+                 "type": "analyze",
+                 "success": True,
+                 "report": {
+                     "total_files": report.total_files,
+                     "total_size_mb": report.total_size / 1024 / 1024,
+                     "duplicate_groups": len(report.duplicates),
+                     "duplicate_files": report.duplicate_count,
+                     "potential_savings_kb": report.potential_savings / 1024,
+                     "outdated_files": len(report.outdated_files),
+                     "obsolete_files": report.obsolete_file_count,
+                     "naming_issues": len(report.naming_issues),
+                     "timestamp": report.timestamp.isoformat(),
+                     "scan_path": str(report.scan_path),
+                 },
+                 "summary": report.to_markdown(),
+                 "message": f"Analysis complete: {report.total_files} files analyzed",
+             }
+
+             # Save report if output specified
+             if output:
+                 output_path = Path(output)
+                 output_path.write_text(report.model_dump_json(indent=2))
+                 result["output_file"] = str(output_path)
+
+             return result
+
+         except Exception as e:
+             return {
+                 "type": "analyze",
+                 "success": False,
+                 "error": str(e),
+             }
+
+     async def plan_command(
+         self,
+         analysis_file: str | Path | None = None,
+         path: str | Path | None = None,
+         pattern: str = "*.md",
+         output: str | Path | None = None,
+     ) -> dict[str, Any]:
+         """
+         Generate cleanup plan from analysis.
+
+         Args:
+             analysis_file: Path to analysis report JSON (optional)
+             path: Path to analyze if no analysis file (defaults to docs/)
+             pattern: File pattern if running fresh analysis
+             output: Optional output file for cleanup plan
+
+         Returns:
+             Cleanup plan with prioritized actions
+         """
+         project_root = self._project_root or Path.cwd()
+
+         try:
+             # Load analysis or run fresh
+             if analysis_file:
+                 analysis_path = Path(analysis_file)
+                 if not analysis_path.exists():
+                     return {
+                         "error": f"Analysis file not found: {analysis_path}",
+                         "type": "plan",
+                         "success": False,
+                     }
+                 analysis = AnalysisReport.model_validate_json(analysis_path.read_text())
+             else:
+                 scan_path = Path(path) if path else project_root / "docs"
+                 util = self._get_util(project_root)
+                 analysis = await util.run_analysis(scan_path, pattern)
+
+             # Generate plan
+             util = self._get_util(project_root)
+             plan = util.run_planning(analysis)
+
+             result = {
+                 "type": "plan",
+                 "success": True,
+                 "plan": {
+                     "total_actions": len(plan.actions),
+                     "high_priority": plan.high_priority_count,
+                     "medium_priority": plan.medium_priority_count,
+                     "low_priority": plan.low_priority_count,
+                     "estimated_savings_mb": plan.estimated_savings / 1024 / 1024,
+                     "estimated_file_reduction": f"{plan.estimated_file_reduction:.1f}%",
+                     "created_at": plan.created_at.isoformat(),
+                 },
+                 "actions_preview": [
+                     {
+                         "type": str(a.action_type),
+                         "files": [str(f) for f in a.source_files],
+                         "target": str(a.target_path) if a.target_path else None,
+                         "rationale": a.rationale,
+                         "priority": a.priority,
+                         "safety": str(a.safety_level),
+                         "requires_confirmation": a.requires_confirmation,
+                     }
+                     for a in plan.actions[:10]  # Preview first 10
+                 ],
+                 "summary": plan.to_markdown(),
+                 "message": f"Plan generated: {len(plan.actions)} actions",
+             }
+
+             # Save plan if output specified
+             if output:
+                 output_path = Path(output)
+                 output_path.write_text(plan.model_dump_json(indent=2))
+                 result["output_file"] = str(output_path)
+
+             return result
+
+         except Exception as e:
+             return {
+                 "type": "plan",
+                 "success": False,
+                 "error": str(e),
+             }
+
+     async def execute_command(
+         self,
+         plan_file: str | Path | None = None,
+         path: str | Path | None = None,
+         pattern: str = "*.md",
+         dry_run: bool | None = None,
+         backup: bool | None = None,
+     ) -> dict[str, Any]:
+         """
+         Execute cleanup plan.
+
+         Args:
+             plan_file: Path to cleanup plan JSON (optional)
+             path: Path to analyze if no plan file
+             pattern: File pattern if running fresh
+             dry_run: Preview changes without executing (default: True)
+             backup: Create backup before execution (default: True)
+
+         Returns:
+             Execution report with results
+         """
+         project_root = self._project_root or Path.cwd()
+
+         # Use defaults if not specified
+         if dry_run is None:
+             dry_run = self.dry_run_default
+         if backup is None:
+             backup = self.backup_enabled
+
+         try:
+             # Load plan or generate fresh
+             if plan_file:
+                 plan_path = Path(plan_file)
+                 if not plan_path.exists():
+                     return {
+                         "error": f"Plan file not found: {plan_path}",
+                         "type": "execute",
+                         "success": False,
+                     }
+                 plan = CleanupPlan.model_validate_json(plan_path.read_text())
+             else:
+                 # Run analysis and planning first
+                 scan_path = Path(path) if path else project_root / "docs"
+                 util = self._get_util(project_root)
+                 analysis = await util.run_analysis(scan_path, pattern)
+                 plan = util.run_planning(analysis)
+
+             # Execute plan
+             util = self._get_util(project_root)
+             report = await util.run_execution(plan, dry_run=dry_run, create_backup=backup)
+
+             result = {
+                 "type": "execute",
+                 "success": True,
+                 "dry_run": report.dry_run,
+                 "report": {
+                     "total_operations": len(report.operations),
+                     "successful": report.success_count,
+                     "failed": report.failure_count,
+                     "files_deleted": report.files_deleted,
+                     "files_moved": report.files_moved,
+                     "files_renamed": report.files_renamed,
+                     "files_modified": report.files_modified,
+                     "duration_seconds": report.duration_seconds,
+                     "backup_location": str(report.backup_location) if report.backup_location else None,
+                 },
+                 "summary": report.to_markdown(),
+                 "message": (
+                     f"{'Dry run' if dry_run else 'Execution'} complete: "
+                     f"{report.success_count} successful, {report.failure_count} failed"
+                 ),
+             }
+
+             return result
+
+         except Exception as e:
+             return {
+                 "type": "execute",
+                 "success": False,
+                 "error": str(e),
+             }
+
+     async def run_full_cleanup_command(
+         self,
+         path: str | Path | None = None,
+         pattern: str = "*.md",
+         dry_run: bool | None = None,
+         backup: bool | None = None,
+     ) -> dict[str, Any]:
+         """
+         Run full cleanup workflow (analyze, plan, execute).
+
+         Args:
+             path: Path to analyze (defaults to docs/)
+             pattern: File pattern to match
+             dry_run: Preview changes without executing (default: True)
+             backup: Create backup before execution (default: True)
+
+         Returns:
+             Combined report with analysis, plan, and execution results
+         """
+         project_root = self._project_root or Path.cwd()
+         scan_path = Path(path) if path else project_root / "docs"
+
+         # Use defaults if not specified
+         if dry_run is None:
+             dry_run = self.dry_run_default
+         if backup is None:
+             backup = self.backup_enabled
+
+         if not scan_path.exists():
+             return {
+                 "error": f"Path does not exist: {scan_path}",
+                 "type": "run",
+                 "success": False,
+             }
+
+         try:
+             util = self._get_util(project_root)
+             analysis, plan, execution = await util.run_full_cleanup(
+                 scan_path,
+                 pattern,
+                 dry_run=dry_run,
+                 create_backup=backup,
+             )
+
+             return {
+                 "type": "run",
+                 "success": True,
+                 "dry_run": execution.dry_run,
+                 "analysis": {
+                     "total_files": analysis.total_files,
+                     "duplicates": analysis.duplicate_count,
+                     "outdated": len(analysis.outdated_files),
+                     "naming_issues": len(analysis.naming_issues),
+                 },
+                 "plan": {
+                     "total_actions": len(plan.actions),
+                     "estimated_savings_mb": plan.estimated_savings / 1024 / 1024,
+                 },
+                 "execution": {
+                     "successful": execution.success_count,
+                     "failed": execution.failure_count,
+                     "files_modified": execution.files_modified,
+                     "backup_location": str(execution.backup_location) if execution.backup_location else None,
+                 },
+                 "summary": "\n".join([
+                     "=" * 60,
+                     analysis.to_markdown(),
+                     "=" * 60,
+                     plan.to_markdown(),
+                     "=" * 60,
+                     execution.to_markdown(),
+                 ]),
+                 "message": (
+                     f"{'Dry run' if dry_run else 'Cleanup'} complete: "
+                     f"{analysis.total_files} files analyzed, "
+                     f"{len(plan.actions)} actions, "
+                     f"{execution.success_count} successful"
+                 ),
+             }
+
+         except Exception as e:
+             return {
+                 "type": "run",
+                 "success": False,
+                 "error": str(e),
+             }
+
+     def _help(self) -> dict[str, Any]:
+         """Return help information for Cleanup Agent."""
+         examples = [
+             " *analyze --path ./docs --pattern '*.md'",
+             " *plan --analysis-file analysis.json --output cleanup-plan.json",
+             " *execute --plan-file cleanup-plan.json --dry-run",
+             " *run --path ./docs --dry-run --backup",
+         ]
+         help_text = "\n".join([self.format_help(), "\nExamples:", *examples])
+         return {"type": "help", "content": help_text}
+
+     async def close(self):
+         """Close agent and clean up resources."""
+         self._util = None
+         self._analyzer = None
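For orientation, here is a minimal driver sketch for the new agent, based only on the signatures visible in the hunk above (`__init__`, `run`, `analyze_command`, `run_full_cleanup_command`, `close`); the import path and the result keys accessed come from the diff, everything else is illustrative.

```python
# Illustrative driver for the new CleanupAgent; assumes the package import path
# shown in this diff (tapps_agents.agents.cleanup) and only the methods added above.
import asyncio

from tapps_agents.agents.cleanup import CleanupAgent


async def main() -> None:
    agent = CleanupAgent()  # with no config, the agent falls back to load_config()

    # Scan docs/ for duplicates, outdated files, and naming issues.
    analysis = await agent.run("analyze", path="docs", pattern="*.md")
    print(analysis.get("message"))

    # Full workflow (analyze -> plan -> execute) as a dry run with a backup.
    result = await agent.run("run", path="docs", dry_run=True, backup=True)
    print(result.get("summary", result.get("error")))

    await agent.close()


asyncio.run(main())
```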
tapps_agents/agents/enhancer/agent.py CHANGED
@@ -1733,7 +1733,7 @@ Create a comprehensive, context-aware enhanced prompt that:
 
  try:
  from ...core.mal import MAL, MALDisabledInCursorModeError
-
+
  mal_config = self.config.mal if self.config else None
  if mal_config and mal_config.enabled:
  mal = MAL(config=mal_config)
@@ -1770,7 +1770,7 @@ Create a comprehensive, context-aware enhanced prompt that:
  "mode": "structured",
  },
  }
- except MALDisabledInCursorModeError:
+ except (ImportError, ModuleNotFoundError):
  # Should not happen in headless mode, but handle gracefully
  logger.warning("MAL disabled error in headless mode")
  return {
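The second enhancer hunk replaces the `MALDisabledInCursorModeError` handler with a broader `(ImportError, ModuleNotFoundError)` catch, so a missing MAL module now degrades to structured mode instead of raising. A standalone sketch of that optional-import pattern follows; the function name and fallback payload are simplified stand-ins, not the package's actual code.

```python
# Sketch of the optional-import fallback the hunk moves to: if the MAL module
# cannot be imported, log a warning and answer in structured mode instead.
import logging

logger = logging.getLogger(__name__)


def enhance_with_optional_mal(prompt: str, mal_config=None) -> dict:
    try:
        from tapps_agents.core.mal import MAL  # optional dependency in this sketch
    except (ImportError, ModuleNotFoundError):
        logger.warning("MAL unavailable; falling back to structured mode")
        return {"prompt": prompt, "mode": "structured"}

    if mal_config is not None and getattr(mal_config, "enabled", False):
        return {"prompt": prompt, "mode": "mal", "client": MAL(config=mal_config)}
    return {"prompt": prompt, "mode": "structured"}


print(enhance_with_optional_mal("Refine this prompt")["mode"])
```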
tapps_agents/agents/implementer/agent.py CHANGED
@@ -625,16 +625,16 @@ class ImplementerAgent(BaseAgent, ExpertSupportMixin):
  async def _detect_api_client_pattern(self, specification: str, context: str | None = None) -> bool:
  """
  Detect if specification/context indicates an HTTP/API client implementation.
-
+
  Checks for common patterns that indicate API client code:
  - Keywords: "API client", "OAuth2", "refresh token", "external API", "HTTP client"
- - Authentication patterns: "Bearer", "Zoho", "token", "authentication"
+ - Authentication patterns: "Bearer", "token", "authentication"
  - API patterns: "REST API", "API integration", "third-party API"
-
+
  Args:
  specification: Code specification/description
  context: Optional context code
-
+
  Returns:
  True if specification appears to be for an API client, False otherwise
  """
@@ -646,13 +646,14 @@ class ImplementerAgent(BaseAgent, ExpertSupportMixin):
  if context:
  text_to_analyze += " " + context.lower()
 
- # API client keywords
+ # API client keywords (enhanced)
  api_client_keywords = [
  "api client",
  "http client",
  "rest client",
  "oauth2",
  "oauth 2",
+ "oauth",
  "refresh token",
  "access token",
  "external api",
@@ -660,31 +661,52 @@ class ImplementerAgent(BaseAgent, ExpertSupportMixin):
  "api integration",
  "rest api",
  "api wrapper",
+ "graphql client",
+ "graphql api",
+ "websocket client",
+ "mqtt client",
+ "grpc client",
  ]
-
- # Authentication keywords
+
+ # Authentication keywords (enhanced with OAuth2 patterns)
  auth_keywords = [
  "bearer",
- "zoho",
- "site24x7",
- "okta",
- "salesforce",
+ "token", # General token (covers access, refresh, bearer, etc.)
  "authentication",
  "authorization",
  "api key",
+ "api_key",
+ "apikey",
  "client_id",
  "client_secret",
  "token_url",
  "api_base_url",
+ "jwt",
+ "id_token",
+ "grant_type",
+ "authorization_code",
+ "client_credentials",
+ "credentials", # General credentials
+ "auth", # Short form
  ]
-
- # Structure keywords
+
+ # Structure keywords (enhanced with framework patterns)
  structure_keywords = [
  "class.*client",
  "get method",
  "post method",
+ "put method",
+ "delete method",
+ "patch method",
  "api endpoint",
+ "endpoint", # General endpoint
+ "rest endpoint",
  "make request",
+ "http request",
+ "fastapi",
+ "django rest",
+ "api route",
+ "router", # General router
  ]
 
  # Check for API client keywords
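The hunk ends just before the matching step, so the combination logic is not shown here; presumably it mirrors the reviewer-side check further down, i.e. substring scans over the lowercased text. A self-contained sketch of that idea, with the keyword lists abbreviated from the diff (the combination rule is a guess, not the method's actual logic):

```python
# Standalone sketch of keyword-based API-client detection over a lowercased
# specification; keyword lists are abbreviated from the hunk above.
API_CLIENT_KEYWORDS = ["api client", "http client", "oauth", "refresh token", "grpc client"]
AUTH_KEYWORDS = ["bearer", "token", "api key", "client_id", "jwt", "grant_type"]
STRUCTURE_KEYWORDS = ["api endpoint", "http request", "fastapi", "api route", "router"]


def looks_like_api_client(specification: str, context: str | None = None) -> bool:
    text = specification.lower()
    if context:
        text += " " + context.lower()
    has_client = any(k in text for k in API_CLIENT_KEYWORDS)
    has_auth = any(k in text for k in AUTH_KEYWORDS)
    has_structure = any(k in text for k in STRUCTURE_KEYWORDS)
    # Hypothetical combination rule; the real method's rule is outside this hunk.
    return has_client or (has_auth and has_structure)


print(looks_like_api_client("Build an OAuth2 API client with refresh token support"))  # True
print(looks_like_api_client("Sort a list of integers in place"))  # False
```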
tapps_agents/agents/reviewer/agent.py CHANGED
@@ -3218,16 +3218,16 @@ class ReviewerAgent(BaseAgent, ExpertSupportMixin):
  async def _detect_api_client_pattern(self, code: str) -> bool:
  """
  Detect if code appears to be an HTTP/API client.
-
+
  Checks for common patterns that indicate API client code:
  - HTTP client libraries (requests, httpx)
- - Authentication headers (Authorization, Bearer, Zoho-oauthtoken, X-API-Key)
+ - Authentication headers (Authorization, Bearer, X-API-Key)
  - Token management (refresh_token, access_token, token_url)
  - API client structure (class Client, get/post methods, api_base_url)
-
+
  Args:
  code: Code content to analyze
-
+
  Returns:
  True if code appears to be an API client, False otherwise
  """
@@ -3243,18 +3243,20 @@ class ReviewerAgent(BaseAgent, ExpertSupportMixin):
  "requests.put",
  "requests.delete",
  "httpx.client",
- "httpx.asynccclient",
+ "httpx.asyncclient",
  "httpx.get",
  "httpx.post",
  "urllib.request",
  "urllib3",
+ "aiohttp",
+ "fetch(", # JavaScript/TypeScript
+ "axios", # JavaScript/TypeScript
  ]
-
- # Authentication indicators
+
+ # Authentication indicators (including OAuth2)
  auth_indicators = [
  "authorization:",
  "bearer",
- "zoho-oauthtoken",
  "x-api-key",
  "api_key",
  "api-key",
@@ -3263,6 +3265,13 @@ class ReviewerAgent(BaseAgent, ExpertSupportMixin):
  "token_url",
  "client_id",
  "client_secret",
+ "oauth2",
+ "oauth",
+ "grant_type",
+ "authorization_code",
+ "client_credentials",
+ "jwt",
+ "id_token",
  ]
 
  # API client structure indicators
@@ -3270,14 +3279,25 @@ class ReviewerAgent(BaseAgent, ExpertSupportMixin):
  "api_base_url",
  "base_url",
  "api_url",
+ "endpoint",
+ "/api/",
+ "rest",
+ "graphql",
  "class.*client",
+ "class.*api",
  "def get(",
  "def post(",
  "def put(",
  "def delete(",
+ "def patch(",
  "def _headers",
  "def _get_access_token",
  "def _refresh",
+ "@app.get", # FastAPI
+ "@app.post", # FastAPI
+ "@router", # FastAPI router
+ "apiview", # Django REST
+ "viewset", # Django REST
  ]
 
  # Check for HTTP client usage
@@ -3289,8 +3309,21 @@ class ReviewerAgent(BaseAgent, ExpertSupportMixin):
  # Check for API client structure
  has_structure = any(indicator in code_lower for indicator in structure_indicators)
 
- # Code is likely an API client if it has HTTP client usage AND (auth OR structure)
- return has_http_client and (has_auth or has_structure)
+ # Server-side REST framework indicators (FastAPI, Django REST, etc.)
+ server_api_indicators = [
+ "@app.get",
+ "@app.post",
+ "@router",
+ "apiview",
+ "viewset",
+ ]
+ has_server_api = any(indicator in code_lower for indicator in server_api_indicators)
+
+ # Code is likely an API client/server if:
+ # 1. Has HTTP client usage AND (auth OR structure), OR
+ # 2. Has auth AND structure (e.g., OAuth2 client without explicit http calls yet), OR
+ # 3. Has server-side REST framework patterns (FastAPI, Django REST)
+ return (has_http_client and (has_auth or has_structure)) or (has_auth and has_structure) or has_server_api
 
  def _score_yaml_file(self, file_path: Path, code: str) -> dict[str, Any]:
  """