anvil-dev-framework 0.1.7 → 0.1.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +32 -13
- package/VERSION +1 -1
- package/docs/ANV-263-hook-logging-investigation.md +116 -0
- package/docs/command-reference.md +301 -1
- package/docs/session-workflow.md +62 -9
- package/docs/system-architecture.md +569 -0
- package/global/commands/anvil-settings.md +3 -1
- package/global/commands/audit.md +163 -0
- package/global/commands/checklist.md +180 -0
- package/global/commands/efficiency.md +356 -0
- package/global/commands/evidence.md +99 -32
- package/global/commands/insights.md +101 -3
- package/global/commands/patterns.md +115 -0
- package/global/commands/ralph.md +47 -1
- package/global/commands/token-budget.md +214 -0
- package/global/lib/__pycache__/context_optimizer.cpython-314.pyc +0 -0
- package/global/lib/__pycache__/git_utils.cpython-314.pyc +0 -0
- package/global/lib/__pycache__/issue_models.cpython-314.pyc +0 -0
- package/global/lib/__pycache__/linear_provider.cpython-314.pyc +0 -0
- package/global/lib/__pycache__/optimization_applier.cpython-314.pyc +0 -0
- package/global/lib/__pycache__/ralph_state.cpython-314.pyc +0 -0
- package/global/lib/__pycache__/token_analyzer.cpython-314.pyc +0 -0
- package/global/lib/__pycache__/token_metrics.cpython-314.pyc +0 -0
- package/global/lib/context_optimizer.py +323 -0
- package/global/lib/linear_provider.py +210 -16
- package/global/lib/optimization_applier.py +582 -0
- package/global/lib/ralph_state.py +264 -24
- package/global/lib/token_analyzer.py +1357 -0
- package/global/lib/token_metrics.py +873 -0
- package/global/tests/__pycache__/test_context_optimizer.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_doc_coverage.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_git_utils.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_issue_models.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_linear_filtering.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_linear_provider.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_local_provider.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_optimization_applier.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_token_analyzer.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_token_analyzer_phase6.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_token_metrics.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/test_context_optimizer.py +321 -0
- package/global/tests/test_linear_filtering.py +319 -0
- package/global/tests/test_linear_provider.py +40 -1
- package/global/tests/test_optimization_applier.py +508 -0
- package/global/tests/test_token_analyzer.py +735 -0
- package/global/tests/test_token_analyzer_phase6.py +537 -0
- package/global/tests/test_token_metrics.py +791 -0
- package/package.json +1 -1
package/global/lib/context_optimizer.py

@@ -0,0 +1,323 @@
+"""
+Context Optimizer Service for Anvil Framework.
+
+Provides intelligent context loading to reduce initial token consumption
+while ensuring relevant information is available when needed.
+
+Features:
+- Trigger keyword detection for on-demand command loading
+- Session-level caching to avoid redundant loads
+- Integration with token_metrics for tracking
+- Compressed CLAUDE.md format support
+
+Usage:
+    from context_optimizer import ContextOptimizer, get_optimizer
+
+    optimizer = get_optimizer()
+    suggestions = optimizer.detect_triggers("help with anti-patterns")
+    optimizer.mark_loaded("patterns")
+"""
+
+import re
+from pathlib import Path
+from typing import Optional, Dict, List, Set, Any
+from dataclasses import dataclass, field
+from datetime import datetime
+
+# Try to import token_metrics for integration
+try:
+    from token_metrics import TokenMetrics, get_metrics
+    METRICS_AVAILABLE = True
+except ImportError:
+    METRICS_AVAILABLE = False
+
+
+@dataclass
+class TriggerRule:
+    """A rule mapping keywords to an on-demand command."""
+    command: str
+    keywords: List[str]
+    description: str
+    estimated_tokens: int
+    priority: int = 2  # 1=high, 2=medium, 3=low
+
+
+@dataclass
+class LoadSuggestion:
+    """A suggestion to load an on-demand command."""
+    command: str
+    reason: str
+    estimated_tokens: int
+    priority: int
+    keywords_matched: List[str]
+
+
+# Default trigger rules mapping keywords to on-demand commands
+DEFAULT_TRIGGER_RULES: List[TriggerRule] = [
+    TriggerRule(
+        command="/patterns",
+        keywords=[
+            "anti-pattern", "antipattern", "bad practice", "avoid",
+            "mistake", "wrong", "should not", "shouldn't",
+            "learned pattern", "code pattern", "confidence",
+            "rule of three", "premature abstraction", "over-engineering"
+        ],
+        description="Anti-patterns, learned patterns, and code patterns",
+        estimated_tokens=1500,
+        priority=1
+    ),
+    TriggerRule(
+        command="/checklist",
+        keywords=[
+            "checklist", "quality", "before pr", "before commit",
+            "verification", "check", "validate", "pre-merge",
+            "lint", "test", "typecheck", "evidence"
+        ],
+        description="Quality gates and implementation checklists",
+        estimated_tokens=1200,
+        priority=1
+    ),
+    TriggerRule(
+        command="/audit",
+        keywords=[
+            "token", "context", "usage", "consumption",
+            "how much", "budget", "waste", "efficiency"
+        ],
+        description="Real-time token consumption analysis",
+        estimated_tokens=800,
+        priority=2
+    ),
+    TriggerRule(
+        command="/explore",
+        keywords=[
+            "explore", "discovery", "investigate", "understand",
+            "how does", "where is", "find", "search"
+        ],
+        description="Discovery phase for new feature work",
+        estimated_tokens=600,
+        priority=2
+    ),
+    TriggerRule(
+        command="/ralph",
+        keywords=[
+            "ralph", "autonomous", "unattended", "overnight",
+            "long running", "migration", "refactor"
+        ],
+        description="Ralph Wiggum autonomous execution mode",
+        estimated_tokens=1000,
+        priority=3
+    ),
+]
+
+
+class ContextOptimizer:
+    """
+    Intelligent context loading optimizer.
+
+    Tracks loaded commands and suggests relevant on-demand loads
+    based on trigger keywords in user prompts.
+    """
+
+    def __init__(self, custom_rules: Optional[List[TriggerRule]] = None):
+        """Initialize the optimizer."""
+        # Copy rules to avoid mutating DEFAULT_TRIGGER_RULES
+        self.rules = list(custom_rules) if custom_rules is not None else list(DEFAULT_TRIGGER_RULES)
+        self._loaded_commands: Set[str] = set()
+        self._session_start = datetime.now()
+        self._suggestion_history: List[LoadSuggestion] = []
+        self._metrics: Optional[Any] = None
+
+        # Try to connect to metrics
+        if METRICS_AVAILABLE:
+            try:
+                self._metrics = get_metrics()
+            except Exception:
+                pass
+
+    def detect_triggers(
+        self,
+        text: str,
+        include_loaded: bool = False
+    ) -> List[LoadSuggestion]:
+        """
+        Detect trigger keywords in text and return load suggestions.
+
+        Args:
+            text: The text to analyze (usually user prompt)
+            include_loaded: If True, include already-loaded commands
+
+        Returns:
+            List of LoadSuggestion sorted by priority
+        """
+        text_lower = text.lower()
+        suggestions: List[LoadSuggestion] = []
+
+        for rule in self.rules:
+            # Skip if already loaded (unless include_loaded)
+            if not include_loaded and rule.command in self._loaded_commands:
+                continue
+
+            # Find matching keywords
+            matched = [kw for kw in rule.keywords if kw in text_lower]
+
+            if matched:
+                suggestions.append(LoadSuggestion(
+                    command=rule.command,
+                    reason=rule.description,
+                    estimated_tokens=rule.estimated_tokens,
+                    priority=rule.priority,
+                    keywords_matched=matched
+                ))
+
+        # Sort by priority (lower = higher priority), then by match count
+        suggestions.sort(key=lambda s: (s.priority, -len(s.keywords_matched)))
+
+        return suggestions
+
+    def mark_loaded(self, command: str, tokens: Optional[int] = None) -> None:
+        """
+        Mark a command as loaded in this session.
+
+        Args:
+            command: The command name (e.g., "/patterns")
+            tokens: Optional token count to record in metrics
+        """
+        # Normalize command name
+        if not command.startswith("/"):
+            command = "/" + command
+
+        self._loaded_commands.add(command)
+
+        # Record in metrics if available
+        if tokens and self._metrics:
+            try:
+                self._metrics.record_component_load(
+                    component_type="command",
+                    component_name=command.lstrip("/"),
+                    tokens=tokens,
+                    source=f"global/commands/{command.lstrip('/')}.md"
+                )
+            except Exception:
+                pass
+
+    def is_loaded(self, command: str) -> bool:
+        """Check if a command has been loaded in this session."""
+        if not command.startswith("/"):
+            command = "/" + command
+        return command in self._loaded_commands
+
+    def get_loaded_commands(self) -> List[str]:
+        """Get list of commands loaded in this session."""
+        return list(self._loaded_commands)
+
+    def get_estimated_savings(self) -> Dict[str, Any]:
+        """
+        Estimate token savings from deferred loading.
+
+        Returns:
+            Dict with savings metrics
+        """
+        total_deferred = sum(
+            rule.estimated_tokens
+            for rule in self.rules
+            if rule.command not in self._loaded_commands
+        )
+
+        total_loaded = sum(
+            rule.estimated_tokens
+            for rule in self.rules
+            if rule.command in self._loaded_commands
+        )
+
+        total_available = sum(rule.estimated_tokens for rule in self.rules)
+
+        return {
+            "total_available_tokens": total_available,
+            "loaded_tokens": total_loaded,
+            "deferred_tokens": total_deferred,
+            "commands_loaded": len(self._loaded_commands),
+            "commands_deferred": len(self.rules) - len(self._loaded_commands),
+            "savings_percent": round(
+                (total_deferred / total_available * 100) if total_available else 0, 1
+            )
+        }
+
+    def format_suggestions(
+        self,
+        suggestions: List[LoadSuggestion],
+        max_suggestions: int = 3
+    ) -> str:
+        """
+        Format load suggestions as a readable message.
+
+        Args:
+            suggestions: List of suggestions to format
+            max_suggestions: Maximum number to include
+
+        Returns:
+            Formatted string for display
+        """
+        if not suggestions:
+            return ""
+
+        lines = ["**Relevant commands available:**"]
+
+        for suggestion in suggestions[:max_suggestions]:
+            priority_icon = {1: "!", 2: "-", 3: "o"}.get(suggestion.priority, "-")
+            lines.append(
+                f" {priority_icon} `{suggestion.command}` — {suggestion.reason} "
+                f"(~{suggestion.estimated_tokens} tokens)"
+            )
+
+        if len(suggestions) > max_suggestions:
+            lines.append(f" ... and {len(suggestions) - max_suggestions} more")
+
+        return "\n".join(lines)
+
+    def add_rule(self, rule: TriggerRule) -> None:
+        """Add a custom trigger rule."""
+        self.rules.append(rule)
+
+    def reset_session(self) -> None:
+        """Reset session state (clear loaded commands)."""
+        self._loaded_commands.clear()
+        self._suggestion_history.clear()
+        self._session_start = datetime.now()
+
+
+# Global singleton instance
+_optimizer_instance: Optional[ContextOptimizer] = None
+
+
+def get_optimizer() -> ContextOptimizer:
+    """Get the global ContextOptimizer instance."""
+    global _optimizer_instance
+    if _optimizer_instance is None:
+        _optimizer_instance = ContextOptimizer()
+    return _optimizer_instance
+
+
+def detect_triggers(text: str) -> List[LoadSuggestion]:
+    """Convenience function to detect triggers using global instance."""
+    return get_optimizer().detect_triggers(text)
+
+
+def format_trigger_table() -> str:
+    """
+    Format a compact trigger table for CLAUDE.md.
+
+    Returns a markdown table showing trigger keywords and their commands.
+    """
+    lines = [
+        "| Trigger | Command | Description |",
+        "|---------|---------|-------------|"
+    ]
+
+    for rule in DEFAULT_TRIGGER_RULES:
+        # Show top 3 keywords
+        keywords = ", ".join(rule.keywords[:3])
+        if len(rule.keywords) > 3:
+            keywords += "..."
+        lines.append(f"| {keywords} | `{rule.command}` | {rule.description} |")
+
+    return "\n".join(lines)
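For orientation, here is a minimal usage sketch of the new optimizer API, following the module docstring above. The prompt text and the token count passed to mark_loaded are illustrative only, and the printed values depend on session state.

```python
from context_optimizer import get_optimizer

optimizer = get_optimizer()

# Ask which on-demand commands look relevant to a prompt (substring keyword match).
suggestions = optimizer.detect_triggers("how much token budget is this costing?")
print(optimizer.format_suggestions(suggestions))

# Record that /audit was actually loaded so it is not suggested again this session.
optimizer.mark_loaded("/audit", tokens=800)

# Report how much of the on-demand command weight is still deferred.
print(optimizer.get_estimated_savings()["savings_percent"])
```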
package/global/lib/linear_provider.py

@@ -4,15 +4,24 @@ LinearProvider - Linear API adapter for Anvil Framework.
 Wraps the Linear GraphQL API to provide the IssueProvider interface.
 Uses the LINEAR_API_KEY environment variable for authentication.
 
+Features:
+- Smart result filtering for token efficiency (Phase 5)
+- Status-based, priority-based, and date-based filtering
+- Compact mode for reduced token consumption
+
 Usage:
     from linear_provider import LinearProvider
 
     provider = LinearProvider(team_key="ANV")
     issues = provider.list_issues(status=IssueStatus.TODO)
+
+    # Smart filtering for token efficiency
+    issues = provider.list_issues_smart(max_results=10, days_back=7)
 """
 
 import os
-from
+from dataclasses import dataclass, field
+from datetime import datetime, timezone, timedelta
 from typing import Optional
 import json
 
@@ -29,6 +38,21 @@ except ImportError:
     from issue_provider import BaseProvider
 
 
+@dataclass
+class FilterOptions:
+    """
+    Smart filtering options for token-efficient Linear queries.
+
+    Used by list_issues_smart() to reduce data returned.
+    """
+    max_results: int = 10
+    days_back: Optional[int] = 7  # Only issues updated in last N days
+    exclude_done: bool = True  # Exclude completed/cancelled
+    exclude_backlog: bool = False  # Exclude backlog items
+    priority_threshold: Optional[Priority] = None  # Only P0-P2, etc.
+    compact_mode: bool = False  # Return minimal fields for display
+
+
 class LinearProvider(BaseProvider):
     """
     Linear API adapter implementing IssueProvider interface.
@@ -128,9 +152,9 @@ class LinearProvider(BaseProvider):
             "Content-Type": "application/json"
         }
 
-        # Cache for state UUIDs (populated on first use)
-
-        self.
+        # Cache for state UUIDs per team (populated on first use)
+        # Structure: {team_id: {state_name_lower: state_data}}
+        self._state_cache: dict[str, dict[str, dict]] = {}
 
         # Cache for label UUIDs (populated on first use)
         self._label_cache: dict[str, str] = {}  # name -> id
@@ -205,12 +229,19 @@ class LinearProvider(BaseProvider):
 
         raise ValueError(f"Team not found: {self.team_key}")
 
-    def _ensure_state_cache(self):
-        """Initialize state cache if needed.
-
+    def _ensure_state_cache(self, team_id: Optional[str] = None):
+        """Initialize state cache for a team if needed.
+
+        Args:
+            team_id: Team UUID to cache states for. If None, uses provider's team.
+        """
+        if team_id is None:
+            team_id = self._get_team_id()
+
+        # Check if already cached for this team
+        if team_id in self._state_cache:
             return
 
-        team_id = self._get_team_id()
         query = """
         query($teamId: String!) {
             team(id: $teamId) {
@@ -227,22 +258,58 @@ class LinearProvider(BaseProvider):
         result = self._query(query, {"teamId": team_id})
         states = result.get("team", {}).get("states", {}).get("nodes", [])
 
+        # Initialize team's cache
+        self._state_cache[team_id] = {}
         for state in states:
             name_lower = state.get("name", "").lower()
-            self._state_cache[name_lower] = state
+            self._state_cache[team_id][name_lower] = state
+
+    def _get_state_id(self, status: IssueStatus, team_id: Optional[str] = None) -> Optional[str]:
+        """Get Linear state UUID for an IssueStatus.
 
-
+        Args:
+            status: The IssueStatus to look up.
+            team_id: Team UUID to get state for. If None, uses provider's team.
+
+        Returns:
+            The state UUID, or None if not found.
+        """
+        if team_id is None:
+            team_id = self._get_team_id()
 
-
-        """Get Linear state UUID for an IssueStatus."""
-        self._ensure_state_cache()
+        self._ensure_state_cache(team_id)
 
         state_name = self.STATUS_TO_STATE_NAME.get(status, "").lower()
-
-
+        team_states = self._state_cache.get(team_id, {})
+        if state_name in team_states:
+            return team_states[state_name].get("id")
 
         return None
 
+    def _get_issue_team_id(self, identifier: str) -> Optional[str]:
+        """Look up an issue's team ID by identifier.
+
+        Args:
+            identifier: Issue identifier (e.g., "ANV-123").
+
+        Returns:
+            The team UUID, or None if issue not found.
+        """
+        query = """
+        query($id: String!) {
+            issue(id: $id) {
+                team {
+                    id
+                }
+            }
+        }
+        """
+        try:
+            result = self._query(query, {"id": identifier})
+            return result.get("issue", {}).get("team", {}).get("id")
+        except Exception:
+            return None
+
     def _ensure_label_cache(self):
         """Initialize label cache if needed."""
         if self._label_cache_initialized:
@@ -416,6 +483,131 @@ class LinearProvider(BaseProvider):
 
         return issues[:limit]
 
+    def list_issues_smart(
+        self,
+        options: Optional[FilterOptions] = None,
+        project: Optional[str] = None
+    ) -> list[Issue]:
+        """
+        List issues with smart filtering for token efficiency.
+
+        This method applies intelligent filtering to reduce the number of
+        issues returned, minimizing token consumption when displaying results.
+
+        Args:
+            options: FilterOptions instance (defaults to sensible settings)
+            project: Optional project filter
+
+        Returns:
+            Filtered list of Issue objects, sorted by relevance.
+        """
+        if options is None:
+            options = FilterOptions()
+
+        # Fetch more than needed to allow for filtering
+        fetch_limit = min(options.max_results * 3, 100)
+        all_issues = self.list_issues(project=project, limit=fetch_limit)
+
+        return self._apply_filters(all_issues, options)
+
+    def _apply_filters(
+        self,
+        issues: list[Issue],
+        options: FilterOptions
+    ) -> list[Issue]:
+        """
+        Apply smart filtering to a list of issues.
+
+        Args:
+            issues: List of issues to filter
+            options: Filtering options
+
+        Returns:
+            Filtered and sorted list of issues
+        """
+        filtered = issues
+
+        # Date-based filtering
+        if options.days_back is not None:
+            cutoff = datetime.now(timezone.utc) - timedelta(days=options.days_back)
+            filtered = [
+                i for i in filtered
+                if i.updated_at and i.updated_at >= cutoff
+            ]
+
+        # Status filtering
+        if options.exclude_done:
+            filtered = [
+                i for i in filtered
+                if i.status not in {IssueStatus.DONE, IssueStatus.CANCELLED}
+            ]
+
+        if options.exclude_backlog:
+            filtered = [
+                i for i in filtered
+                if i.status != IssueStatus.BACKLOG
+            ]
+
+        # Priority filtering
+        if options.priority_threshold:
+            threshold_value = options.priority_threshold.value
+            filtered = [
+                i for i in filtered
+                if i.priority.value <= threshold_value
+            ]
+
+        # Sort by priority (lower = more urgent) then by updated_at (recent first)
+        def sort_key(issue: Issue) -> tuple:
+            priority = issue.priority.value if issue.priority else 999
+            updated = issue.updated_at or datetime.min.replace(tzinfo=timezone.utc)
+            # Negate timestamp for descending order (most recent first)
+            return (priority, -updated.timestamp())
+
+        filtered.sort(key=sort_key)
+
+        # Apply max_results limit
+        return filtered[:options.max_results]
+
+    def get_filtering_stats(
+        self,
+        original_count: int,
+        filtered_count: int,
+        options: FilterOptions
+    ) -> dict:
+        """
+        Get statistics about filtering effectiveness.
+
+        Useful for tracking and optimizing token efficiency.
+
+        Args:
+            original_count: Number of issues before filtering
+            filtered_count: Number of issues after filtering
+            options: The filter options used
+
+        Returns:
+            Dict with filtering statistics
+        """
+        reduction_pct = (
+            ((original_count - filtered_count) / original_count * 100)
+            if original_count > 0 else 0
+        )
+
+        return {
+            "original_count": original_count,
+            "filtered_count": filtered_count,
+            "reduction_percent": round(reduction_pct, 1),
+            "filters_applied": {
+                "days_back": options.days_back,
+                "exclude_done": options.exclude_done,
+                "exclude_backlog": options.exclude_backlog,
+                "priority_threshold": (
+                    options.priority_threshold.name
+                    if options.priority_threshold else None
+                ),
+                "max_results": options.max_results,
+            }
+        }
+
     def get_issue(self, identifier: str) -> Optional[Issue]:
         """Get a single issue by identifier (e.g., ANV-72)."""
         query = """
@@ -593,7 +785,9 @@ class LinearProvider(BaseProvider):
         if priority is not None:
             input_data["priority"] = self.PRIORITY_TO_LINEAR.get(priority, 2)
         if status is not None:
-
+            # Look up issue's team to get correct state UUID (ANV-232)
+            issue_team_id = self._get_issue_team_id(identifier)
+            state_id = self._get_state_id(status, team_id=issue_team_id)
             if state_id:
                 input_data["stateId"] = state_id
         # Handle labels (resolve names to UUIDs)