titan-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- titan_cli/__init__.py +3 -0
- titan_cli/__main__.py +4 -0
- titan_cli/ai/__init__.py +0 -0
- titan_cli/ai/agents/__init__.py +15 -0
- titan_cli/ai/agents/base.py +152 -0
- titan_cli/ai/client.py +170 -0
- titan_cli/ai/constants.py +56 -0
- titan_cli/ai/exceptions.py +48 -0
- titan_cli/ai/models.py +34 -0
- titan_cli/ai/oauth_helper.py +120 -0
- titan_cli/ai/providers/__init__.py +9 -0
- titan_cli/ai/providers/anthropic.py +117 -0
- titan_cli/ai/providers/base.py +75 -0
- titan_cli/ai/providers/gemini.py +278 -0
- titan_cli/cli.py +59 -0
- titan_cli/clients/__init__.py +1 -0
- titan_cli/clients/gcloud_client.py +52 -0
- titan_cli/core/__init__.py +3 -0
- titan_cli/core/config.py +274 -0
- titan_cli/core/discovery.py +51 -0
- titan_cli/core/errors.py +81 -0
- titan_cli/core/models.py +52 -0
- titan_cli/core/plugins/available.py +36 -0
- titan_cli/core/plugins/models.py +67 -0
- titan_cli/core/plugins/plugin_base.py +108 -0
- titan_cli/core/plugins/plugin_registry.py +163 -0
- titan_cli/core/secrets.py +141 -0
- titan_cli/core/workflows/__init__.py +22 -0
- titan_cli/core/workflows/models.py +88 -0
- titan_cli/core/workflows/project_step_source.py +86 -0
- titan_cli/core/workflows/workflow_exceptions.py +17 -0
- titan_cli/core/workflows/workflow_filter_service.py +137 -0
- titan_cli/core/workflows/workflow_registry.py +419 -0
- titan_cli/core/workflows/workflow_sources.py +307 -0
- titan_cli/engine/__init__.py +39 -0
- titan_cli/engine/builder.py +159 -0
- titan_cli/engine/context.py +82 -0
- titan_cli/engine/mock_context.py +176 -0
- titan_cli/engine/results.py +91 -0
- titan_cli/engine/steps/ai_assistant_step.py +185 -0
- titan_cli/engine/steps/command_step.py +93 -0
- titan_cli/engine/utils/__init__.py +3 -0
- titan_cli/engine/utils/venv.py +31 -0
- titan_cli/engine/workflow_executor.py +187 -0
- titan_cli/external_cli/__init__.py +0 -0
- titan_cli/external_cli/configs.py +17 -0
- titan_cli/external_cli/launcher.py +65 -0
- titan_cli/messages.py +121 -0
- titan_cli/ui/tui/__init__.py +205 -0
- titan_cli/ui/tui/__previews__/statusbar_preview.py +88 -0
- titan_cli/ui/tui/app.py +113 -0
- titan_cli/ui/tui/icons.py +70 -0
- titan_cli/ui/tui/screens/__init__.py +24 -0
- titan_cli/ui/tui/screens/ai_config.py +498 -0
- titan_cli/ui/tui/screens/ai_config_wizard.py +882 -0
- titan_cli/ui/tui/screens/base.py +110 -0
- titan_cli/ui/tui/screens/cli_launcher.py +151 -0
- titan_cli/ui/tui/screens/global_setup_wizard.py +363 -0
- titan_cli/ui/tui/screens/main_menu.py +162 -0
- titan_cli/ui/tui/screens/plugin_config_wizard.py +550 -0
- titan_cli/ui/tui/screens/plugin_management.py +377 -0
- titan_cli/ui/tui/screens/project_setup_wizard.py +686 -0
- titan_cli/ui/tui/screens/workflow_execution.py +592 -0
- titan_cli/ui/tui/screens/workflows.py +249 -0
- titan_cli/ui/tui/textual_components.py +537 -0
- titan_cli/ui/tui/textual_workflow_executor.py +405 -0
- titan_cli/ui/tui/theme.py +102 -0
- titan_cli/ui/tui/widgets/__init__.py +40 -0
- titan_cli/ui/tui/widgets/button.py +108 -0
- titan_cli/ui/tui/widgets/header.py +116 -0
- titan_cli/ui/tui/widgets/panel.py +81 -0
- titan_cli/ui/tui/widgets/status_bar.py +115 -0
- titan_cli/ui/tui/widgets/table.py +77 -0
- titan_cli/ui/tui/widgets/text.py +177 -0
- titan_cli/utils/__init__.py +0 -0
- titan_cli/utils/autoupdate.py +155 -0
- titan_cli-0.1.0.dist-info/METADATA +149 -0
- titan_cli-0.1.0.dist-info/RECORD +146 -0
- titan_cli-0.1.0.dist-info/WHEEL +4 -0
- titan_cli-0.1.0.dist-info/entry_points.txt +9 -0
- titan_cli-0.1.0.dist-info/licenses/LICENSE +201 -0
- titan_plugin_git/__init__.py +1 -0
- titan_plugin_git/clients/__init__.py +8 -0
- titan_plugin_git/clients/git_client.py +772 -0
- titan_plugin_git/exceptions.py +40 -0
- titan_plugin_git/messages.py +112 -0
- titan_plugin_git/models.py +39 -0
- titan_plugin_git/plugin.py +118 -0
- titan_plugin_git/steps/__init__.py +1 -0
- titan_plugin_git/steps/ai_commit_message_step.py +171 -0
- titan_plugin_git/steps/branch_steps.py +104 -0
- titan_plugin_git/steps/commit_step.py +80 -0
- titan_plugin_git/steps/push_step.py +63 -0
- titan_plugin_git/steps/status_step.py +59 -0
- titan_plugin_git/workflows/__previews__/__init__.py +1 -0
- titan_plugin_git/workflows/__previews__/commit_ai_preview.py +124 -0
- titan_plugin_git/workflows/commit-ai.yaml +28 -0
- titan_plugin_github/__init__.py +11 -0
- titan_plugin_github/agents/__init__.py +6 -0
- titan_plugin_github/agents/config_loader.py +130 -0
- titan_plugin_github/agents/issue_generator.py +353 -0
- titan_plugin_github/agents/pr_agent.py +528 -0
- titan_plugin_github/clients/__init__.py +8 -0
- titan_plugin_github/clients/github_client.py +1105 -0
- titan_plugin_github/config/__init__.py +0 -0
- titan_plugin_github/config/pr_agent.toml +85 -0
- titan_plugin_github/exceptions.py +28 -0
- titan_plugin_github/messages.py +88 -0
- titan_plugin_github/models.py +330 -0
- titan_plugin_github/plugin.py +131 -0
- titan_plugin_github/steps/__init__.py +12 -0
- titan_plugin_github/steps/ai_pr_step.py +172 -0
- titan_plugin_github/steps/create_pr_step.py +86 -0
- titan_plugin_github/steps/github_prompt_steps.py +171 -0
- titan_plugin_github/steps/issue_steps.py +143 -0
- titan_plugin_github/steps/preview_step.py +40 -0
- titan_plugin_github/utils.py +82 -0
- titan_plugin_github/workflows/__previews__/__init__.py +1 -0
- titan_plugin_github/workflows/__previews__/create_pr_ai_preview.py +140 -0
- titan_plugin_github/workflows/create-issue-ai.yaml +32 -0
- titan_plugin_github/workflows/create-pr-ai.yaml +49 -0
- titan_plugin_jira/__init__.py +8 -0
- titan_plugin_jira/agents/__init__.py +6 -0
- titan_plugin_jira/agents/config_loader.py +154 -0
- titan_plugin_jira/agents/jira_agent.py +553 -0
- titan_plugin_jira/agents/prompts.py +364 -0
- titan_plugin_jira/agents/response_parser.py +435 -0
- titan_plugin_jira/agents/token_tracker.py +223 -0
- titan_plugin_jira/agents/validators.py +246 -0
- titan_plugin_jira/clients/jira_client.py +745 -0
- titan_plugin_jira/config/jira_agent.toml +92 -0
- titan_plugin_jira/config/templates/issue_analysis.md.j2 +78 -0
- titan_plugin_jira/exceptions.py +37 -0
- titan_plugin_jira/formatters/__init__.py +6 -0
- titan_plugin_jira/formatters/markdown_formatter.py +245 -0
- titan_plugin_jira/messages.py +115 -0
- titan_plugin_jira/models.py +89 -0
- titan_plugin_jira/plugin.py +264 -0
- titan_plugin_jira/steps/ai_analyze_issue_step.py +105 -0
- titan_plugin_jira/steps/get_issue_step.py +82 -0
- titan_plugin_jira/steps/prompt_select_issue_step.py +80 -0
- titan_plugin_jira/steps/search_saved_query_step.py +238 -0
- titan_plugin_jira/utils/__init__.py +13 -0
- titan_plugin_jira/utils/issue_sorter.py +140 -0
- titan_plugin_jira/utils/saved_queries.py +150 -0
- titan_plugin_jira/workflows/analyze-jira-issues.yaml +34 -0
|
@@ -0,0 +1,435 @@
|
|
|
1
|
+
# plugins/titan-plugin-jira/titan_plugin_jira/agents/response_parser.py
|
|
2
|
+
"""
|
|
3
|
+
Robust AI response parser for JiraAgent.
|
|
4
|
+
|
|
5
|
+
This module provides a generic, robust parsing strategy for AI responses:
|
|
6
|
+
1. Try JSON parsing first (most reliable)
|
|
7
|
+
2. Fall back to regex-based parsing if JSON fails
|
|
8
|
+
3. Provide sensible defaults if both fail
|
|
9
|
+
4. Validate all extracted data
|
|
10
|
+
|
|
11
|
+
Based on lessons learned from PR #91 (IssueGeneratorAgent).
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import json
import re
from typing import Dict, Any, List, Optional, Callable
from dataclasses import dataclass, field
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@dataclass
class ParseResult:
    """Result of a parsing attempt, with metadata about how it was produced.

    `success` is True only when no errors were collected while parsing.
    """

    # Parsed field values (defaults filled in for missing/invalid fields).
    data: Dict[str, Any]
    # Strategy that produced `data`: "json", "regex", or "fallback".
    method_used: str
    # True when parsing completed without collecting any errors.
    success: bool
    # Error messages collected during parsing. default_factory gives each
    # instance its own fresh list; the previous `= None` default made the
    # `List[str]` annotation incorrect and forced reliance on __post_init__.
    errors: Optional[List[str]] = field(default_factory=list)

    def __post_init__(self) -> None:
        # Kept for backward compatibility: callers that explicitly pass
        # errors=None still get a usable empty list.
        if self.errors is None:
            self.errors = []
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class AIResponseParser:
    """
    Generic parser for AI responses with multiple fallback strategies.

    Parsing strategies (in order of preference):
    1. JSON  - Most reliable, structured format
    2. Regex - Fallback for text-based responses
    3. Default - Sensible defaults if all else fails

    Example:
        parser = AIResponseParser()

        # Define schema
        schema = {
            "functional": (list, []),
            "non_functional": (list, []),
            "technical_approach": (str, None)
        }

        # Parse with JSON
        result = parser.parse_json(content, schema)

        # Or parse with regex patterns
        patterns = {
            "functional": r"FUNCTIONAL_REQUIREMENTS:\\s*\\n((?:- .+\\n?)+)",
            "non_functional": r"NON_FUNCTIONAL_REQUIREMENTS:\\s*\\n((?:- .+\\n?)+)"
        }
        result = parser.parse_regex(content, patterns, schema)
    """

    def __init__(self, strict: bool = False):
        """
        Initialize parser.

        Args:
            strict: If True, raise exceptions on parsing errors.
                If False, log warnings and use defaults.

        NOTE(review): `strict` is stored but not consulted by any parsing
        method below — both modes currently behave identically. Confirm the
        intended strict semantics before relying on the flag.
        """
        self.strict = strict

    def parse_json(
        self,
        content: str,
        schema: Dict[str, tuple],
        validate_fn: Optional[Callable] = None
    ) -> ParseResult:
        """
        Parse JSON response with schema validation.

        Args:
            content: Raw AI response content
            schema: Dict mapping field names to (type, default) tuples
                Example: {"risks": (list, []), "complexity": (str, None)}
            validate_fn: Optional validation function(data) -> List[str] errors

        Returns:
            ParseResult with parsed data. Missing fields get their schema
            default; a missing field is only recorded as an error when its
            default is None. success is False if any error was recorded.
        """
        defaults = {field: default for field, (_, default) in schema.items()}

        try:
            # Prefer an explicit fenced ```json block; otherwise try the
            # whole content as JSON.
            json_match = re.search(r'```json\s*\n(.+?)\n```', content, re.DOTALL)
            json_str = json_match.group(1) if json_match else content.strip()
            data = json.loads(json_str)
        except json.JSONDecodeError as e:
            # Return defaults; caller is expected to try a fallback strategy.
            return ParseResult(
                data=defaults,
                method_used="json",
                success=False,
                errors=[f"JSON decode error: {e}"]
            )

        if not isinstance(data, dict):
            # Valid JSON but not an object (e.g. a bare list or string).
            # The schema lookups below would silently misbehave (`field in
            # data` on a list/str is membership, not key lookup), so treat
            # this like a decode failure.
            return ParseResult(
                data=defaults,
                method_used="json",
                success=False,
                errors=[f"Expected JSON object, got {type(data).__name__}"]
            )

        # Validate field-by-field and fill defaults.
        result_data = {}
        errors = []

        for field, (expected_type, default) in schema.items():
            if field in data:
                value = data[field]
                if not isinstance(value, expected_type):
                    errors.append(
                        f"Field '{field}' has wrong type: "
                        f"expected {expected_type.__name__}, got {type(value).__name__}"
                    )
                    result_data[field] = default
                else:
                    result_data[field] = value
            else:
                # Missing field: use the default. Only fields whose default
                # is None count as errors (no sensible fallback exists).
                result_data[field] = default
                if default is None:
                    errors.append(f"Field '{field}' is missing")

        # Custom validation hook.
        if validate_fn:
            errors.extend(validate_fn(result_data))

        return ParseResult(
            data=result_data,
            method_used="json",
            success=len(errors) == 0,
            errors=errors
        )

    def parse_regex(
        self,
        content: str,
        patterns: Dict[str, str],
        schema: Dict[str, tuple],
        list_separator: str = r"\n-\s*"
    ) -> ParseResult:
        """
        Parse text response using regex patterns.

        Args:
            content: Raw AI response content
            patterns: Dict mapping field names to regex patterns; each
                pattern must capture the field's text in group 1
            schema: Dict mapping field names to (type, default) tuples
            list_separator: Regex pattern for splitting list items

        Returns:
            ParseResult with parsed data (fields without a pattern, or whose
            pattern does not match, fall back to the schema default)
        """
        result_data = {}
        errors = []

        for field, (expected_type, default) in schema.items():
            if field not in patterns:
                # No pattern supplied for this field: silently use default.
                result_data[field] = default
                continue

            match = re.search(patterns[field], content, re.IGNORECASE | re.DOTALL)
            if not match:
                result_data[field] = default
                errors.append(f"Pattern for '{field}' not found in content")
                continue

            matched_text = match.group(1).strip()

            # Convert captured text to the schema's expected type.
            if expected_type is list:
                # Split on the list separator (e.g. "- item" bullets).
                items = re.split(list_separator, matched_text)
                result_data[field] = [item.strip() for item in items if item.strip()]
            elif expected_type is str:
                result_data[field] = matched_text
            elif expected_type is int:
                try:
                    result_data[field] = int(matched_text)
                except ValueError:
                    result_data[field] = default
                    errors.append(f"Could not convert '{field}' to int: {matched_text}")
            else:
                # Unsupported type: keep the raw matched string.
                result_data[field] = matched_text

        return ParseResult(
            data=result_data,
            method_used="regex",
            success=len(errors) == 0,
            errors=errors
        )

    def parse_with_fallback(
        self,
        content: str,
        schema: Dict[str, tuple],
        json_first: bool = True,
        regex_patterns: Optional[Dict[str, str]] = None,
        validate_fn: Optional[Callable] = None
    ) -> ParseResult:
        """
        Parse response with automatic fallback strategy.

        Args:
            content: Raw AI response
            schema: Field schema
            json_first: Try JSON before regex
            regex_patterns: Patterns for regex fallback
            validate_fn: Optional validation function (JSON path only)

        Returns:
            ParseResult from the first fully-successful strategy, or a
            defaults-only "fallback" result when every strategy failed
        """
        if json_first:
            result = self.parse_json(content, schema, validate_fn)
            if result.success:
                return result

            if regex_patterns:
                result = self.parse_regex(content, regex_patterns, schema)
                # success is defined as "no errors", so checking
                # `not result.errors` as well (as before) was redundant.
                if result.success:
                    return result
        else:
            # Regex first (for legacy text-based responses).
            if regex_patterns:
                result = self.parse_regex(content, regex_patterns, schema)
                if result.success:
                    return result

            result = self.parse_json(content, schema, validate_fn)
            if result.success:
                return result

        # Both strategies failed: return schema defaults.
        return ParseResult(
            data={field: default for field, (_, default) in schema.items()},
            method_used="fallback",
            success=False,
            errors=["All parsing strategies failed"]
        )
|
|
263
|
+
|
|
264
|
+
|
|
265
|
+
# ==================== SPECIFIC PARSERS FOR JIRA AGENT ====================
|
|
266
|
+
|
|
267
|
+
class JiraAgentParser:
    """
    Specialized parser for JiraAgent responses.

    Provides pre-configured parsers for all JiraAgent AI operations:
    - Requirements extraction
    - Risk analysis
    - Dependencies detection
    - Subtasks suggestion

    Each parse_* method tries JSON first, then falls back to regex patterns
    for legacy text-based responses; the shared plumbing lives in
    _parse_with_patterns (previously duplicated verbatim in three methods).
    """

    def __init__(self, strict: bool = False):
        # Delegate the generic JSON/regex/default machinery.
        self.parser = AIResponseParser(strict=strict)

    def _parse_with_patterns(
        self,
        content: str,
        schema: Dict[str, tuple],
        regex_patterns: Dict[str, str]
    ) -> Dict[str, Any]:
        """Run the shared JSON-first-then-regex strategy and return the data."""
        result = self.parser.parse_with_fallback(
            content,
            schema,
            json_first=True,
            regex_patterns=regex_patterns
        )
        return result.data

    def parse_requirements(self, content: str) -> Dict[str, Any]:
        """
        Parse requirements extraction response.

        Expected JSON format:
        {
            "functional": ["req1", "req2"],
            "non_functional": ["nfr1", "nfr2"],
            "acceptance_criteria": ["ac1", "ac2"],
            "technical_approach": "approach description"
        }

        Fallback regex patterns handle text-based responses.
        """
        schema = {
            "functional": (list, []),
            "non_functional": (list, []),
            "acceptance_criteria": (list, []),
            "technical_approach": (str, None)
        }

        regex_patterns = {
            "functional": r'FUNCTIONAL_REQUIREMENTS:\s*\n((?:-\s*.+\n?)+)',
            "non_functional": r'NON_FUNCTIONAL_REQUIREMENTS:\s*\n((?:-\s*.+\n?)+)',
            "acceptance_criteria": r'ACCEPTANCE_CRITERIA:\s*\n((?:-\s*.+\n?)+)',
            "technical_approach": r'TECHNICAL_APPROACH:\s*\n(.+?)(?=\n[A-Z_]+:|$)'
        }

        return self._parse_with_patterns(content, schema, regex_patterns)

    def parse_risks(self, content: str) -> Dict[str, Any]:
        """
        Parse risk analysis response.

        Expected JSON format:
        {
            "risks": ["risk1", "risk2"],
            "edge_cases": ["case1", "case2"],
            "complexity": "Medium",
            "effort": "3-5 days"
        }
        """
        schema = {
            "risks": (list, []),
            "edge_cases": (list, []),
            "complexity": (str, None),
            "effort": (str, None)
        }

        regex_patterns = {
            "risks": r'RISKS:\s*\n((?:-\s*.+\n?)+)',
            "edge_cases": r'EDGE_CASES:\s*\n((?:-\s*.+\n?)+)',
            "complexity": r'COMPLEXITY:\s*(.+)',
            "effort": r'EFFORT_ESTIMATE:\s*(.+)'
        }

        return self._parse_with_patterns(content, schema, regex_patterns)

    def parse_dependencies(self, content: str) -> Dict[str, Any]:
        """
        Parse dependencies detection response.

        Expected JSON format:
        {
            "dependencies": ["dep1", "dep2"]
        }
        """
        schema = {
            "dependencies": (list, [])
        }

        regex_patterns = {
            "dependencies": r'DEPENDENCIES:\s*\n((?:-\s*.+\n?)+)'
        }

        return self._parse_with_patterns(content, schema, regex_patterns)

    def parse_subtasks(self, content: str) -> Dict[str, Any]:
        """
        Parse subtasks suggestion response.

        Expected JSON format:
        {
            "subtasks": [
                {"summary": "Task 1", "description": "Desc 1"},
                {"summary": "Task 2", "description": "Desc 2"}
            ]
        }

        Returns:
            {"subtasks": [...]} with only well-formed subtask dicts (a
            "summary" key is required; entries of any other shape are dropped)
        """
        schema = {
            "subtasks": (list, [])
        }

        # Try JSON first.
        result = self.parser.parse_json(content, schema)

        if not result.success:
            # Fallback: manual line-oriented parsing for text-based subtasks.
            return {"subtasks": self._parse_subtasks_regex(content)}

        # Normalize and validate subtask structure.
        valid_subtasks = [
            {
                "summary": subtask.get("summary", ""),
                "description": subtask.get("description", "")
            }
            for subtask in result.data.get("subtasks", [])
            if isinstance(subtask, dict) and "summary" in subtask
        ]

        return {"subtasks": valid_subtasks}

    def _parse_subtasks_regex(self, content: str) -> List[Dict[str, str]]:
        """Fallback parser for subtasks in text format.

        Expects blocks introduced by a "SUBTASK_..." header line followed by
        "Summary:" and "Description:" lines.
        """
        subtasks = []
        current_subtask = None

        for line in content.split("\n"):
            line = line.strip()

            if line.startswith("SUBTASK_"):
                # New subtask header: flush any in-progress entry first.
                if current_subtask:
                    subtasks.append(current_subtask)
                current_subtask = {"summary": "", "description": ""}
            elif current_subtask:
                if line.startswith("Summary:"):
                    current_subtask["summary"] = line.split(":", 1)[1].strip()
                elif line.startswith("Description:"):
                    current_subtask["description"] = line.split(":", 1)[1].strip()

        # Flush the trailing entry.
        if current_subtask:
            subtasks.append(current_subtask)

        return subtasks
|
|
@@ -0,0 +1,223 @@
|
|
|
1
|
+
# plugins/titan-plugin-jira/titan_plugin_jira/agents/token_tracker.py
|
|
2
|
+
"""
|
|
3
|
+
Centralized token tracking for JiraAgent.
|
|
4
|
+
|
|
5
|
+
Addresses PR #74 comment: "Token Tracking Inconsistente"
|
|
6
|
+
Provides consistent, transparent token usage tracking across all AI operations.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from dataclasses import dataclass
|
|
10
|
+
from typing import Any, Dict, List, Optional
|
|
11
|
+
from enum import Enum
|
|
12
|
+
|
|
13
|
+
MAX_BUDGET_MULTIPLIER = 10
|
|
14
|
+
|
|
15
|
+
class OperationType(Enum):
    """Types of AI operations that consume tokens.

    The string values are stable identifiers surfaced in usage summaries
    (TokenTracker.get_summary reports per-operation totals keyed by
    ``op.value``).
    """
    REQUIREMENTS_EXTRACTION = "requirements_extraction"
    RISK_ANALYSIS = "risk_analysis"
    DEPENDENCY_DETECTION = "dependency_detection"
    SUBTASK_SUGGESTION = "subtask_suggestion"
    COMMENT_GENERATION = "comment_generation"
    DESCRIPTION_ENHANCEMENT = "description_enhancement"
    SMART_LABELING = "smart_labeling"
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
@dataclass
class TokenUsage:
    """Record of token usage for a single operation."""
    # Which AI operation consumed the tokens.
    operation: OperationType
    # Number of tokens consumed by this operation.
    tokens_used: int
    # Issue key being processed, when applicable (presumably a Jira key
    # such as "PROJ-123" — confirm against callers).
    issue_key: Optional[str] = None
    # Whether the operation completed successfully.
    success: bool = True
    # Error message when the operation failed.
    error: Optional[str] = None
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
@dataclass
class TokenBudget:
    """
    Token budget configuration for different operation types.

    Based on PR #74 comment about hardcoded magic numbers.
    Centralizes token allocation instead of using `max_tokens // 4`
    throughout the code: each operation receives a fraction of the base
    budget, configured per operation type.
    """
    # Base max_tokens from config (e.g., 2000)
    base_max_tokens: int

    # Fraction of the base budget granted per operation (0.0 to 1.0)
    requirements_multiplier: float = 1.0  # Full budget
    risk_multiplier: float = 1.0  # Full budget
    dependency_multiplier: float = 0.25  # 1/4 budget (was hardcoded as // 4)
    subtask_multiplier: float = 1.0  # Full budget
    comment_multiplier: float = 0.5  # 1/2 budget (was hardcoded as // 2)
    description_multiplier: float = 1.0  # Full budget
    labeling_multiplier: float = 0.25  # 1/4 budget

    def get_budget(self, operation: OperationType) -> int:
        """
        Get token budget for a specific operation.

        Args:
            operation: The type of operation

        Returns:
            Maximum tokens allowed for this operation (the full base
            budget when the operation type is unrecognized)
        """
        # Dispatch on the operation's stable string value; unknown
        # operations default to the full budget (fraction 1.0).
        fraction_by_value = {
            "requirements_extraction": self.requirements_multiplier,
            "risk_analysis": self.risk_multiplier,
            "dependency_detection": self.dependency_multiplier,
            "subtask_suggestion": self.subtask_multiplier,
            "comment_generation": self.comment_multiplier,
            "description_enhancement": self.description_multiplier,
            "smart_labeling": self.labeling_multiplier,
        }
        fraction = fraction_by_value.get(operation.value, 1.0)
        return int(self.base_max_tokens * fraction)
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
class TokenTracker:
    """
    Tracks token usage across all AI operations in a session.

    Features:
    - Consistent tracking across all operations
    - Budget enforcement
    - Usage reporting and analytics
    - Per-operation and total tracking
    """

    def __init__(self, budget: TokenBudget):
        """
        Initialize token tracker.

        Args:
            budget: Token budget configuration
        """
        self.budget = budget
        self.usage_history: List[TokenUsage] = []
        # Running total, kept in sync by record_usage/reset.
        self._total_tokens = 0

    def record_usage(
        self,
        operation: OperationType,
        tokens_used: int,
        issue_key: Optional[str] = None,
        success: bool = True,
        error: Optional[str] = None
    ) -> None:
        """
        Record token usage for an operation.

        Args:
            operation: Type of operation
            tokens_used: Number of tokens consumed
            issue_key: Optional issue key being processed
            success: Whether the operation succeeded
            error: Optional error message if operation failed
        """
        self.usage_history.append(
            TokenUsage(
                operation=operation,
                tokens_used=tokens_used,
                issue_key=issue_key,
                success=success,
                error=error,
            )
        )
        self._total_tokens += tokens_used

    def get_total_tokens(self) -> int:
        """Get total tokens used across all operations."""
        return self._total_tokens

    def get_tokens_by_operation(self) -> Dict[OperationType, int]:
        """
        Get token usage broken down by operation type.

        Returns:
            Dict mapping operation type to total tokens used
        """
        totals: Dict[OperationType, int] = {}
        for entry in self.usage_history:
            totals[entry.operation] = totals.get(entry.operation, 0) + entry.tokens_used
        return totals

    def get_failed_operations(self) -> List[TokenUsage]:
        """Get list of operations that failed."""
        return [entry for entry in self.usage_history if not entry.success]

    def get_summary(self) -> Dict[str, Any]:
        """
        Get comprehensive usage summary.

        Returns:
            Dict with total, by_operation, failed_count, etc.
        """
        per_operation = self.get_tokens_by_operation()
        failures = self.get_failed_operations()
        attempted = len(self.usage_history)
        # An empty session counts as a 100% success rate.
        rate = (attempted - len(failures)) / attempted if attempted else 1.0

        return {
            "total_tokens": self._total_tokens,
            "operation_count": attempted,
            "by_operation": {op.value: tokens for op, tokens in per_operation.items()},
            "failed_operations": len(failures),
            "budget_base": self.budget.base_max_tokens,
            "success_rate": rate,
        }

    def format_summary(self) -> str:
        """
        Format usage summary as human-readable string.

        Returns:
            Formatted summary text
        """
        summary = self.get_summary()

        lines = [
            "Token Usage Summary:",
            f"  Total Tokens: {summary['total_tokens']}",
            f"  Operations: {summary['operation_count']}",
            f"  Success Rate: {summary['success_rate']:.1%}",
            "",
            "By Operation:",
        ]
        lines.extend(
            f"  - {op_name}: {tokens} tokens"
            for op_name, tokens in summary["by_operation"].items()
        )
        if summary["failed_operations"] > 0:
            lines.append("")
            lines.append(f"Failed Operations: {summary['failed_operations']}")

        return "\n".join(lines)

    def check_budget(self, operation: OperationType) -> bool:
        """
        Check if there's budget remaining for an operation.

        Args:
            operation: Operation type to check

        Returns:
            True if within budget, False otherwise
        """
        # Simple session-wide cap rather than per-operation tracking:
        # allow up to MAX_BUDGET_MULTIPLIER (10x) the base budget to
        # support multi-issue analysis.
        cap = self.budget.base_max_tokens * MAX_BUDGET_MULTIPLIER
        return self._total_tokens < cap

    def reset(self) -> None:
        """Reset tracker (useful for new analysis session)."""
        # Rebind (not .clear()) so any external references to the old
        # history list are left untouched, matching prior behavior.
        self.usage_history = []
        self._total_tokens = 0
|