titan-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- titan_cli/__init__.py +3 -0
- titan_cli/__main__.py +4 -0
- titan_cli/ai/__init__.py +0 -0
- titan_cli/ai/agents/__init__.py +15 -0
- titan_cli/ai/agents/base.py +152 -0
- titan_cli/ai/client.py +170 -0
- titan_cli/ai/constants.py +56 -0
- titan_cli/ai/exceptions.py +48 -0
- titan_cli/ai/models.py +34 -0
- titan_cli/ai/oauth_helper.py +120 -0
- titan_cli/ai/providers/__init__.py +9 -0
- titan_cli/ai/providers/anthropic.py +117 -0
- titan_cli/ai/providers/base.py +75 -0
- titan_cli/ai/providers/gemini.py +278 -0
- titan_cli/cli.py +59 -0
- titan_cli/clients/__init__.py +1 -0
- titan_cli/clients/gcloud_client.py +52 -0
- titan_cli/core/__init__.py +3 -0
- titan_cli/core/config.py +274 -0
- titan_cli/core/discovery.py +51 -0
- titan_cli/core/errors.py +81 -0
- titan_cli/core/models.py +52 -0
- titan_cli/core/plugins/available.py +36 -0
- titan_cli/core/plugins/models.py +67 -0
- titan_cli/core/plugins/plugin_base.py +108 -0
- titan_cli/core/plugins/plugin_registry.py +163 -0
- titan_cli/core/secrets.py +141 -0
- titan_cli/core/workflows/__init__.py +22 -0
- titan_cli/core/workflows/models.py +88 -0
- titan_cli/core/workflows/project_step_source.py +86 -0
- titan_cli/core/workflows/workflow_exceptions.py +17 -0
- titan_cli/core/workflows/workflow_filter_service.py +137 -0
- titan_cli/core/workflows/workflow_registry.py +419 -0
- titan_cli/core/workflows/workflow_sources.py +307 -0
- titan_cli/engine/__init__.py +39 -0
- titan_cli/engine/builder.py +159 -0
- titan_cli/engine/context.py +82 -0
- titan_cli/engine/mock_context.py +176 -0
- titan_cli/engine/results.py +91 -0
- titan_cli/engine/steps/ai_assistant_step.py +185 -0
- titan_cli/engine/steps/command_step.py +93 -0
- titan_cli/engine/utils/__init__.py +3 -0
- titan_cli/engine/utils/venv.py +31 -0
- titan_cli/engine/workflow_executor.py +187 -0
- titan_cli/external_cli/__init__.py +0 -0
- titan_cli/external_cli/configs.py +17 -0
- titan_cli/external_cli/launcher.py +65 -0
- titan_cli/messages.py +121 -0
- titan_cli/ui/tui/__init__.py +205 -0
- titan_cli/ui/tui/__previews__/statusbar_preview.py +88 -0
- titan_cli/ui/tui/app.py +113 -0
- titan_cli/ui/tui/icons.py +70 -0
- titan_cli/ui/tui/screens/__init__.py +24 -0
- titan_cli/ui/tui/screens/ai_config.py +498 -0
- titan_cli/ui/tui/screens/ai_config_wizard.py +882 -0
- titan_cli/ui/tui/screens/base.py +110 -0
- titan_cli/ui/tui/screens/cli_launcher.py +151 -0
- titan_cli/ui/tui/screens/global_setup_wizard.py +363 -0
- titan_cli/ui/tui/screens/main_menu.py +162 -0
- titan_cli/ui/tui/screens/plugin_config_wizard.py +550 -0
- titan_cli/ui/tui/screens/plugin_management.py +377 -0
- titan_cli/ui/tui/screens/project_setup_wizard.py +686 -0
- titan_cli/ui/tui/screens/workflow_execution.py +592 -0
- titan_cli/ui/tui/screens/workflows.py +249 -0
- titan_cli/ui/tui/textual_components.py +537 -0
- titan_cli/ui/tui/textual_workflow_executor.py +405 -0
- titan_cli/ui/tui/theme.py +102 -0
- titan_cli/ui/tui/widgets/__init__.py +40 -0
- titan_cli/ui/tui/widgets/button.py +108 -0
- titan_cli/ui/tui/widgets/header.py +116 -0
- titan_cli/ui/tui/widgets/panel.py +81 -0
- titan_cli/ui/tui/widgets/status_bar.py +115 -0
- titan_cli/ui/tui/widgets/table.py +77 -0
- titan_cli/ui/tui/widgets/text.py +177 -0
- titan_cli/utils/__init__.py +0 -0
- titan_cli/utils/autoupdate.py +155 -0
- titan_cli-0.1.0.dist-info/METADATA +149 -0
- titan_cli-0.1.0.dist-info/RECORD +146 -0
- titan_cli-0.1.0.dist-info/WHEEL +4 -0
- titan_cli-0.1.0.dist-info/entry_points.txt +9 -0
- titan_cli-0.1.0.dist-info/licenses/LICENSE +201 -0
- titan_plugin_git/__init__.py +1 -0
- titan_plugin_git/clients/__init__.py +8 -0
- titan_plugin_git/clients/git_client.py +772 -0
- titan_plugin_git/exceptions.py +40 -0
- titan_plugin_git/messages.py +112 -0
- titan_plugin_git/models.py +39 -0
- titan_plugin_git/plugin.py +118 -0
- titan_plugin_git/steps/__init__.py +1 -0
- titan_plugin_git/steps/ai_commit_message_step.py +171 -0
- titan_plugin_git/steps/branch_steps.py +104 -0
- titan_plugin_git/steps/commit_step.py +80 -0
- titan_plugin_git/steps/push_step.py +63 -0
- titan_plugin_git/steps/status_step.py +59 -0
- titan_plugin_git/workflows/__previews__/__init__.py +1 -0
- titan_plugin_git/workflows/__previews__/commit_ai_preview.py +124 -0
- titan_plugin_git/workflows/commit-ai.yaml +28 -0
- titan_plugin_github/__init__.py +11 -0
- titan_plugin_github/agents/__init__.py +6 -0
- titan_plugin_github/agents/config_loader.py +130 -0
- titan_plugin_github/agents/issue_generator.py +353 -0
- titan_plugin_github/agents/pr_agent.py +528 -0
- titan_plugin_github/clients/__init__.py +8 -0
- titan_plugin_github/clients/github_client.py +1105 -0
- titan_plugin_github/config/__init__.py +0 -0
- titan_plugin_github/config/pr_agent.toml +85 -0
- titan_plugin_github/exceptions.py +28 -0
- titan_plugin_github/messages.py +88 -0
- titan_plugin_github/models.py +330 -0
- titan_plugin_github/plugin.py +131 -0
- titan_plugin_github/steps/__init__.py +12 -0
- titan_plugin_github/steps/ai_pr_step.py +172 -0
- titan_plugin_github/steps/create_pr_step.py +86 -0
- titan_plugin_github/steps/github_prompt_steps.py +171 -0
- titan_plugin_github/steps/issue_steps.py +143 -0
- titan_plugin_github/steps/preview_step.py +40 -0
- titan_plugin_github/utils.py +82 -0
- titan_plugin_github/workflows/__previews__/__init__.py +1 -0
- titan_plugin_github/workflows/__previews__/create_pr_ai_preview.py +140 -0
- titan_plugin_github/workflows/create-issue-ai.yaml +32 -0
- titan_plugin_github/workflows/create-pr-ai.yaml +49 -0
- titan_plugin_jira/__init__.py +8 -0
- titan_plugin_jira/agents/__init__.py +6 -0
- titan_plugin_jira/agents/config_loader.py +154 -0
- titan_plugin_jira/agents/jira_agent.py +553 -0
- titan_plugin_jira/agents/prompts.py +364 -0
- titan_plugin_jira/agents/response_parser.py +435 -0
- titan_plugin_jira/agents/token_tracker.py +223 -0
- titan_plugin_jira/agents/validators.py +246 -0
- titan_plugin_jira/clients/jira_client.py +745 -0
- titan_plugin_jira/config/jira_agent.toml +92 -0
- titan_plugin_jira/config/templates/issue_analysis.md.j2 +78 -0
- titan_plugin_jira/exceptions.py +37 -0
- titan_plugin_jira/formatters/__init__.py +6 -0
- titan_plugin_jira/formatters/markdown_formatter.py +245 -0
- titan_plugin_jira/messages.py +115 -0
- titan_plugin_jira/models.py +89 -0
- titan_plugin_jira/plugin.py +264 -0
- titan_plugin_jira/steps/ai_analyze_issue_step.py +105 -0
- titan_plugin_jira/steps/get_issue_step.py +82 -0
- titan_plugin_jira/steps/prompt_select_issue_step.py +80 -0
- titan_plugin_jira/steps/search_saved_query_step.py +238 -0
- titan_plugin_jira/utils/__init__.py +13 -0
- titan_plugin_jira/utils/issue_sorter.py +140 -0
- titan_plugin_jira/utils/saved_queries.py +150 -0
- titan_plugin_jira/workflows/analyze-jira-issues.yaml +34 -0
titan_cli/__init__.py
ADDED
titan_cli/__main__.py
ADDED
titan_cli/ai/__init__.py
ADDED
|
File without changes
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
# titan_cli/ai/agents/__init__.py
|
|
2
|
+
"""AI Agents base classes.
|
|
3
|
+
|
|
4
|
+
This module provides base classes for building AI agents.
|
|
5
|
+
Specific agents live in their respective plugins.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from .base import BaseAIAgent, AgentRequest, AgentResponse, AIGenerator
|
|
9
|
+
|
|
10
|
+
__all__ = [
|
|
11
|
+
"BaseAIAgent",
|
|
12
|
+
"AgentRequest",
|
|
13
|
+
"AgentResponse",
|
|
14
|
+
"AIGenerator",
|
|
15
|
+
]
|
|
@@ -0,0 +1,152 @@
|
|
|
1
|
+
# titan_cli/ai/agents/base.py
|
|
2
|
+
"""Base classes for AI agents."""
|
|
3
|
+
|
|
4
|
+
from abc import ABC, abstractmethod
|
|
5
|
+
from dataclasses import dataclass
|
|
6
|
+
from typing import Optional, Protocol, List
|
|
7
|
+
|
|
8
|
+
from titan_cli.ai.models import AIMessage, AIResponse
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
@dataclass
class AgentRequest:
    """Generic request for AI generation.

    Carries the caller-supplied context plus the generation parameters
    that BaseAIAgent.generate() forwards to the underlying AIGenerator.
    """
    context: str                         # user content sent as the "user" message
    max_tokens: int = 2000               # generation budget forwarded to the provider
    temperature: float = 0.7             # sampling temperature forwarded to the provider
    system_prompt: Optional[str] = None  # when set, overrides the agent's own system prompt
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@dataclass
class AgentResponse:
    """Generic response from AI generation.

    Normalized result produced by BaseAIAgent.generate() regardless of
    which provider performed the generation.
    """
    content: str          # generated text
    tokens_used: int      # total token count (0 when the provider reported no usage)
    provider: str         # class name of the provider that produced the response
    cached: bool = False  # reserved for future response caching; always False today
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class AIGenerator(Protocol):
    """
    Protocol defining the interface for AI generation.

    This allows BaseAIAgent to depend on an abstraction rather than
    concrete implementations like AIClient or AIProvider.

    Any class implementing these methods can be used with agents
    (structural typing — no inheritance required).
    """

    def generate(
        self,
        messages: List[AIMessage],
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None
    ) -> AIResponse:
        """
        Generate AI response from messages.

        Args:
            messages: List of AIMessage objects (system/user/assistant turns)
            max_tokens: Maximum tokens to generate (None = implementation default)
            temperature: Sampling temperature (None = implementation default)

        Returns:
            AIResponse object with content and metadata
        """
        ...

    def is_available(self) -> bool:
        """
        Check if AI generation is available.

        Returns:
            True if AI can be used
        """
        ...
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
class BaseAIAgent(ABC):
    """
    Abstract base class for all AI agents.

    Agents wrap AI generation with specialized domain logic.
    They depend on the AIGenerator protocol for loose coupling.
    """

    def __init__(self, generator: AIGenerator):
        """
        Initialize agent with AI generator.

        Args:
            generator: Any object implementing AIGenerator protocol
                       (e.g., AIClient, AIProvider, or mock for testing)
        """
        self.generator = generator

    @abstractmethod
    def get_system_prompt(self) -> str:
        """
        Get the system prompt for this agent's expertise.

        Each agent defines its specialized role.
        """

    def generate(self, request: AgentRequest) -> AgentResponse:
        """
        Generate AI response using the underlying generator.

        Args:
            request: AgentRequest with context and parameters

        Returns:
            AgentResponse with generated content and token accounting
        """
        # Build messages: optional system prompt first, then the user context.
        # The request's system prompt (if set) overrides the agent's default.
        messages = []
        system_prompt = request.system_prompt or self.get_system_prompt()
        if system_prompt:
            messages.append(AIMessage(role="system", content=system_prompt))
        messages.append(AIMessage(role="user", content=request.context))

        # Delegate to the injected generator (AIClient, AIProvider, mock, ...).
        response = self.generator.generate(
            messages=messages,
            max_tokens=request.max_tokens,
            temperature=request.temperature
        )

        return AgentResponse(
            content=response.content,
            tokens_used=self._tokens_from_usage(response.usage),
            provider=self._provider_name(),
            cached=False
        )

    @staticmethod
    def _tokens_from_usage(usage) -> int:
        """Total token count from a provider usage dict.

        Handles both reporting styles: a single "total_tokens" value, or
        separate "input_tokens"/"output_tokens" (Anthropic, etc.).
        """
        if not usage:
            return 0
        total = usage.get("total_tokens", 0)
        if total == 0:
            total = usage.get("input_tokens", 0) + usage.get("output_tokens", 0)
        return total

    def _provider_name(self) -> str:
        """Best-effort human-readable name of the underlying provider class.

        AIClient keeps its concrete provider in `_provider`; fall back to the
        generator itself when that attribute is absent.
        """
        try:
            provider_obj = getattr(self.generator, '_provider', self.generator)
            return provider_obj.__class__.__name__ if provider_obj else "Unknown"
        except AttributeError:
            return "Unknown"

    def is_available(self) -> bool:
        """Check if AI is available.

        Always returns a real bool: the previous `self.generator and ...`
        expression leaked None (or the generator object) to callers when no
        generator was configured, violating the -> bool contract.
        """
        return self.generator is not None and self.generator.is_available()
|
titan_cli/ai/client.py
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
"""
|
|
2
|
+
AI Client - Main facade for AI functionality
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from typing import Optional, List
|
|
6
|
+
|
|
7
|
+
from titan_cli.core.models import AIConfig
|
|
8
|
+
from titan_cli.core.secrets import SecretManager
|
|
9
|
+
from .exceptions import AIConfigurationError
|
|
10
|
+
from .models import AIMessage, AIRequest, AIResponse
|
|
11
|
+
from .providers import AIProvider, AnthropicProvider, GeminiProvider
|
|
12
|
+
|
|
13
|
+
# A mapping from provider-type names (the `provider` field of each entry in
# AIConfig.providers) to the concrete classes AIClient.provider instantiates.
PROVIDER_CLASSES = {
    "anthropic": AnthropicProvider,
    "gemini": GeminiProvider,
}
|
|
18
|
+
|
|
19
|
+
class AIClient:
    """
    Main client for AI functionality.

    This facade simplifies AI usage by:
    - Reading configuration from AIConfig.
    - Retrieving secrets from SecretManager.
    - Instantiating the correct AI provider.
    - Providing a simple `generate()` and `chat()` interface.
    """

    def __init__(self, ai_config: AIConfig, secrets: SecretManager, provider_id: Optional[str] = None):
        """
        Initialize AI client.

        Args:
            ai_config: The AI configuration.
            secrets: The SecretManager for handling API keys.
            provider_id: The specific provider ID to use. If None, uses the default.

        Raises:
            AIConfigurationError: If no providers are configured at all.
        """
        self.ai_config = ai_config
        self.secrets = secrets

        # Determine provider_id with fallback
        requested_id = provider_id or ai_config.default

        # Validate that the provider exists, fallback to first available if default is invalid
        if requested_id and requested_id in ai_config.providers:
            self.provider_id = requested_id
        elif ai_config.providers:
            # Fallback to first available provider
            # NOTE(review): relies on dict insertion order of ai_config.providers
            self.provider_id = list(ai_config.providers.keys())[0]
        else:
            raise AIConfigurationError("No AI providers configured.")

        # Concrete provider instance; created lazily on first `provider` access.
        self._provider: Optional[AIProvider] = None

    @property
    def provider(self) -> AIProvider:
        """
        Get configured provider (lazy loading).

        Returns:
            Provider instance.

        Raises:
            AIConfigurationError: If AI is not enabled or configured incorrectly.
        """
        # Return the cached instance if it has already been built.
        if self._provider:
            return self._provider

        provider_config = self.ai_config.providers.get(self.provider_id)
        if not provider_config:
            raise AIConfigurationError(f"AI provider '{self.provider_id}' not found in configuration.")

        # Resolve the provider class from its type name (e.g. "anthropic").
        provider_name = provider_config.provider
        provider_class = PROVIDER_CLASSES.get(provider_name)

        if not provider_class:
            raise AIConfigurationError(f"Unknown AI provider type: {provider_name}")

        # Get API key — secrets are keyed as "<provider_id>_api_key".
        api_key_name = f"{self.provider_id}_api_key"
        api_key = self.secrets.get(api_key_name)

        if not api_key:
            raise AIConfigurationError(f"API key for provider '{self.provider_id}' ({provider_name}) not found.")

        # base_url is optional; only forward it when explicitly configured.
        kwargs = {"api_key": api_key, "model": provider_config.model}
        if provider_config.base_url:
            kwargs["base_url"] = provider_config.base_url

        self._provider = provider_class(**kwargs)
        return self._provider

    def generate(
        self,
        messages: List[AIMessage],
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
    ) -> AIResponse:
        """
        Generate response using configured AI provider.

        Args:
            messages: List of conversation messages.
            max_tokens: Optional override for the maximum number of tokens.
            temperature: Optional override for the temperature.

        Returns:
            AI response with generated content.

        Raises:
            AIConfigurationError: If the selected provider config is missing.
        """
        provider_cfg = self.ai_config.providers.get(self.provider_id)
        if not provider_cfg:
            raise AIConfigurationError(f"AI provider '{self.provider_id}' not found for generation.")

        # Explicit overrides win; otherwise fall back to the configured values.
        request = AIRequest(
            messages=messages,
            max_tokens=max_tokens if max_tokens is not None else provider_cfg.max_tokens,
            temperature=temperature if temperature is not None else provider_cfg.temperature,
        )
        return self.provider.generate(request)

    def chat(
        self,
        prompt: str,
        system_prompt: Optional[str] = None,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
    ) -> str:
        """
        Simple chat interface for single-turn conversations.

        Args:
            prompt: User prompt/question.
            system_prompt: Optional system prompt to set context.
            max_tokens: Optional override for the maximum number of tokens.
            temperature: Optional override for the temperature.

        Returns:
            AI response text.
        """
        messages = []
        if system_prompt:
            messages.append(AIMessage(role="system", content=system_prompt))
        messages.append(AIMessage(role="user", content=prompt))

        response = self.generate(
            messages, max_tokens=max_tokens, temperature=temperature
        )
        return response.content

    def is_available(self) -> bool:
        """
        Check if AI is available and configured correctly.

        Returns:
            True if AI can be used.
        """
        if not self.ai_config or not self.ai_config.providers:
            return False

        provider_cfg = self.ai_config.providers.get(self.provider_id)
        if not provider_cfg:
            return False

        try:
            # This will attempt to instantiate the provider, which includes key checks.
            # Make sure to call self.provider to trigger the instantiation and checks
            return self.provider is not None
        except AIConfigurationError:
            return False
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
"""
|
|
2
|
+
AI Provider Constants
|
|
3
|
+
|
|
4
|
+
Minimal constants for AI providers. Models are not hardcoded to allow
|
|
5
|
+
for easy updates and custom/enterprise model support.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from typing import Dict
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
# Default models (can be overridden by user)
PROVIDER_DEFAULTS: Dict[str, str] = {
    "anthropic": "claude-3-5-sonnet-20241022",
    "gemini": "gemini-1.5-pro",
}


# Provider metadata
PROVIDER_INFO: Dict[str, Dict[str, str]] = {
    "anthropic": {
        "name": "Claude (Anthropic)",
        "api_key_url": "https://console.anthropic.com/",
        "api_key_prefix": "sk-ant-",
    },
    "gemini": {
        "name": "Gemini (Google)",
        "api_key_url": "https://makersuite.google.com/app/apikey",
        "api_key_prefix": "AIza",
    },
}


def get_default_model(provider: str) -> str:
    """Return the default model string for *provider*.

    Args:
        provider: Provider key (e.g., "anthropic")

    Returns:
        Default model string, or "" for an unknown provider.
    """
    return PROVIDER_DEFAULTS.get(provider, "")


def get_provider_name(provider: str) -> str:
    """Return the human-readable display name for *provider*.

    Args:
        provider: Provider key

    Returns:
        Display name from PROVIDER_INFO; unknown providers fall back
        to a title-cased version of the key.
    """
    info = PROVIDER_INFO.get(provider, {})
    return info.get("name", provider.title())
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Exceptions for AI system
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class AIError(Exception):
    """Root of the AI exception hierarchy; catch this for any AI failure."""


class AIConfigurationError(AIError):
    """AI configuration is invalid or missing."""


class AIProviderError(AIError):
    """Base class for failures raised by a concrete AI provider."""


class AIProviderAuthenticationError(AIProviderError):
    """Authentication failed (invalid API key)."""


class AIProviderRateLimitError(AIProviderError):
    """The provider's rate limit was exceeded."""


class AIProviderAPIError(AIProviderError):
    """Generic API error returned by the provider."""


class AIAnalysisError(AIError):
    """Base class for errors while interpreting AI output."""


class AIResponseParseError(AIAnalysisError):
    """Failed to parse the AI response (e.g., invalid JSON)."""


class AINotAvailableError(AIError):
    """AI is not available or not configured."""
|
titan_cli/ai/models.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Data models for AI system
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from typing import List, Dict
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
@dataclass
class AIMessage:
    """A single turn in an AI conversation."""
    role: str  # "system", "user", "assistant"
    content: str

    def to_dict(self) -> Dict[str, str]:
        """Serialize to the plain-dict shape expected by provider APIs."""
        return dict(role=self.role, content=self.content)


@dataclass
class AIRequest:
    """A generation request sent to an AI provider."""
    messages: List[AIMessage]
    max_tokens: int = 4096
    temperature: float = 0.7


@dataclass
class AIResponse:
    """A generation result returned by an AI provider."""
    content: str
    model: str
    usage: Dict[str, int] = field(default_factory=dict)
    finish_reason: str = "unknown"
|
|
@@ -0,0 +1,120 @@
|
|
|
1
|
+
"""
|
|
2
|
+
OAuth Helper for AI Providers
|
|
3
|
+
|
|
4
|
+
Handles OAuth authentication for providers that support it (e.g., Gemini with gcloud).
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from typing import Optional, Tuple
|
|
8
|
+
from dataclasses import dataclass
|
|
9
|
+
|
|
10
|
+
from titan_cli.clients.gcloud_client import GCloudClient, GCloudClientError
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@dataclass
class OAuthStatus:
    """OAuth authentication status reported by OAuthHelper."""
    available: bool                # the OAuth tooling (gcloud CLI) is installed
    authenticated: bool            # an active account was found
    account: Optional[str] = None  # active account identifier, when authenticated
    error: Optional[str] = None    # human-readable reason when not usable
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class OAuthHelper:
    """
    Helper for OAuth authentication with AI providers

    Currently supports:
    - Google Cloud OAuth (gcloud) for Gemini

    Examples:
        >>> helper = OAuthHelper()
        >>> status = helper.check_gcloud_auth()
        >>> if status.authenticated:
        ...     print(f"Authenticated as: {status.account}")
    """

    def __init__(self, gcloud_client: Optional[GCloudClient] = None):
        # Accept an injected client (for testing); default to a real one.
        self.gcloud = gcloud_client or GCloudClient()

    def check_gcloud_auth(self) -> OAuthStatus:
        """
        Check if Google Cloud CLI is installed and authenticated

        Returns:
            OAuthStatus with authentication information
        """
        # Not installed at all: neither available nor authenticated.
        if not self.gcloud.is_installed():
            return OAuthStatus(
                available=False,
                authenticated=False,
                error="gcloud CLI not installed"
            )

        try:
            account = self.gcloud.get_active_account()
            if account:
                return OAuthStatus(
                    available=True,
                    authenticated=True,
                    account=account
                )
            else:
                # Installed, but no account is currently logged in.
                return OAuthStatus(
                    available=True,
                    authenticated=False,
                    error="No active gcloud account found"
                )
        except GCloudClientError as e:
            return OAuthStatus(
                available=True,  # It's installed, but auth failed
                authenticated=False,
                error=str(e)
            )

    @staticmethod
    def get_install_instructions() -> str:
        """
        Get installation instructions for gcloud CLI

        Returns:
            Formatted installation instructions
        """
        return """Install Google Cloud CLI:

1. Visit: https://cloud.google.com/sdk/docs/install
2. Download and install for your platform
3. Run: gcloud init
4. Run: gcloud auth application-default login

This will authenticate your Google account for use with Gemini."""

    @staticmethod
    def get_auth_instructions() -> str:
        """
        Get authentication instructions for gcloud

        Returns:
            Formatted authentication instructions
        """
        return """Authenticate with Google Cloud:

Run: gcloud auth application-default login

This will open your browser to sign in with your Google account."""

    def validate_gcloud_auth(self) -> Tuple[bool, Optional[str]]:
        """
        Validate that gcloud auth is properly configured

        Returns:
            Tuple of (is_valid, error_message)
        """
        status = self.check_gcloud_auth()

        if not status.available:
            return False, status.error

        if not status.authenticated:
            return False, "Not authenticated. Run: gcloud auth application-default login"

        return True, None
|