titan-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
- titan_cli/__init__.py +3 -0
- titan_cli/__main__.py +4 -0
- titan_cli/ai/__init__.py +0 -0
- titan_cli/ai/agents/__init__.py +15 -0
- titan_cli/ai/agents/base.py +152 -0
- titan_cli/ai/client.py +170 -0
- titan_cli/ai/constants.py +56 -0
- titan_cli/ai/exceptions.py +48 -0
- titan_cli/ai/models.py +34 -0
- titan_cli/ai/oauth_helper.py +120 -0
- titan_cli/ai/providers/__init__.py +9 -0
- titan_cli/ai/providers/anthropic.py +117 -0
- titan_cli/ai/providers/base.py +75 -0
- titan_cli/ai/providers/gemini.py +278 -0
- titan_cli/cli.py +59 -0
- titan_cli/clients/__init__.py +1 -0
- titan_cli/clients/gcloud_client.py +52 -0
- titan_cli/core/__init__.py +3 -0
- titan_cli/core/config.py +274 -0
- titan_cli/core/discovery.py +51 -0
- titan_cli/core/errors.py +81 -0
- titan_cli/core/models.py +52 -0
- titan_cli/core/plugins/available.py +36 -0
- titan_cli/core/plugins/models.py +67 -0
- titan_cli/core/plugins/plugin_base.py +108 -0
- titan_cli/core/plugins/plugin_registry.py +163 -0
- titan_cli/core/secrets.py +141 -0
- titan_cli/core/workflows/__init__.py +22 -0
- titan_cli/core/workflows/models.py +88 -0
- titan_cli/core/workflows/project_step_source.py +86 -0
- titan_cli/core/workflows/workflow_exceptions.py +17 -0
- titan_cli/core/workflows/workflow_filter_service.py +137 -0
- titan_cli/core/workflows/workflow_registry.py +419 -0
- titan_cli/core/workflows/workflow_sources.py +307 -0
- titan_cli/engine/__init__.py +39 -0
- titan_cli/engine/builder.py +159 -0
- titan_cli/engine/context.py +82 -0
- titan_cli/engine/mock_context.py +176 -0
- titan_cli/engine/results.py +91 -0
- titan_cli/engine/steps/ai_assistant_step.py +185 -0
- titan_cli/engine/steps/command_step.py +93 -0
- titan_cli/engine/utils/__init__.py +3 -0
- titan_cli/engine/utils/venv.py +31 -0
- titan_cli/engine/workflow_executor.py +187 -0
- titan_cli/external_cli/__init__.py +0 -0
- titan_cli/external_cli/configs.py +17 -0
- titan_cli/external_cli/launcher.py +65 -0
- titan_cli/messages.py +121 -0
- titan_cli/ui/tui/__init__.py +205 -0
- titan_cli/ui/tui/__previews__/statusbar_preview.py +88 -0
- titan_cli/ui/tui/app.py +113 -0
- titan_cli/ui/tui/icons.py +70 -0
- titan_cli/ui/tui/screens/__init__.py +24 -0
- titan_cli/ui/tui/screens/ai_config.py +498 -0
- titan_cli/ui/tui/screens/ai_config_wizard.py +882 -0
- titan_cli/ui/tui/screens/base.py +110 -0
- titan_cli/ui/tui/screens/cli_launcher.py +151 -0
- titan_cli/ui/tui/screens/global_setup_wizard.py +363 -0
- titan_cli/ui/tui/screens/main_menu.py +162 -0
- titan_cli/ui/tui/screens/plugin_config_wizard.py +550 -0
- titan_cli/ui/tui/screens/plugin_management.py +377 -0
- titan_cli/ui/tui/screens/project_setup_wizard.py +686 -0
- titan_cli/ui/tui/screens/workflow_execution.py +592 -0
- titan_cli/ui/tui/screens/workflows.py +249 -0
- titan_cli/ui/tui/textual_components.py +537 -0
- titan_cli/ui/tui/textual_workflow_executor.py +405 -0
- titan_cli/ui/tui/theme.py +102 -0
- titan_cli/ui/tui/widgets/__init__.py +40 -0
- titan_cli/ui/tui/widgets/button.py +108 -0
- titan_cli/ui/tui/widgets/header.py +116 -0
- titan_cli/ui/tui/widgets/panel.py +81 -0
- titan_cli/ui/tui/widgets/status_bar.py +115 -0
- titan_cli/ui/tui/widgets/table.py +77 -0
- titan_cli/ui/tui/widgets/text.py +177 -0
- titan_cli/utils/__init__.py +0 -0
- titan_cli/utils/autoupdate.py +155 -0
- titan_cli-0.1.0.dist-info/METADATA +149 -0
- titan_cli-0.1.0.dist-info/RECORD +146 -0
- titan_cli-0.1.0.dist-info/WHEEL +4 -0
- titan_cli-0.1.0.dist-info/entry_points.txt +9 -0
- titan_cli-0.1.0.dist-info/licenses/LICENSE +201 -0
- titan_plugin_git/__init__.py +1 -0
- titan_plugin_git/clients/__init__.py +8 -0
- titan_plugin_git/clients/git_client.py +772 -0
- titan_plugin_git/exceptions.py +40 -0
- titan_plugin_git/messages.py +112 -0
- titan_plugin_git/models.py +39 -0
- titan_plugin_git/plugin.py +118 -0
- titan_plugin_git/steps/__init__.py +1 -0
- titan_plugin_git/steps/ai_commit_message_step.py +171 -0
- titan_plugin_git/steps/branch_steps.py +104 -0
- titan_plugin_git/steps/commit_step.py +80 -0
- titan_plugin_git/steps/push_step.py +63 -0
- titan_plugin_git/steps/status_step.py +59 -0
- titan_plugin_git/workflows/__previews__/__init__.py +1 -0
- titan_plugin_git/workflows/__previews__/commit_ai_preview.py +124 -0
- titan_plugin_git/workflows/commit-ai.yaml +28 -0
- titan_plugin_github/__init__.py +11 -0
- titan_plugin_github/agents/__init__.py +6 -0
- titan_plugin_github/agents/config_loader.py +130 -0
- titan_plugin_github/agents/issue_generator.py +353 -0
- titan_plugin_github/agents/pr_agent.py +528 -0
- titan_plugin_github/clients/__init__.py +8 -0
- titan_plugin_github/clients/github_client.py +1105 -0
- titan_plugin_github/config/__init__.py +0 -0
- titan_plugin_github/config/pr_agent.toml +85 -0
- titan_plugin_github/exceptions.py +28 -0
- titan_plugin_github/messages.py +88 -0
- titan_plugin_github/models.py +330 -0
- titan_plugin_github/plugin.py +131 -0
- titan_plugin_github/steps/__init__.py +12 -0
- titan_plugin_github/steps/ai_pr_step.py +172 -0
- titan_plugin_github/steps/create_pr_step.py +86 -0
- titan_plugin_github/steps/github_prompt_steps.py +171 -0
- titan_plugin_github/steps/issue_steps.py +143 -0
- titan_plugin_github/steps/preview_step.py +40 -0
- titan_plugin_github/utils.py +82 -0
- titan_plugin_github/workflows/__previews__/__init__.py +1 -0
- titan_plugin_github/workflows/__previews__/create_pr_ai_preview.py +140 -0
- titan_plugin_github/workflows/create-issue-ai.yaml +32 -0
- titan_plugin_github/workflows/create-pr-ai.yaml +49 -0
- titan_plugin_jira/__init__.py +8 -0
- titan_plugin_jira/agents/__init__.py +6 -0
- titan_plugin_jira/agents/config_loader.py +154 -0
- titan_plugin_jira/agents/jira_agent.py +553 -0
- titan_plugin_jira/agents/prompts.py +364 -0
- titan_plugin_jira/agents/response_parser.py +435 -0
- titan_plugin_jira/agents/token_tracker.py +223 -0
- titan_plugin_jira/agents/validators.py +246 -0
- titan_plugin_jira/clients/jira_client.py +745 -0
- titan_plugin_jira/config/jira_agent.toml +92 -0
- titan_plugin_jira/config/templates/issue_analysis.md.j2 +78 -0
- titan_plugin_jira/exceptions.py +37 -0
- titan_plugin_jira/formatters/__init__.py +6 -0
- titan_plugin_jira/formatters/markdown_formatter.py +245 -0
- titan_plugin_jira/messages.py +115 -0
- titan_plugin_jira/models.py +89 -0
- titan_plugin_jira/plugin.py +264 -0
- titan_plugin_jira/steps/ai_analyze_issue_step.py +105 -0
- titan_plugin_jira/steps/get_issue_step.py +82 -0
- titan_plugin_jira/steps/prompt_select_issue_step.py +80 -0
- titan_plugin_jira/steps/search_saved_query_step.py +238 -0
- titan_plugin_jira/utils/__init__.py +13 -0
- titan_plugin_jira/utils/issue_sorter.py +140 -0
- titan_plugin_jira/utils/saved_queries.py +150 -0
- titan_plugin_jira/workflows/analyze-jira-issues.yaml +34 -0
titan_cli/ai/providers/anthropic.py
ADDED
@@ -0,0 +1,117 @@
"""
Anthropic AI provider (Claude)
"""

from .base import AIProvider
from ..models import AIRequest, AIResponse
from ..exceptions import (
    AIProviderAuthenticationError,
    AIProviderRateLimitError,
    AIProviderAPIError
)


from ..constants import get_default_model


class AnthropicProvider(AIProvider):
    """
    Provider for Claude API (Anthropic).

    Requires:
    - pip install anthropic
    - API key from https://console.anthropic.com/
    """

    def __init__(self, api_key: str, model: str = get_default_model("anthropic"), base_url: str = None):
        super().__init__(api_key, model)
        try:
            from anthropic import Anthropic
            # Support custom base_url for enterprise endpoints
            # Normalize base_url by removing trailing slash
            normalized_base_url = base_url.rstrip('/') if base_url else None
            if normalized_base_url:
                self.client = Anthropic(api_key=api_key, base_url=normalized_base_url)
            else:
                self.client = Anthropic(api_key=api_key)
        except ImportError:
            raise ImportError(
                "Anthropic provider requires 'anthropic' library.\n"
                "Install with: poetry add anthropic"
            )

    def generate(self, request: AIRequest) -> AIResponse:
        """
        Generate response using Claude API.

        Args:
            request: Request with messages and parameters

        Returns:
            Response with generated content

        Raises:
            AIProviderAuthenticationError: Invalid API key
            AIProviderRateLimitError: Rate limit exceeded
            AIProviderAPIError: Other API errors
        """
        try:
            # Separate system messages from other messages
            # Claude API requires system as a top-level parameter, not in messages array
            system_messages = [msg for msg in request.messages if msg.role == "system"]
            regular_messages = [msg for msg in request.messages if msg.role != "system"]

            # Build system parameter (combine all system messages)
            system_content = "\n\n".join(msg.content for msg in system_messages) if system_messages else None

            # Convert regular messages to Claude format
            messages = [msg.to_dict() for msg in regular_messages]

            # Call Claude API with system as separate parameter
            api_params = {
                "model": self.model,
                "max_tokens": request.max_tokens,
                "temperature": request.temperature,
                "messages": messages
            }

            if system_content:
                api_params["system"] = system_content

            response = self.client.messages.create(**api_params)

            # Calculate total tokens
            input_tokens = response.usage.input_tokens
            output_tokens = response.usage.output_tokens
            total_tokens = input_tokens + output_tokens

            return AIResponse(
                content=response.content[0].text,
                model=response.model,
                usage={
                    "input_tokens": input_tokens,
                    "output_tokens": output_tokens,
                    "total_tokens": total_tokens
                },
                finish_reason=response.stop_reason
            )

        except Exception as e:
            error_msg = str(e).lower()

            if "authentication" in error_msg or "api key" in error_msg:
                raise AIProviderAuthenticationError(
                    f"Anthropic authentication failed: {e}\n"
                    f"Check your API key via `titan ai configure`"
                )
            elif "rate limit" in error_msg:
                raise AIProviderRateLimitError(
                    f"Anthropic rate limit exceeded: {e}\n"
                    f"Wait a moment and try again"
                )
            else:
                raise AIProviderAPIError(f"Anthropic API error: {e}")

    @property
    def name(self) -> str:
        return "anthropic"
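For orientation, here is a minimal usage sketch of this provider. It is an editor's illustration, not code shipped in the wheel; the constructor and the AIMessage/AIRequest fields (role, content, max_tokens, temperature) are taken from signatures visible in this diff, while the placeholder key and prompt text are invented.

# Usage sketch (editor's illustration, not part of the package).
from titan_cli.ai.models import AIMessage, AIRequest
from titan_cli.ai.providers.anthropic import AnthropicProvider

# Model defaults via get_default_model("anthropic"); the key is a placeholder.
provider = AnthropicProvider(api_key="sk-ant-...")
request = AIRequest(
    messages=[
        AIMessage(role="system", content="You are a concise assistant."),
        AIMessage(role="user", content="Say hello in one word."),
    ],
    max_tokens=64,
    temperature=0.2,
)
response = provider.generate(request)
print(response.content, response.usage["total_tokens"])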
titan_cli/ai/providers/base.py
ADDED
@@ -0,0 +1,75 @@
"""
Base AI provider interface
"""

from abc import ABC, abstractmethod

from ..models import AIRequest, AIResponse


class AIProvider(ABC):
    """
    Base interface for AI providers.

    Each provider implements how to interact with a specific AI API
    (Claude, Gemini, OpenAI, etc.)
    """

    def __init__(self, api_key: str, model: str):
        """
        Initialize provider.

        Args:
            api_key: API key for the provider
            model: Model identifier (e.g., "claude-3-5-sonnet-20241022")
        """
        self.api_key = api_key
        self.model = model

    @abstractmethod
    def generate(self, request: AIRequest) -> AIResponse:
        """
        Generate response using the AI model.

        Args:
            request: Request with messages and parameters

        Returns:
            Response with generated content

        Raises:
            AIProviderError: If generation fails
        """
        pass

    @property
    @abstractmethod
    def name(self) -> str:
        """
        Provider name (e.g., "claude", "gemini", "openai").

        Returns:
            Provider identifier
        """
        pass

    def validate_api_key(self) -> bool:
        """
        Validate that the API key works.

        Returns:
            True if API key is valid

        Note: Default implementation tries a simple generation.
        Providers can override for more efficient validation.
        """
        try:
            from ..models import AIMessage
            test_request = AIRequest(
                messages=[AIMessage(role="user", content="Hi")],
                max_tokens=10
            )
            self.generate(test_request)
            return True
        except Exception:
            return False
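To show the contract this base class defines (one abstract generate, one abstract name property, an inherited validate_api_key), here is a hedged sketch of a minimal third-party provider. The EchoProvider name and its canned behavior are invented for illustration; the AIResponse fields mirror those used by the shipped providers in this diff.

# Illustrative subclass (editor's sketch, not shipped in the wheel).
from titan_cli.ai.models import AIRequest, AIResponse
from titan_cli.ai.providers.base import AIProvider

class EchoProvider(AIProvider):
    """Toy provider that echoes the last message back, for testing."""

    def generate(self, request: AIRequest) -> AIResponse:
        last = request.messages[-1].content if request.messages else ""
        return AIResponse(
            content=last,
            model=self.model,
            usage={"input_tokens": 0, "output_tokens": 0, "total_tokens": 0},
            finish_reason="stop",
        )

    @property
    def name(self) -> str:
        return "echo"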
titan_cli/ai/providers/gemini.py
ADDED
@@ -0,0 +1,278 @@
"""Gemini AI provider (Google)

Supports both API key and OAuth authentication via gcloud.
Also supports custom endpoints with Anthropic-compatible API format."""

from .base import AIProvider
from ..models import AIRequest, AIResponse, AIMessage
from ..exceptions import AIProviderAPIError

from ..constants import get_default_model

try:
    import google.genai as genai
    import google.auth
    from google.genai.types import GenerateContentConfig
    GEMINI_AVAILABLE = True
    GEMINI_IMPORT_ERROR = None
except ImportError as e:
    GEMINI_AVAILABLE = False
    GEMINI_IMPORT_ERROR = str(e)

# For custom endpoint support
import requests


class GeminiProvider(AIProvider):
    """
    Provider for Gemini API (Google).

    Supports:
    - API key authentication
    - OAuth via gcloud (Application Default Credentials)

    Requires:
    - pip install google-genai google-auth
    - API key from https://makersuite.google.com/app/apikey
    - OR: gcloud auth application-default login

    Usage:
        # With API key
        provider = GeminiProvider("AIza...", model="gemini-pro")

        # With OAuth (gcloud)
        provider = GeminiProvider("GCLOUD_OAUTH", model="gemini-pro")
    """

    def __init__(self, api_key: str, model: str = get_default_model("gemini"), base_url: str = None):
        super().__init__(api_key, model)

        # Normalize base_url by removing trailing slash
        self.base_url = base_url.rstrip('/') if base_url else None
        self.use_custom_endpoint = bool(base_url)

        # Check if using OAuth or API key
        self.use_oauth = (api_key == "GCLOUD_OAUTH")

        if self.use_custom_endpoint:
            # Custom endpoint mode - use HTTP requests manually
            # Corporate endpoint uses same API format as Anthropic
            if self.use_oauth:
                raise AIProviderAPIError(
                    "OAuth is not supported with custom endpoints. Please use an API key."
                )
            # No additional setup needed, will use requests directly
        else:
            # Standard Google Gemini endpoint - use google-genai library
            if not GEMINI_AVAILABLE:
                error_msg = "google-genai not installed.\n"
                if GEMINI_IMPORT_ERROR:
                    error_msg += f"Import error: {GEMINI_IMPORT_ERROR}\n"
                error_msg += "Install with: poetry add google-genai google-auth"
                raise AIProviderAPIError(error_msg)

            if self.use_oauth:
                # Use Application Default Credentials with Client, assuming Vertex AI context for OAuth
                try:
                    google.auth.default()  # This is for ADC
                    self._genai_client = genai.Client(vertexai=True)
                except Exception as e:
                    raise AIProviderAPIError(
                        f"Failed to get Google Cloud credentials for Vertex AI: {e}\n"
                        "Run: gcloud auth application-default login"
                    )
            else:
                # Use API key with Client for official Google endpoint
                self._genai_client = genai.Client(api_key=api_key)

    def generate(self, request: AIRequest) -> AIResponse:
        """
        Generate response using Gemini API

        Args:
            request: AI request with messages

        Returns:
            AI response

        Raises:
            AIProviderAPIError: If generation fails
        """
        if self.use_custom_endpoint:
            return self._generate_custom_endpoint(request)
        else:
            return self._generate_google_endpoint(request)

    def _generate_custom_endpoint(self, request: AIRequest) -> AIResponse:
        """
        Generate using custom corporate endpoint.
        Uses same API format as Anthropic (messages API).
        """
        try:
            # Separate system messages from regular messages
            system_messages = [msg for msg in request.messages if msg.role == "system"]
            regular_messages = [msg for msg in request.messages if msg.role != "system"]

            # Build system parameter
            system_content = "\n\n".join(msg.content for msg in system_messages) if system_messages else None

            # Convert messages to API format
            messages = [{"role": msg.role, "content": msg.content} for msg in regular_messages]

            # Build request payload
            payload = {
                "model": self.model,
                "max_tokens": request.max_tokens or 4096,
                "messages": messages
            }

            if system_content:
                payload["system"] = system_content

            if request.temperature is not None:
                payload["temperature"] = request.temperature

            # Build headers
            headers = {
                "x-api-key": self.api_key,
                "anthropic-version": "2023-06-01",
                "content-type": "application/json"
            }

            # Make HTTP request
            response = requests.post(
                f"{self.base_url}/v1/messages",
                headers=headers,
                json=payload,
                timeout=120
            )

            if response.status_code != 200:
                raise AIProviderAPIError(
                    f"Custom endpoint error: {response.status_code} - {response.text}"
                )

            data = response.json()

            # Extract response content (Anthropic format)
            content = ""
            if "content" in data and len(data["content"]) > 0:
                content = data["content"][0].get("text", "")

            # Handle empty content (e.g., max_tokens reached)
            if not content and data.get("stop_reason") == "max_tokens":
                raise AIProviderAPIError(
                    "Response truncated due to max_tokens limit. Increase max_tokens in request."
                )

            # Extract usage
            usage_data = {}
            if "usage" in data:
                usage_data = {
                    "input_tokens": data["usage"].get("input_tokens", 0),
                    "output_tokens": data["usage"].get("output_tokens", 0),
                }

            return AIResponse(
                content=content,
                model=data.get("model", self.model),
                usage=usage_data,
                finish_reason=data.get("stop_reason", "stop")
            )

        except requests.exceptions.RequestException as e:
            raise AIProviderAPIError(f"Custom endpoint request failed: {e}")
        except Exception as e:
            raise AIProviderAPIError(f"Gemini API error: {e}")

    def _generate_google_endpoint(self, request: AIRequest) -> AIResponse:
        """Generate using official Google Gemini endpoint"""
        try:
            # Convert messages to Gemini format
            gemini_messages = self._convert_messages(request.messages)

            # Prepare generation config
            config = GenerateContentConfig(
                temperature=request.temperature,
                maxOutputTokens=request.max_tokens
            )

            # Generate response
            if len(gemini_messages) == 1 and gemini_messages[0].get("role") == "user":
                # Single message - use generate_content
                response = self._genai_client.models.generate_content(
                    model=self.model,
                    contents=gemini_messages[0]["parts"],
                    config=config
                )
            else:
                # Multiple messages - use chat
                chat_session = self._genai_client.chats.create(
                    model=self.model,
                    history=gemini_messages[:-1] if len(gemini_messages) > 1 else [],
                    config=config
                )
                response = chat_session.send_message(
                    gemini_messages[-1]["parts"]
                )

            # Extract text
            text = response.text

            # Extract usage data if available
            usage_data = {}
            if hasattr(response, 'usage_metadata'):
                usage_data = {
                    "input_tokens": response.usage_metadata.prompt_token_count,
                    "output_tokens": response.usage_metadata.candidates_token_count,
                }

            return AIResponse(
                content=text,
                model=self.model,
                usage=usage_data,
                finish_reason="stop"  # Not easily available in all cases
            )

        except Exception as e:
            raise AIProviderAPIError(f"Gemini API error: {e}")

    def _convert_messages(self, messages: list[AIMessage]) -> list[dict]:
        """
        Convert AIMessage format to Gemini format

        Gemini uses:
        - role: "user" or "model" (not "assistant")
        - parts: list of text content

        System messages are prepended to the first user message
        """
        gemini_messages = []
        system_context = ""

        for msg in messages:
            if msg.role == "system":
                # Accumulate system messages
                system_context += msg.content + "\n\n"
            elif msg.role == "user":
                content = msg.content
                if system_context:
                    # Prepend system context to first user message
                    content = f"{system_context}{content}"
                    system_context = ""

                gemini_messages.append({
                    "role": "user",
                    "parts": [content]
                })
            elif msg.role == "assistant":
                gemini_messages.append({
                    "role": "model",  # Gemini uses "model" instead of "assistant"
                    "parts": [msg.content]
                })

        return gemini_messages

    @property
    def name(self) -> str:
        return "gemini"
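The three authentication paths above can be summarized in a short sketch. The constructor arguments come straight from the __init__ signature in this diff; the keys and the corporate URL are placeholders invented for illustration.

# Instantiation sketch (editor's illustration, not part of the package).
from titan_cli.ai.providers.gemini import GeminiProvider

# 1. Google AI Studio API key -> google-genai Client
provider = GeminiProvider("AIza...", model="gemini-pro")

# 2. The "GCLOUD_OAUTH" sentinel -> Application Default Credentials, Vertex AI Client
provider = GeminiProvider("GCLOUD_OAUTH", model="gemini-pro")

# 3. Custom endpoint speaking the Anthropic messages API -> raw HTTP via requests
provider = GeminiProvider("corp-key", base_url="https://ai.example.internal/")
assert provider.use_custom_endpoint
assert provider.base_url == "https://ai.example.internal"  # trailing slash stripped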
titan_cli/cli.py
ADDED
@@ -0,0 +1,59 @@
"""
Titan CLI - Main CLI application

Combines all tool commands into a single CLI interface.
"""
import typer

from titan_cli import __version__
from titan_cli.messages import msg
from titan_cli.ui.tui import launch_tui
from titan_cli.utils.autoupdate import check_for_updates, get_update_message



# Main Typer Application
app = typer.Typer(
    name=msg.CLI.APP_NAME,
    help=msg.CLI.APP_DESCRIPTION,
    invoke_without_command=True,
    no_args_is_help=False,
)


# --- Helper function for version retrieval ---
def get_version() -> str:
    """Retrieves the package version."""
    return __version__


@app.callback()
def main(ctx: typer.Context):
    """Titan CLI - Main entry point"""
    if ctx.invoked_subcommand is None:
        # Check for updates (non-blocking, silent on errors)
        try:
            update_info = check_for_updates()
            message = get_update_message(update_info)
            if message:
                typer.echo(message)
                typer.echo()  # Empty line for spacing
        except Exception:
            # Silently ignore update check failures
            pass

        # Launch TUI by default
        launch_tui()


@app.command()
def version():
    """Show Titan CLI version."""
    cli_version = get_version()
    typer.echo(msg.CLI.VERSION.format(version=cli_version))


@app.command()
def tui():
    """Launch Titan in TUI mode (Textual interface)."""
    launch_tui()
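Because invoke_without_command=True routes a bare invocation into the callback (update check, then the TUI), the explicit subcommands are easiest to exercise with Typer's test runner. A sketch, assuming titan_cli is importable in the current environment:

# Smoke-test sketch (editor's illustration, not shipped in the wheel).
from typer.testing import CliRunner
from titan_cli.cli import app

runner = CliRunner()
result = runner.invoke(app, ["version"])  # runs version(), bypassing the TUI path
assert result.exit_code == 0
print(result.output)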
titan_cli/clients/__init__.py
ADDED
@@ -0,0 +1 @@
# clients/__init__.py
titan_cli/clients/gcloud_client.py
ADDED
@@ -0,0 +1,52 @@
# clients/gcloud_client.py
import subprocess
from typing import Optional

class GCloudClientError(Exception):
    """Custom exception for GCloudClient errors."""
    pass

class GCloudClient:
    """A wrapper for interacting with the gcloud CLI."""

    def is_installed(self) -> bool:
        """Check if the gcloud CLI is installed and available in the system's PATH."""
        try:
            result = subprocess.run(
                ["gcloud", "--version"],
                capture_output=True,
                text=True,
                timeout=5,
                check=False  # Don't raise CalledProcessError on non-zero exit
            )
            return result.returncode == 0
        except (FileNotFoundError, subprocess.TimeoutExpired):
            return False

    def get_active_account(self) -> Optional[str]:
        """
        Retrieves the active, authenticated gcloud account.

        Returns:
            The account email (str) if authenticated, otherwise None.

        Raises:
            GCloudClientError: If the gcloud command fails.
        """
        try:
            result = subprocess.run(
                ["gcloud", "auth", "list", "--filter=status:ACTIVE", "--format=value(account)"],
                capture_output=True,
                text=True,
                timeout=5,
                check=True  # Raise CalledProcessError on non-zero exit
            )
            account = result.stdout.strip()
            return account if account else None
        except FileNotFoundError:
            # This case is handled by is_installed, but included for robustness
            raise GCloudClientError("gcloud CLI not found.")
        except subprocess.CalledProcessError as e:
            raise GCloudClientError(f"gcloud command failed: {e.stderr}")
        except subprocess.TimeoutExpired:
            raise GCloudClientError("gcloud command timed out.")
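A short usage sketch of this client, following the install-check-then-query flow its docstrings suggest; everything here is grounded in the methods shown above, with the printed strings invented for illustration.

# Usage sketch (editor's illustration, not part of the package).
from titan_cli.clients.gcloud_client import GCloudClient, GCloudClientError

client = GCloudClient()
if not client.is_installed():
    print("gcloud CLI not found on PATH")
else:
    try:
        account = client.get_active_account()
        print(account or "No active gcloud account")
    except GCloudClientError as e:
        print(f"gcloud lookup failed: {e}")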