titan-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- titan_cli/__init__.py +3 -0
- titan_cli/__main__.py +4 -0
- titan_cli/ai/__init__.py +0 -0
- titan_cli/ai/agents/__init__.py +15 -0
- titan_cli/ai/agents/base.py +152 -0
- titan_cli/ai/client.py +170 -0
- titan_cli/ai/constants.py +56 -0
- titan_cli/ai/exceptions.py +48 -0
- titan_cli/ai/models.py +34 -0
- titan_cli/ai/oauth_helper.py +120 -0
- titan_cli/ai/providers/__init__.py +9 -0
- titan_cli/ai/providers/anthropic.py +117 -0
- titan_cli/ai/providers/base.py +75 -0
- titan_cli/ai/providers/gemini.py +278 -0
- titan_cli/cli.py +59 -0
- titan_cli/clients/__init__.py +1 -0
- titan_cli/clients/gcloud_client.py +52 -0
- titan_cli/core/__init__.py +3 -0
- titan_cli/core/config.py +274 -0
- titan_cli/core/discovery.py +51 -0
- titan_cli/core/errors.py +81 -0
- titan_cli/core/models.py +52 -0
- titan_cli/core/plugins/available.py +36 -0
- titan_cli/core/plugins/models.py +67 -0
- titan_cli/core/plugins/plugin_base.py +108 -0
- titan_cli/core/plugins/plugin_registry.py +163 -0
- titan_cli/core/secrets.py +141 -0
- titan_cli/core/workflows/__init__.py +22 -0
- titan_cli/core/workflows/models.py +88 -0
- titan_cli/core/workflows/project_step_source.py +86 -0
- titan_cli/core/workflows/workflow_exceptions.py +17 -0
- titan_cli/core/workflows/workflow_filter_service.py +137 -0
- titan_cli/core/workflows/workflow_registry.py +419 -0
- titan_cli/core/workflows/workflow_sources.py +307 -0
- titan_cli/engine/__init__.py +39 -0
- titan_cli/engine/builder.py +159 -0
- titan_cli/engine/context.py +82 -0
- titan_cli/engine/mock_context.py +176 -0
- titan_cli/engine/results.py +91 -0
- titan_cli/engine/steps/ai_assistant_step.py +185 -0
- titan_cli/engine/steps/command_step.py +93 -0
- titan_cli/engine/utils/__init__.py +3 -0
- titan_cli/engine/utils/venv.py +31 -0
- titan_cli/engine/workflow_executor.py +187 -0
- titan_cli/external_cli/__init__.py +0 -0
- titan_cli/external_cli/configs.py +17 -0
- titan_cli/external_cli/launcher.py +65 -0
- titan_cli/messages.py +121 -0
- titan_cli/ui/tui/__init__.py +205 -0
- titan_cli/ui/tui/__previews__/statusbar_preview.py +88 -0
- titan_cli/ui/tui/app.py +113 -0
- titan_cli/ui/tui/icons.py +70 -0
- titan_cli/ui/tui/screens/__init__.py +24 -0
- titan_cli/ui/tui/screens/ai_config.py +498 -0
- titan_cli/ui/tui/screens/ai_config_wizard.py +882 -0
- titan_cli/ui/tui/screens/base.py +110 -0
- titan_cli/ui/tui/screens/cli_launcher.py +151 -0
- titan_cli/ui/tui/screens/global_setup_wizard.py +363 -0
- titan_cli/ui/tui/screens/main_menu.py +162 -0
- titan_cli/ui/tui/screens/plugin_config_wizard.py +550 -0
- titan_cli/ui/tui/screens/plugin_management.py +377 -0
- titan_cli/ui/tui/screens/project_setup_wizard.py +686 -0
- titan_cli/ui/tui/screens/workflow_execution.py +592 -0
- titan_cli/ui/tui/screens/workflows.py +249 -0
- titan_cli/ui/tui/textual_components.py +537 -0
- titan_cli/ui/tui/textual_workflow_executor.py +405 -0
- titan_cli/ui/tui/theme.py +102 -0
- titan_cli/ui/tui/widgets/__init__.py +40 -0
- titan_cli/ui/tui/widgets/button.py +108 -0
- titan_cli/ui/tui/widgets/header.py +116 -0
- titan_cli/ui/tui/widgets/panel.py +81 -0
- titan_cli/ui/tui/widgets/status_bar.py +115 -0
- titan_cli/ui/tui/widgets/table.py +77 -0
- titan_cli/ui/tui/widgets/text.py +177 -0
- titan_cli/utils/__init__.py +0 -0
- titan_cli/utils/autoupdate.py +155 -0
- titan_cli-0.1.0.dist-info/METADATA +149 -0
- titan_cli-0.1.0.dist-info/RECORD +146 -0
- titan_cli-0.1.0.dist-info/WHEEL +4 -0
- titan_cli-0.1.0.dist-info/entry_points.txt +9 -0
- titan_cli-0.1.0.dist-info/licenses/LICENSE +201 -0
- titan_plugin_git/__init__.py +1 -0
- titan_plugin_git/clients/__init__.py +8 -0
- titan_plugin_git/clients/git_client.py +772 -0
- titan_plugin_git/exceptions.py +40 -0
- titan_plugin_git/messages.py +112 -0
- titan_plugin_git/models.py +39 -0
- titan_plugin_git/plugin.py +118 -0
- titan_plugin_git/steps/__init__.py +1 -0
- titan_plugin_git/steps/ai_commit_message_step.py +171 -0
- titan_plugin_git/steps/branch_steps.py +104 -0
- titan_plugin_git/steps/commit_step.py +80 -0
- titan_plugin_git/steps/push_step.py +63 -0
- titan_plugin_git/steps/status_step.py +59 -0
- titan_plugin_git/workflows/__previews__/__init__.py +1 -0
- titan_plugin_git/workflows/__previews__/commit_ai_preview.py +124 -0
- titan_plugin_git/workflows/commit-ai.yaml +28 -0
- titan_plugin_github/__init__.py +11 -0
- titan_plugin_github/agents/__init__.py +6 -0
- titan_plugin_github/agents/config_loader.py +130 -0
- titan_plugin_github/agents/issue_generator.py +353 -0
- titan_plugin_github/agents/pr_agent.py +528 -0
- titan_plugin_github/clients/__init__.py +8 -0
- titan_plugin_github/clients/github_client.py +1105 -0
- titan_plugin_github/config/__init__.py +0 -0
- titan_plugin_github/config/pr_agent.toml +85 -0
- titan_plugin_github/exceptions.py +28 -0
- titan_plugin_github/messages.py +88 -0
- titan_plugin_github/models.py +330 -0
- titan_plugin_github/plugin.py +131 -0
- titan_plugin_github/steps/__init__.py +12 -0
- titan_plugin_github/steps/ai_pr_step.py +172 -0
- titan_plugin_github/steps/create_pr_step.py +86 -0
- titan_plugin_github/steps/github_prompt_steps.py +171 -0
- titan_plugin_github/steps/issue_steps.py +143 -0
- titan_plugin_github/steps/preview_step.py +40 -0
- titan_plugin_github/utils.py +82 -0
- titan_plugin_github/workflows/__previews__/__init__.py +1 -0
- titan_plugin_github/workflows/__previews__/create_pr_ai_preview.py +140 -0
- titan_plugin_github/workflows/create-issue-ai.yaml +32 -0
- titan_plugin_github/workflows/create-pr-ai.yaml +49 -0
- titan_plugin_jira/__init__.py +8 -0
- titan_plugin_jira/agents/__init__.py +6 -0
- titan_plugin_jira/agents/config_loader.py +154 -0
- titan_plugin_jira/agents/jira_agent.py +553 -0
- titan_plugin_jira/agents/prompts.py +364 -0
- titan_plugin_jira/agents/response_parser.py +435 -0
- titan_plugin_jira/agents/token_tracker.py +223 -0
- titan_plugin_jira/agents/validators.py +246 -0
- titan_plugin_jira/clients/jira_client.py +745 -0
- titan_plugin_jira/config/jira_agent.toml +92 -0
- titan_plugin_jira/config/templates/issue_analysis.md.j2 +78 -0
- titan_plugin_jira/exceptions.py +37 -0
- titan_plugin_jira/formatters/__init__.py +6 -0
- titan_plugin_jira/formatters/markdown_formatter.py +245 -0
- titan_plugin_jira/messages.py +115 -0
- titan_plugin_jira/models.py +89 -0
- titan_plugin_jira/plugin.py +264 -0
- titan_plugin_jira/steps/ai_analyze_issue_step.py +105 -0
- titan_plugin_jira/steps/get_issue_step.py +82 -0
- titan_plugin_jira/steps/prompt_select_issue_step.py +80 -0
- titan_plugin_jira/steps/search_saved_query_step.py +238 -0
- titan_plugin_jira/utils/__init__.py +13 -0
- titan_plugin_jira/utils/issue_sorter.py +140 -0
- titan_plugin_jira/utils/saved_queries.py +150 -0
- titan_plugin_jira/workflows/analyze-jira-issues.yaml +34 -0
|
@@ -0,0 +1,176 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Mock Clients for Workflow Previews
|
|
3
|
+
|
|
4
|
+
Provides mock implementations of clients (Git, AI, GitHub) that can be used
|
|
5
|
+
by workflow previews to execute real step functions with fake data.
|
|
6
|
+
|
|
7
|
+
Each preview should create its own mock context with customized data.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from typing import Optional
|
|
11
|
+
from dataclasses import dataclass
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@dataclass
class MockGitStatus:
    """Mock git status for previews.

    Mimics the shape of the real GitClient status object: whether the
    working tree is clean, plus the modified / untracked / staged file
    lists. Any list left as None is filled with representative demo data
    in __post_init__, so previews get a plausible "dirty repo" by default.
    """

    is_clean: bool = False
    # FIX: these previously lied to type checkers with `list = None`;
    # None is a deliberate "use demo data" sentinel, so Optional is correct.
    modified_files: Optional[list] = None
    untracked_files: Optional[list] = None
    staged_files: Optional[list] = None

    def __post_init__(self):
        # Only fill in fields the caller did not provide explicitly.
        if self.modified_files is None:
            self.modified_files = ["cli.py", "messages.py"]
        if self.untracked_files is None:
            self.untracked_files = ["preview.py"]
        if self.staged_files is None:
            self.staged_files = []
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class MockGitClient:
    """Stand-in for GitClient used by workflow previews.

    Serves canned branch names, diffs, and commit lists so previews can run
    real step functions without touching an actual repository.
    """

    def __init__(self):
        # Fake repository state consumed by preview workflows.
        self.main_branch = "master"
        self.current_branch = "feat/workflow-preview"
        self.default_remote = "origin"

    def get_status(self):
        """Report a dirty working tree so the interesting preview paths run."""
        return MockGitStatus(is_clean=False)

    def get_current_branch(self) -> str:
        """Return the fixed fake feature branch."""
        return self.current_branch

    def branch_exists_on_remote(self, branch: str, remote: str = "origin") -> bool:
        """Pretend the branch was never pushed, to exercise set_upstream."""
        # Mock: always return False to trigger set_upstream
        return False

    def get_uncommitted_diff(self) -> str:
        """Return a canned unified diff of uncommitted changes."""
        return """diff --git a/cli.py b/cli.py
index abc123..def456 100644
--- a/cli.py
+++ b/cli.py
@@ -1,3 +1,5 @@
+from titan_cli.preview import preview_workflow
+
 def main():
-    pass
+    # Added preview functionality
+    preview_workflow()"""

    def get_branch_diff(self, base: str, head: str) -> str:
        """Return a canned diff between two branches."""
        return """diff --git a/preview.py b/preview.py
new file mode 100644
index 0000000..abc123
--- /dev/null
+++ b/preview.py
@@ -0,0 +1,50 @@
+# Preview system for workflows
+def preview_workflow(name):
+    pass"""

    def get_branch_commits(self, base: str, head: str) -> list[str]:
        """Return canned commit subjects between *base* and *head*."""
        subjects = [
            "feat(workflows): add preview system",
            "feat(ui): improve panel rendering",
            "fix(git): handle uncommitted changes",
        ]
        return subjects

    def commit(self, message: str, all: bool = True) -> str:
        """Pretend to commit; nothing is written. Returns a fixed fake SHA."""
        return "abc1234567890"

    def push(self, remote: str = "origin", branch: Optional[str] = None, set_upstream: bool = False):
        """Pretend to push; always succeeds."""
        return True
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
class MockAIResponse:
    """Minimal stand-in for an AI provider response; exposes only .content."""

    def __init__(self, content: str):
        # The canned completion text a preview will display.
        self.content = content
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
class MockAIClient:
    """Stand-in for AIClient that returns canned completions for previews."""

    def is_available(self) -> bool:
        """The mock provider is always ready."""
        return True

    def generate(self, messages, max_tokens: int = 1000, temperature: float = 0.7):
        """Return a canned response chosen by keywords in the first message."""
        # Extract the prompt text; an empty message list yields an empty prompt.
        prompt = messages[0].content if messages else ""

        # Pick the canned reply that matches the request's apparent intent.
        if "commit message" in prompt.lower():
            return MockAIResponse(
                "feat(workflows): add preview system for workflows with mocked data"
            )

        if "pull request" in prompt.lower() or "pr" in prompt.lower():
            return MockAIResponse("""TITLE: feat(workflows): Add preview system for testing workflow UI

DESCRIPTION:
## Summary
- Added `titan preview workflow <name>` command
- Created mock context system for workflow previews
- Implemented preview for create-pr-ai workflow

## Type of Change
- [x] New feature (non-breaking)
- [ ] Bug fix
- [ ] Breaking change

## Testing
- [x] Tested preview command with create-pr-ai
- [x] Verified mocked data displays correctly""")

        return MockAIResponse("Mocked AI response")
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
class MockGitHubClient:
    """Stand-in for GitHubClient; fabricates PR data without network calls."""

    def __init__(self):
        # Fixed fake repository coordinates used by the canned PR URL.
        self.repo_owner = "mockuser"
        self.repo_name = "mock-repo"

    def create_pull_request(self, title: str, body: str, head: str, base: str, draft: bool = False):
        """Pretend to open a PR; echo the inputs back with a fixed number/URL."""
        pr = {
            "number": 123,
            "url": "https://github.com/mockuser/mock-repo/pull/123",
        }
        pr.update(title=title, body=body, head=head, base=base, draft=draft)
        return pr
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
class MockSecretManager:
    """Stand-in for SecretManager; serves fake credentials from a fixed table."""

    # Canned secrets handed out by get(); never real credentials.
    _MOCK_SECRETS = {
        "github_token": "ghp_mocktoken123",
        "anthropic_api_key": "sk-ant-mock123",
    }

    def __init__(self, project_path=None):
        self.project_path = project_path

    def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
        """Return a fake secret, or *default* when the key is unknown."""
        return self._MOCK_SECRETS.get(key, default)

    def set(self, key: str, value: str) -> None:
        """No-op: preview runs never persist secrets."""
        return None
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
# Public API of this module: the mock doubles workflow previews may import.
__all__ = [
    "MockGitStatus",
    "MockGitClient",
    "MockAIResponse",
    "MockAIClient",
    "MockGitHubClient",
    "MockSecretManager",
]
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Workflow result types for atomic steps.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from typing import Any, Optional, Union
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
@dataclass(frozen=True)
class Success:
    """
    A step finished without problems.

    Attributes:
        message: Optional human-readable success message.
        metadata: Key/value pairs the engine auto-merges into ctx.data.

    Examples:
        >>> return Success("User validated")
        >>> return Success("PR created", metadata={"pr_number": 123})
    """
    message: str = ""
    metadata: Optional[dict[str, Any]] = None


@dataclass(frozen=True)
class Error:
    """
    A step failed.

    Attributes:
        message: Human-readable error description (required).
        code: Numeric error code (default: 1).
        exception: The original exception, when one was caught.
        recoverable: True when the workflow may continue despite the failure.

    Examples:
        >>> return Error("GitHub not available")
        >>> return Error("API rate limit", code=429, recoverable=True)
        >>> return Error("Connection failed", exception=exc)
    """
    message: str
    code: int = 1
    exception: Optional[Exception] = None
    recoverable: bool = False


@dataclass(frozen=True)
class Skip:
    """
    A step was deliberately not run.

    Typical reasons: an optional tool is not configured, a precondition is
    not met, or the user chose to opt out.

    Attributes:
        message: Why the step was skipped (required).
        metadata: Key/value pairs the engine auto-merges into ctx.data.

    Examples:
        >>> return Skip("AI not configured")
        >>> return Skip("No changes detected", metadata={"clean": True})
        >>> return Skip("PR title already provided")
    """
    message: str
    metadata: Optional[dict[str, Any]] = None


# Every step returns exactly one of the three result types above.
WorkflowResult = Union[Success, Error, Skip]


# ============================================================================
# isinstance helpers so callers can test results without importing the classes
# ============================================================================

def is_success(result: WorkflowResult) -> bool:
    """Return True when *result* is a Success."""
    return isinstance(result, Success)


def is_error(result: WorkflowResult) -> bool:
    """Return True when *result* is an Error."""
    return isinstance(result, Error)


def is_skip(result: WorkflowResult) -> bool:
    """Return True when *result* is a Skip."""
    return isinstance(result, Skip)
|
|
@@ -0,0 +1,185 @@
|
|
|
1
|
+
"""
|
|
2
|
+
AI Code Assistant Step
|
|
3
|
+
|
|
4
|
+
Generic step that launches an AI coding assistant CLI (like Claude Code)
|
|
5
|
+
with context from previous workflow steps.
|
|
6
|
+
|
|
7
|
+
Can be used after linting, testing, builds, or any step that produces
|
|
8
|
+
errors or context that could benefit from AI assistance.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import json
|
|
12
|
+
|
|
13
|
+
from titan_cli.core.workflows.models import WorkflowStepModel
|
|
14
|
+
from titan_cli.engine.context import WorkflowContext
|
|
15
|
+
from titan_cli.engine.results import Success, Error, Skip, WorkflowResult
|
|
16
|
+
from titan_cli.external_cli.launcher import CLILauncher
|
|
17
|
+
from titan_cli.external_cli.configs import CLI_REGISTRY
|
|
18
|
+
from titan_cli.messages import msg
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def execute_ai_assistant_step(step: WorkflowStepModel, ctx: WorkflowContext) -> WorkflowResult:
    """
    Launch AI coding assistant with context from workflow.

    Parameters (in step.params):
        context_key: str - Key in ctx.data to read context from
        prompt_template: str - Template for the prompt (use {context} placeholder)
        ask_confirmation: bool - Whether to ask user before launching (default: True)
        fail_on_decline: bool - If True, return Error when user declines (default: False)
        cli_preference: str - Which CLI to use: "claude", "gemini", "auto" (default: "auto")

    Returns:
        Success (with metadata["ai_exit_code"]) when the assistant exits with
        code 0; Error on invalid parameters, declined confirmation with
        fail_on_decline, or a non-zero assistant exit code; Skip when there
        is no context data, no assistant CLI is installed, or the user
        opts out.

    Example workflow usage:
        - id: ai-help
          plugin: core
          step: ai_code_assistant
          params:
            context_key: "test_failures"
            prompt_template: "Help me fix these test failures:\n{context}"
            ask_confirmation: true
            fail_on_decline: true
          on_error: fail
    """
    # Every prompt/confirmation below goes through ctx.textual, so a missing
    # textual UI context is a hard error up front.
    if not ctx.textual:
        return Error(msg.AIAssistant.UI_CONTEXT_NOT_AVAILABLE)

    # Get parameters
    context_key = step.params.get("context_key")
    prompt_template = step.params.get("prompt_template", "{context}")
    ask_confirmation = step.params.get("ask_confirmation", True)
    fail_on_decline = step.params.get("fail_on_decline", False)
    cli_preference = step.params.get("cli_preference", "auto")

    # Validate cli_preference
    VALID_CLI_PREFERENCES = {"auto", "claude", "gemini"}
    if cli_preference not in VALID_CLI_PREFERENCES:
        return Error(f"Invalid cli_preference: {cli_preference}. Must be one of {VALID_CLI_PREFERENCES}")

    # Validate required parameters
    if not context_key:
        return Error(msg.AIAssistant.CONTEXT_KEY_REQUIRED)

    # Get context data (missing OR falsy data means nothing to assist with)
    context_data = ctx.data.get(context_key)
    if not context_data:
        # No context to work with - skip silently
        return Skip(msg.AIAssistant.NO_DATA_IN_CONTEXT.format(context_key=context_key))

    # Clear the context data immediately to prevent contamination of
    # subsequent steps: this step "consumes" its input. (The `in` check is
    # defensive; the truthy read above already implies the key exists.)
    if context_key in ctx.data:
        del ctx.data[context_key]

    # Build the prompt
    try:
        if isinstance(context_data, str):
            prompt = prompt_template.format(context=context_data)
        else:
            # If it's not a string, convert to string representation
            context_str = json.dumps(context_data, indent=2)
            prompt = prompt_template.format(context=context_str)
    except KeyError as e:
        # Template referenced a placeholder other than {context}
        return Error(msg.AIAssistant.INVALID_PROMPT_TEMPLATE.format(e=e))
    except Exception as e:
        # e.g. json.dumps failed on a non-serializable context value
        return Error(msg.AIAssistant.FAILED_TO_BUILD_PROMPT.format(e=e))

    # Ask for confirmation if needed
    if ask_confirmation:
        ctx.textual.text("")  # spacing
        should_launch = ctx.textual.ask_confirm(
            msg.AIAssistant.CONFIRM_LAUNCH_ASSISTANT,
            default=True
        )
        if not should_launch:
            # Declining is an Error only when the workflow author asked for it.
            if fail_on_decline:
                return Error(msg.AIAssistant.DECLINED_ASSISTANCE_STOPPED)
            return Skip(msg.AIAssistant.DECLINED_ASSISTANCE_SKIPPED)

    # Determine which CLI to use
    cli_to_launch = None

    # "auto" considers every registered CLI, in registry order.
    preferred_clis = []
    if cli_preference == "auto":
        preferred_clis = list(CLI_REGISTRY.keys())
    else:
        preferred_clis = [cli_preference]

    # Keep only the candidates actually installed on this machine.
    available_launchers = {}
    for cli_name in preferred_clis:
        config = CLI_REGISTRY.get(cli_name)
        if config:
            launcher = CLILauncher(
                cli_name=cli_name,
                install_instructions=config.get("install_instructions"),
                prompt_flag=config.get("prompt_flag")
            )
            if launcher.is_available():
                available_launchers[cli_name] = launcher

    if not available_launchers:
        # NOTE(review): Panel is imported lazily here and again below --
        # presumably to defer the widgets import; confirm against the
        # module import graph before hoisting.
        from titan_cli.ui.tui.widgets import Panel
        ctx.textual.mount(Panel(msg.AIAssistant.NO_ASSISTANT_CLI_FOUND, panel_type="warning"))
        return Skip(msg.AIAssistant.NO_ASSISTANT_CLI_FOUND)

    if len(available_launchers) == 1:
        # Single candidate: use it without prompting the user.
        cli_to_launch = list(available_launchers.keys())[0]
    else:
        # Show available CLIs with numbers
        ctx.textual.text("")  # spacing
        ctx.textual.text(msg.AIAssistant.SELECT_ASSISTANT_CLI, markup="bold cyan")

        cli_options = list(available_launchers.keys())
        for idx, cli_name in enumerate(cli_options, 1):
            display_name = CLI_REGISTRY[cli_name].get("display_name", cli_name)
            ctx.textual.text(f" {idx}. {display_name}")

        ctx.textual.text("")  # spacing
        choice_str = ctx.textual.ask_text("Select option (or press Enter to cancel):", default="")

        # Empty answer means "cancel" -> skip, never an error.
        if not choice_str or choice_str.strip() == "":
            return Skip(msg.AIAssistant.DECLINED_ASSISTANCE_SKIPPED)

        try:
            # Menu is 1-based for the user; convert to a 0-based index.
            choice_idx = int(choice_str.strip()) - 1
            if 0 <= choice_idx < len(cli_options):
                cli_to_launch = cli_options[choice_idx]
            else:
                ctx.textual.text("Invalid option selected", markup="red")
                return Skip(msg.AIAssistant.DECLINED_ASSISTANCE_SKIPPED)
        except ValueError:
            ctx.textual.text("Invalid input - must be a number", markup="red")
            return Skip(msg.AIAssistant.DECLINED_ASSISTANCE_SKIPPED)

    # Validate selection (defensive: both branches above set a valid key)
    if cli_to_launch not in available_launchers:
        return Error(f"Unknown CLI to launch: {cli_to_launch}")

    cli_name = CLI_REGISTRY[cli_to_launch].get("display_name", cli_to_launch)

    # Launch the CLI
    from titan_cli.ui.tui.widgets import Panel
    ctx.textual.text("")  # spacing
    ctx.textual.text(msg.AIAssistant.LAUNCHING_ASSISTANT.format(cli_name=cli_name), markup="cyan")

    # Show prompt preview (truncated to 100 chars to keep the UI readable)
    prompt_preview_text = msg.AIAssistant.PROMPT_PREVIEW.format(
        prompt_preview=f"{prompt[:100]}..." if len(prompt) > 100 else prompt
    )
    ctx.textual.text(prompt_preview_text, markup="dim")
    ctx.textual.text("")  # spacing

    project_root = ctx.get("project_root", ".")

    # Launch CLI and suspend TUI while it runs
    exit_code = ctx.textual.launch_external_cli(
        cli_name=cli_to_launch,
        prompt=prompt,
        cwd=project_root
    )

    ctx.textual.text("")  # spacing
    ctx.textual.mount(Panel(msg.AIAssistant.BACK_IN_TITAN, panel_type="success"))

    if exit_code != 0:
        return Error(msg.AIAssistant.ASSISTANT_EXITED_WITH_CODE.format(cli_name=cli_name, exit_code=exit_code))

    # NOTE(review): the success message reuses ASSISTANT_EXITED_WITH_CODE; a
    # dedicated success message may read better -- confirm intent.
    return Success(msg.AIAssistant.ASSISTANT_EXITED_WITH_CODE.format(cli_name=cli_name, exit_code=exit_code), metadata={"ai_exit_code": exit_code})
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from subprocess import Popen, PIPE
|
|
3
|
+
import re
|
|
4
|
+
import shlex
|
|
5
|
+
from titan_cli.core.workflows.models import WorkflowStepModel
|
|
6
|
+
from titan_cli.engine.context import WorkflowContext
|
|
7
|
+
from titan_cli.engine.results import Success, Error, WorkflowResult
|
|
8
|
+
from titan_cli.engine.utils import get_poetry_venv_env
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def resolve_parameters_in_string(text: str, ctx: WorkflowContext) -> str:
    """
    Substitute ${placeholder} tokens in *text* with values from ctx.data.

    Unknown placeholders are left untouched. Public so workflow_executor
    can reuse it.
    """
    pattern = re.compile(r'\$\{(\w+)\}')

    def _substitute(match):
        key = match.group(1)
        if key in ctx.data:
            return str(ctx.data[key])
        # No value available: keep the literal ${...} text.
        return match.group(0)

    return pattern.sub(_substitute, text)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def execute_command_step(step: WorkflowStepModel, ctx: WorkflowContext) -> WorkflowResult:
    """
    Executes a shell command defined in a workflow step.

    Reads step.command, substitutes ${placeholder} values from ctx.data, and
    runs the result as a subprocess. With step.use_shell the command goes
    through the shell (needed for pipes/redirection, but injection-prone);
    otherwise it is shlex-split and executed directly.

    Params (in step.params):
        use_venv: bool - prepend the poetry venv bin dir to PATH (default: False)

    Returns:
        Success with the captured stdout in metadata["command_output"], or
        Error when the command attribute is missing, the venv cannot be
        resolved, the executable is not found, or the process exits non-zero.
    """
    command_template = step.command
    if not command_template:
        return Error("Command step is missing the 'command' attribute.")

    command = resolve_parameters_in_string(command_template, ctx)

    if ctx.ui:
        ctx.ui.text.info(f"Executing command: {command}")

    try:
        use_venv = step.params.get("use_venv", False)
        process_env = os.environ.copy()
        cwd = ctx.get("cwd") or os.getcwd()

        if use_venv:
            if ctx.ui:
                ctx.ui.text.body("Activating poetry virtual environment for step...", style="dim")

            venv_env = get_poetry_venv_env(cwd=cwd)
            if venv_env:
                process_env = venv_env
            else:
                return Error("Could not determine poetry virtual environment.")

        # Determine command execution arguments based on security model
        if step.use_shell:
            # Insecure method for commands that need shell features (e.g., pipes)
            popen_args = {"args": command, "shell": True}
        else:
            # Secure method: split command into a list to avoid injection
            popen_args = {"args": shlex.split(command), "shell": False}

        process = Popen(
            **popen_args,
            stdout=PIPE,
            stderr=PIPE,
            text=True,
            cwd=cwd,
            env=process_env
        )

        stdout_output, stderr_output = process.communicate()

        # BUG FIX: this call previously ran unguarded and raised
        # AttributeError whenever the step executed without a UI context;
        # every other UI call in this function checks `if ctx.ui` first.
        if stdout_output and ctx.ui:
            # Print any output from the command
            ctx.ui.text.body(stdout_output)

        if process.returncode != 0:
            error_message = f"Command failed with exit code {process.returncode}"
            if stderr_output:
                error_message += f"\n{stderr_output}"

            return Error(error_message)

        return Success(
            message=f"Command '{command}' executed successfully.",
            metadata={"command_output": stdout_output}
        )

    except FileNotFoundError:
        # With shell=False the missing executable is the command's first token.
        command_to_report = command.split()[0] if not step.use_shell else command
        return Error(f"Command not found: {command_to_report}")
    except Exception as e:
        return Error(f"An unexpected error occurred: {e}", exception=e)
|
|
93
|
+
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
from subprocess import Popen, PIPE
|
|
4
|
+
from typing import Optional, Dict
|
|
5
|
+
|
|
6
|
+
def get_poetry_venv_env(cwd: Optional[str] = None) -> Optional[Dict[str, str]]:
    """
    Detects the poetry virtual environment and returns a modified environment
    dictionary with the venv's bin path prepended to PATH.

    Args:
        cwd: The working directory to run poetry commands from.

    Returns:
        A dictionary for the 'env' parameter of subprocess calls, or None if
        the venv could not be determined.
    """
    env = os.environ.copy()

    try:
        proc = Popen(
            ["poetry", "env", "info", "-p"],
            stdout=PIPE,
            stderr=PIPE,
            text=True,
            cwd=cwd,
        )
        stdout, _ = proc.communicate()
    except FileNotFoundError:
        # poetry command not found
        return None

    # A non-zero exit or empty output means poetry found no venv here.
    venv_root = stdout.strip() if proc.returncode == 0 else ""
    if not venv_root:
        return None

    bin_dir = Path(venv_root) / "bin"
    env["PATH"] = f"{bin_dir}:{env['PATH']}"
    return env
|