shotgun-sh 0.1.0.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (94)
  1. shotgun/__init__.py +3 -0
  2. shotgun/agents/__init__.py +1 -0
  3. shotgun/agents/agent_manager.py +196 -0
  4. shotgun/agents/common.py +295 -0
  5. shotgun/agents/config/__init__.py +13 -0
  6. shotgun/agents/config/manager.py +215 -0
  7. shotgun/agents/config/models.py +120 -0
  8. shotgun/agents/config/provider.py +91 -0
  9. shotgun/agents/history/__init__.py +5 -0
  10. shotgun/agents/history/history_processors.py +213 -0
  11. shotgun/agents/models.py +94 -0
  12. shotgun/agents/plan.py +119 -0
  13. shotgun/agents/research.py +131 -0
  14. shotgun/agents/tasks.py +122 -0
  15. shotgun/agents/tools/__init__.py +26 -0
  16. shotgun/agents/tools/codebase/__init__.py +28 -0
  17. shotgun/agents/tools/codebase/codebase_shell.py +256 -0
  18. shotgun/agents/tools/codebase/directory_lister.py +141 -0
  19. shotgun/agents/tools/codebase/file_read.py +144 -0
  20. shotgun/agents/tools/codebase/models.py +252 -0
  21. shotgun/agents/tools/codebase/query_graph.py +67 -0
  22. shotgun/agents/tools/codebase/retrieve_code.py +81 -0
  23. shotgun/agents/tools/file_management.py +130 -0
  24. shotgun/agents/tools/user_interaction.py +36 -0
  25. shotgun/agents/tools/web_search.py +69 -0
  26. shotgun/cli/__init__.py +1 -0
  27. shotgun/cli/codebase/__init__.py +5 -0
  28. shotgun/cli/codebase/commands.py +202 -0
  29. shotgun/cli/codebase/models.py +21 -0
  30. shotgun/cli/config.py +261 -0
  31. shotgun/cli/models.py +10 -0
  32. shotgun/cli/plan.py +65 -0
  33. shotgun/cli/research.py +78 -0
  34. shotgun/cli/tasks.py +71 -0
  35. shotgun/cli/utils.py +25 -0
  36. shotgun/codebase/__init__.py +12 -0
  37. shotgun/codebase/core/__init__.py +46 -0
  38. shotgun/codebase/core/change_detector.py +358 -0
  39. shotgun/codebase/core/code_retrieval.py +243 -0
  40. shotgun/codebase/core/ingestor.py +1497 -0
  41. shotgun/codebase/core/language_config.py +297 -0
  42. shotgun/codebase/core/manager.py +1554 -0
  43. shotgun/codebase/core/nl_query.py +327 -0
  44. shotgun/codebase/core/parser_loader.py +152 -0
  45. shotgun/codebase/models.py +107 -0
  46. shotgun/codebase/service.py +148 -0
  47. shotgun/logging_config.py +172 -0
  48. shotgun/main.py +73 -0
  49. shotgun/prompts/__init__.py +5 -0
  50. shotgun/prompts/agents/__init__.py +1 -0
  51. shotgun/prompts/agents/partials/codebase_understanding.j2 +79 -0
  52. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +10 -0
  53. shotgun/prompts/agents/partials/interactive_mode.j2 +8 -0
  54. shotgun/prompts/agents/plan.j2 +57 -0
  55. shotgun/prompts/agents/research.j2 +38 -0
  56. shotgun/prompts/agents/state/codebase/codebase_graphs_available.j2 +13 -0
  57. shotgun/prompts/agents/state/system_state.j2 +1 -0
  58. shotgun/prompts/agents/tasks.j2 +67 -0
  59. shotgun/prompts/codebase/__init__.py +1 -0
  60. shotgun/prompts/codebase/cypher_query_patterns.j2 +221 -0
  61. shotgun/prompts/codebase/cypher_system.j2 +28 -0
  62. shotgun/prompts/codebase/enhanced_query_context.j2 +10 -0
  63. shotgun/prompts/codebase/partials/cypher_rules.j2 +24 -0
  64. shotgun/prompts/codebase/partials/graph_schema.j2 +28 -0
  65. shotgun/prompts/codebase/partials/temporal_context.j2 +21 -0
  66. shotgun/prompts/history/__init__.py +1 -0
  67. shotgun/prompts/history/summarization.j2 +46 -0
  68. shotgun/prompts/loader.py +140 -0
  69. shotgun/prompts/user/research.j2 +5 -0
  70. shotgun/py.typed +0 -0
  71. shotgun/sdk/__init__.py +13 -0
  72. shotgun/sdk/codebase.py +195 -0
  73. shotgun/sdk/exceptions.py +17 -0
  74. shotgun/sdk/models.py +189 -0
  75. shotgun/sdk/services.py +23 -0
  76. shotgun/telemetry.py +68 -0
  77. shotgun/tui/__init__.py +0 -0
  78. shotgun/tui/app.py +49 -0
  79. shotgun/tui/components/prompt_input.py +69 -0
  80. shotgun/tui/components/spinner.py +86 -0
  81. shotgun/tui/components/splash.py +25 -0
  82. shotgun/tui/components/vertical_tail.py +28 -0
  83. shotgun/tui/screens/chat.py +415 -0
  84. shotgun/tui/screens/chat.tcss +28 -0
  85. shotgun/tui/screens/provider_config.py +221 -0
  86. shotgun/tui/screens/splash.py +31 -0
  87. shotgun/tui/styles.tcss +10 -0
  88. shotgun/utils/__init__.py +5 -0
  89. shotgun/utils/file_system_utils.py +31 -0
  90. shotgun_sh-0.1.0.dev1.dist-info/METADATA +318 -0
  91. shotgun_sh-0.1.0.dev1.dist-info/RECORD +94 -0
  92. shotgun_sh-0.1.0.dev1.dist-info/WHEEL +4 -0
  93. shotgun_sh-0.1.0.dev1.dist-info/entry_points.txt +3 -0
  94. shotgun_sh-0.1.0.dev1.dist-info/licenses/LICENSE +21 -0
shotgun/agents/models.py ADDED
@@ -0,0 +1,94 @@
+ """Pydantic models for agent dependencies and configuration."""
+
+ from asyncio import Future, Queue
+ from pathlib import Path
+ from typing import TYPE_CHECKING
+
+ from pydantic import BaseModel, ConfigDict, Field
+
+ from .config.models import ModelConfig
+
+ if TYPE_CHECKING:
+     from shotgun.codebase.service import CodebaseService
+
+
+ class UserAnswer(BaseModel):
+     """An answer from the user."""
+
+     answer: str = Field(
+         description="The answer from the user",
+     )
+     tool_call_id: str = Field(
+         description="Tool call id",
+     )
+
+
+ class UserQuestion(BaseModel):
+     """A question asked by the user."""
+
+     model_config = ConfigDict(arbitrary_types_allowed=True)
+
+     question: str = Field(
+         description="The question asked by the user",
+     )
+     tool_call_id: str = Field(
+         description="Tool call id",
+     )
+     result: Future[UserAnswer] = Field(
+         description="Future that will contain the user's answer"
+     )
+
+
+ class AgentRuntimeOptions(BaseModel):
+     """User interface options for agents."""
+
+     model_config = ConfigDict(arbitrary_types_allowed=True)
+
+     interactive_mode: bool = Field(
+         default=True,
+         description="Whether agents can interact with users (ask questions, etc.)",
+     )
+
+     working_directory: Path = Field(
+         default_factory=lambda: Path.cwd(),
+         description="Working directory for agent operations",
+     )
+
+     max_iterations: int = Field(
+         default=10,
+         ge=1,
+         le=100,
+         description="Maximum number of iterations for agent loops",
+     )
+
+     queue: Queue[UserQuestion] = Field(
+         default_factory=Queue,
+         description="Queue for storing user responses",
+     )
+
+     tasks: list[Future[UserAnswer]] = Field(
+         default_factory=list,
+         description="Tasks for storing deferred tool results",
+     )
+
+
+ class AgentDeps(AgentRuntimeOptions):
+     """Dependencies passed to all agents for configuration and runtime behavior."""
+
+     llm_model: ModelConfig = Field(
+         description="Model configuration with token limits and provider info",
+     )
+
+     codebase_service: "CodebaseService" = Field(
+         description="Codebase service for code analysis tools",
+     )
+
+
+ # Rebuild model to resolve forward references after imports are available
+ try:
+     from shotgun.codebase.service import CodebaseService
+
+     AgentDeps.model_rebuild()
+ except ImportError:
+     # CodebaseService may not be available in all contexts
+     pass
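
Editor's note (not part of the diff): the models above are plain Pydantic models, so runtime options can be constructed directly. AgentDeps additionally needs an llm_model and a codebase_service, which are normally supplied by the agent factories via create_base_agent rather than by callers. A minimal sketch of configuring AgentRuntimeOptions; the field names come from the file above, the values are purely illustrative:

from pathlib import Path

from shotgun.agents.models import AgentRuntimeOptions

# Non-interactive run, scoped to a specific checkout (illustrative values)
options = AgentRuntimeOptions(
    interactive_mode=False,
    working_directory=Path("/path/to/repo"),
    max_iterations=20,
)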
shotgun/agents/plan.py ADDED
@@ -0,0 +1,119 @@
+ """Plan agent factory and functions using Pydantic AI with file-based memory."""
+
+ from pydantic_ai import (
+     Agent,
+     DeferredToolRequests,
+     RunContext,
+ )
+ from pydantic_ai.agent import AgentRunResult
+ from pydantic_ai.messages import ModelMessage
+
+ from shotgun.agents.config import ProviderType
+ from shotgun.logging_config import get_logger
+ from shotgun.prompts import PromptLoader
+
+ from .common import (
+     add_system_status_message,
+     create_base_agent,
+     create_usage_limits,
+     ensure_file_exists,
+     get_file_history,
+     run_agent,
+ )
+ from .models import AgentDeps, AgentRuntimeOptions
+
+ logger = get_logger(__name__)
+
+ # Global prompt loader instance
+ prompt_loader = PromptLoader()
+
+
+ def _build_plan_agent_system_prompt(ctx: RunContext[AgentDeps]) -> str:
+     """Build the system prompt for the plan agent.
+
+     Args:
+         ctx: RunContext containing AgentDeps with interactive_mode and other settings
+
+     Returns:
+         The complete system prompt string for the plan agent
+     """
+     return prompt_loader.render(
+         "agents/plan.j2", interactive_mode=ctx.deps.interactive_mode, context="plans"
+     )
+
+
+ def create_plan_agent(
+     agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
+ ) -> tuple[Agent[AgentDeps, str | DeferredToolRequests], AgentDeps]:
+     """Create a plan agent with file management capabilities.
+
+     Args:
+         agent_runtime_options: Agent runtime options for the agent
+         provider: Optional provider override. If None, uses configured default
+
+     Returns:
+         Tuple of (Configured Pydantic AI agent for planning tasks, Agent dependencies)
+     """
+     logger.debug("Initializing plan agent")
+     agent, deps = create_base_agent(
+         _build_plan_agent_system_prompt, agent_runtime_options, provider=provider
+     )
+     return agent, deps
+
+
+ async def run_plan_agent(
+     agent: Agent[AgentDeps, str | DeferredToolRequests],
+     goal: str,
+     deps: AgentDeps,
+     message_history: list[ModelMessage] | None = None,
+ ) -> AgentRunResult[str | DeferredToolRequests]:
+     """Create or update a plan based on the given goal.
+
+     Args:
+         agent: The configured plan agent
+         goal: The planning goal or instruction
+         deps: Agent dependencies
+         message_history: Optional message history for conversation continuity
+
+     Returns:
+         AgentRunResult containing the planning process output
+     """
+     logger.debug("📋 Starting planning for goal: %s", goal)
+
+     # Ensure plan.md exists
+     ensure_file_exists("plan.md", "# Plan")
+
+     # Let the agent use its tools to read existing plan and research
+     full_prompt = f"Create a comprehensive plan for: {goal}"
+     try:
+         # Create usage limits for responsible API usage
+         usage_limits = create_usage_limits()
+
+         message_history = await add_system_status_message(deps, message_history)
+
+         result = await run_agent(
+             agent=agent,
+             prompt=full_prompt,
+             deps=deps,
+             message_history=message_history,
+             usage_limits=usage_limits,
+         )
+
+         logger.debug("✅ Planning completed successfully")
+         return result
+
+     except Exception as e:
+         import traceback
+
+         logger.error("Full traceback:\n%s", traceback.format_exc())
+         logger.error("❌ Planning failed: %s", str(e))
+         raise
+
+
+ def get_plan_history() -> str:
+     """Get the full plan history from the file.
+
+     Returns:
+         Plan history content or fallback message
+     """
+     return get_file_history("plan.md")
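
Editor's note (not part of the diff): a minimal end-to-end sketch of the factory and runner above. It assumes a provider has already been configured so create_plan_agent can resolve the default model, and that AgentRunResult.output carries the final response, which per the type hints is either a string or a DeferredToolRequests:

import asyncio

from shotgun.agents.models import AgentRuntimeOptions
from shotgun.agents.plan import create_plan_agent, run_plan_agent


async def main() -> None:
    # Build the agent and its dependencies from runtime options
    agent, deps = create_plan_agent(AgentRuntimeOptions(interactive_mode=False))
    # Ask the agent to draft a plan; plan.md is created if missing
    result = await run_plan_agent(agent, "Add a REST API for task export", deps)
    print(result.output)


asyncio.run(main())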
shotgun/agents/research.py ADDED
@@ -0,0 +1,131 @@
+ """Research agent factory and functions using Pydantic AI with file-based memory."""
+
+ from pydantic_ai import (
+     Agent,
+     DeferredToolRequests,
+     RunContext,
+ )
+ from pydantic_ai.agent import AgentRunResult
+ from pydantic_ai.messages import (
+     ModelMessage,
+ )
+
+ from shotgun.agents.config import ProviderType
+ from shotgun.logging_config import get_logger
+ from shotgun.prompts import PromptLoader
+
+ from .common import (
+     add_system_status_message,
+     create_base_agent,
+     create_usage_limits,
+     ensure_file_exists,
+     get_file_history,
+     run_agent,
+ )
+ from .models import AgentDeps, AgentRuntimeOptions
+ from .tools import web_search_tool
+
+ logger = get_logger(__name__)
+
+ # Global prompt loader instance
+ prompt_loader = PromptLoader()
+
+
+ def _build_research_agent_system_prompt(ctx: RunContext[AgentDeps]) -> str:
+     """Build the system prompt for the research agent.
+
+     Args:
+         ctx: RunContext containing AgentDeps with interactive_mode and other settings
+
+     Returns:
+         The complete system prompt string for the research agent
+     """
+     return prompt_loader.render(
+         "agents/research.j2",
+         interactive_mode=ctx.deps.interactive_mode,
+         context="research output",
+     )
+
+
+ def create_research_agent(
+     agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
+ ) -> tuple[Agent[AgentDeps, str | DeferredToolRequests], AgentDeps]:
+     """Create a research agent with web search capabilities.
+
+     Args:
+         agent_runtime_options: Agent runtime options for the agent
+         provider: Optional provider override. If None, uses configured default
+
+     Returns:
+         Tuple of (Configured Pydantic AI agent for research tasks, Agent dependencies)
+     """
+     logger.debug("Initializing research agent")
+     agent, deps = create_base_agent(
+         _build_research_agent_system_prompt,
+         agent_runtime_options,
+         load_codebase_understanding_tools=True,
+         additional_tools=[web_search_tool],
+         provider=provider,
+     )
+     return agent, deps
+
+
+ async def run_research_agent(
+     agent: Agent[AgentDeps, str | DeferredToolRequests],
+     query: str,
+     deps: AgentDeps,
+     message_history: list[ModelMessage] | None = None,
+ ) -> AgentRunResult[str | DeferredToolRequests]:
+     """Perform research on the given query and update the research file.
+
+     Args:
+         agent: The configured research agent
+         query: The research query to investigate
+         deps: Agent dependencies
+
+     Returns:
+         AgentRunResult containing the research findings
+     """
+     logger.debug("🔬 Starting research for query: %s", query)
+
+     # Ensure research.md exists
+     ensure_file_exists("research.md", "# Research")
+
+     message_history = await add_system_status_message(deps, message_history)
+
+     user_prompt = prompt_loader.render(
+         "user/research.j2",
+         user_query=query,
+         context="research output",
+     )
+
+     try:
+         # Create usage limits for responsible API usage
+         usage_limits = create_usage_limits()
+
+         result = await run_agent(
+             agent=agent,
+             prompt=user_prompt,
+             deps=deps,
+             message_history=message_history,
+             usage_limits=usage_limits,
+         )
+
+         logger.debug("✅ Research completed successfully")
+         return result
+
+     except Exception as e:
+         import traceback
+
+         logger.error("Full traceback:\n%s", traceback.format_exc())
+         logger.error("❌ Research failed: %s", str(e))
+         raise
+
+
+ def get_research_history() -> str:
+     """Get the full research history from the file.
+
+     Returns:
+         Research history content or fallback message
+     """
+     return get_file_history("research.md")
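
Editor's note (not part of the diff): since the agent's output type is str | DeferredToolRequests, callers are expected to branch on the result. A hedged sketch; the exact deferred-tool handling depends on pydantic_ai's DeferredToolRequests API and the ask_user flow wired elsewhere in the package:

from pydantic_ai import DeferredToolRequests

from shotgun.agents.models import AgentRuntimeOptions
from shotgun.agents.research import create_research_agent, run_research_agent


async def research(query: str) -> str:
    agent, deps = create_research_agent(AgentRuntimeOptions(interactive_mode=True))
    result = await run_research_agent(agent, query, deps)
    if isinstance(result.output, DeferredToolRequests):
        # The agent paused on a deferred tool call (e.g. a question for the user);
        # the TUI/CLI layers resolve these via the queue on AgentRuntimeOptions.
        return "research paused awaiting user input"
    return result.output  # plain string answer; findings are also written to research.md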
shotgun/agents/tasks.py ADDED
@@ -0,0 +1,122 @@
+ """Tasks agent factory and functions using Pydantic AI with file-based memory."""
+
+ from pydantic_ai import (
+     Agent,
+     DeferredToolRequests,
+     RunContext,
+ )
+ from pydantic_ai.agent import AgentRunResult
+ from pydantic_ai.messages import ModelMessage
+
+ from shotgun.agents.config import ProviderType
+ from shotgun.logging_config import get_logger
+ from shotgun.prompts import PromptLoader
+
+ from .common import (
+     add_system_status_message,
+     create_base_agent,
+     create_usage_limits,
+     ensure_file_exists,
+     get_file_history,
+     run_agent,
+ )
+ from .models import AgentDeps, AgentRuntimeOptions
+
+ logger = get_logger(__name__)
+
+ # Global prompt loader instance
+ prompt_loader = PromptLoader()
+
+
+ def _build_tasks_agent_system_prompt(ctx: RunContext[AgentDeps]) -> str:
+     """Build the system prompt for the tasks agent.
+
+     Args:
+         ctx: RunContext containing AgentDeps with interactive_mode and other settings
+
+     Returns:
+         The complete system prompt string for the tasks agent
+     """
+     return prompt_loader.render(
+         "agents/tasks.j2",
+         interactive_mode=ctx.deps.interactive_mode,
+         context="task lists",
+     )
+
+
+ def create_tasks_agent(
+     agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
+ ) -> tuple[Agent[AgentDeps, str | DeferredToolRequests], AgentDeps]:
+     """Create a tasks agent with file management capabilities.
+
+     Args:
+         agent_runtime_options: Agent runtime options for the agent
+         provider: Optional provider override. If None, uses configured default
+
+     Returns:
+         Tuple of (Configured Pydantic AI agent for task management, Agent dependencies)
+     """
+     logger.debug("Initializing tasks agent")
+     agent, deps = create_base_agent(
+         _build_tasks_agent_system_prompt, agent_runtime_options, provider=provider
+     )
+     return agent, deps
+
+
+ async def run_tasks_agent(
+     agent: Agent[AgentDeps, str | DeferredToolRequests],
+     instruction: str,
+     deps: AgentDeps,
+     message_history: list[ModelMessage] | None = None,
+ ) -> AgentRunResult[str | DeferredToolRequests]:
+     """Create or update tasks based on the given instruction.
+
+     Args:
+         agent: The configured tasks agent
+         instruction: The task creation/update instruction
+         deps: Agent dependencies
+         message_history: Optional message history for conversation continuity
+
+     Returns:
+         AgentRunResult containing the task creation process output
+     """
+     logger.debug("📋 Starting task creation for instruction: %s", instruction)
+
+     # Ensure tasks.md exists
+     ensure_file_exists("tasks.md", "# Tasks")
+
+     message_history = await add_system_status_message(deps, message_history)
+
+     # Let the agent use its tools to read existing tasks, plan, and research
+     full_prompt = f"Create or update tasks based on: {instruction}"
+
+     try:
+         # Create usage limits for responsible API usage
+         usage_limits = create_usage_limits()
+
+         result = await run_agent(
+             agent=agent,
+             prompt=full_prompt,
+             deps=deps,
+             message_history=message_history,
+             usage_limits=usage_limits,
+         )
+
+         logger.debug("✅ Task creation completed successfully")
+         return result
+
+     except Exception as e:
+         import traceback
+
+         logger.error("Full traceback:\n%s", traceback.format_exc())
+         logger.error("❌ Task creation failed: %s", str(e))
+         raise
+
+
+ def get_tasks_history() -> str:
+     """Get the full tasks history from the file.
+
+     Returns:
+         Tasks history content or fallback message
+     """
+     return get_file_history("tasks.md")
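
Editor's note (not part of the diff): all three runners accept an optional message_history, so the agents can be chained over one conversation. A sketch of feeding the research conversation into the tasks agent, assuming pydantic_ai's AgentRunResult.all_messages() accessor for the accumulated ModelMessage list:

from shotgun.agents.models import AgentRuntimeOptions
from shotgun.agents.research import create_research_agent, run_research_agent
from shotgun.agents.tasks import create_tasks_agent, run_tasks_agent


async def research_then_tasks(topic: str) -> None:
    options = AgentRuntimeOptions(interactive_mode=False)

    research_agent, research_deps = create_research_agent(options)
    research_result = await run_research_agent(research_agent, topic, research_deps)

    tasks_agent, tasks_deps = create_tasks_agent(options)
    # Carry the research conversation forward so the tasks agent sees its context
    await run_tasks_agent(
        tasks_agent,
        "Turn the research findings into actionable tasks",
        tasks_deps,
        message_history=research_result.all_messages(),
    )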
shotgun/agents/tools/__init__.py ADDED
@@ -0,0 +1,26 @@
+ """Tools package for Pydantic AI agents."""
+
+ from .codebase import (
+     codebase_shell,
+     directory_lister,
+     file_read,
+     query_graph,
+     retrieve_code,
+ )
+ from .file_management import append_file, read_file, write_file
+ from .user_interaction import ask_user
+ from .web_search import web_search_tool
+
+ __all__ = [
+     "web_search_tool",
+     "ask_user",
+     "read_file",
+     "write_file",
+     "append_file",
+     # Codebase understanding tools
+     "query_graph",
+     "retrieve_code",
+     "file_read",
+     "directory_lister",
+     "codebase_shell",
+ ]
shotgun/agents/tools/codebase/__init__.py ADDED
@@ -0,0 +1,28 @@
+ """Codebase understanding tools for Pydantic AI agents."""
+
+ from .codebase_shell import codebase_shell
+ from .directory_lister import directory_lister
+ from .file_read import file_read
+ from .models import (
+     CodeSnippetResult,
+     DirectoryListResult,
+     FileReadResult,
+     QueryGraphResult,
+     ShellCommandResult,
+ )
+ from .query_graph import query_graph
+ from .retrieve_code import retrieve_code
+
+ __all__ = [
+     "query_graph",
+     "retrieve_code",
+     "file_read",
+     "directory_lister",
+     "codebase_shell",
+     # Result models
+     "QueryGraphResult",
+     "CodeSnippetResult",
+     "FileReadResult",
+     "DirectoryListResult",
+     "ShellCommandResult",
+ ]