shotgun-sh 0.1.0.dev3__tar.gz → 0.1.0.dev4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of shotgun-sh might be problematic. Click here for more details.

Files changed (101) hide show
  1. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/PKG-INFO +5 -5
  2. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/pyproject.toml +5 -5
  3. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/common.py +2 -1
  4. shotgun_sh-0.1.0.dev4/src/shotgun/agents/config/models.py +125 -0
  5. shotgun_sh-0.1.0.dev4/src/shotgun/agents/config/provider.py +171 -0
  6. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/research.py +13 -2
  7. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/tools/__init__.py +10 -2
  8. shotgun_sh-0.1.0.dev4/src/shotgun/agents/tools/web_search/__init__.py +60 -0
  9. shotgun_sh-0.1.0.dev4/src/shotgun/agents/tools/web_search/anthropic.py +86 -0
  10. shotgun_sh-0.1.0.dev4/src/shotgun/agents/tools/web_search/gemini.py +85 -0
  11. shotgun_sh-0.1.0.dev3/src/shotgun/agents/tools/web_search.py → shotgun_sh-0.1.0.dev4/src/shotgun/agents/tools/web_search/openai.py +23 -8
  12. shotgun_sh-0.1.0.dev4/src/shotgun/agents/tools/web_search/utils.py +20 -0
  13. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/cli/research.py +3 -7
  14. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/codebase/core/nl_query.py +2 -1
  15. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/codebase/core/parser_loader.py +4 -28
  16. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/main.py +10 -5
  17. shotgun_sh-0.1.0.dev4/src/shotgun/telemetry.py +44 -0
  18. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/tui/screens/chat.py +22 -4
  19. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/tui/screens/chat.tcss +4 -0
  20. shotgun_sh-0.1.0.dev3/src/shotgun/agents/config/models.py +0 -120
  21. shotgun_sh-0.1.0.dev3/src/shotgun/agents/config/provider.py +0 -91
  22. shotgun_sh-0.1.0.dev3/src/shotgun/telemetry.py +0 -68
  23. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/.gitignore +0 -0
  24. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/LICENSE +0 -0
  25. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/README.md +0 -0
  26. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/__init__.py +0 -0
  27. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/__init__.py +0 -0
  28. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/agent_manager.py +0 -0
  29. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/config/__init__.py +0 -0
  30. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/config/manager.py +0 -0
  31. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/history/__init__.py +0 -0
  32. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/history/history_processors.py +0 -0
  33. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/models.py +0 -0
  34. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/plan.py +0 -0
  35. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/tasks.py +0 -0
  36. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/tools/codebase/__init__.py +0 -0
  37. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/tools/codebase/codebase_shell.py +0 -0
  38. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/tools/codebase/directory_lister.py +0 -0
  39. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/tools/codebase/file_read.py +0 -0
  40. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/tools/codebase/models.py +0 -0
  41. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/tools/codebase/query_graph.py +0 -0
  42. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/tools/codebase/retrieve_code.py +0 -0
  43. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/tools/file_management.py +0 -0
  44. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/agents/tools/user_interaction.py +0 -0
  45. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/cli/__init__.py +0 -0
  46. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/cli/codebase/__init__.py +0 -0
  47. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/cli/codebase/commands.py +0 -0
  48. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/cli/codebase/models.py +0 -0
  49. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/cli/config.py +0 -0
  50. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/cli/models.py +0 -0
  51. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/cli/plan.py +0 -0
  52. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/cli/tasks.py +0 -0
  53. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/cli/utils.py +0 -0
  54. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/codebase/__init__.py +0 -0
  55. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/codebase/core/__init__.py +0 -0
  56. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/codebase/core/change_detector.py +0 -0
  57. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/codebase/core/code_retrieval.py +0 -0
  58. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/codebase/core/ingestor.py +0 -0
  59. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/codebase/core/language_config.py +0 -0
  60. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/codebase/core/manager.py +0 -0
  61. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/codebase/models.py +0 -0
  62. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/codebase/service.py +0 -0
  63. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/logging_config.py +0 -0
  64. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/__init__.py +0 -0
  65. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/agents/__init__.py +0 -0
  66. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/agents/partials/codebase_understanding.j2 +0 -0
  67. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +0 -0
  68. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/agents/partials/interactive_mode.j2 +0 -0
  69. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/agents/plan.j2 +0 -0
  70. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/agents/research.j2 +0 -0
  71. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/agents/state/codebase/codebase_graphs_available.j2 +0 -0
  72. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/agents/state/system_state.j2 +0 -0
  73. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/agents/tasks.j2 +0 -0
  74. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/codebase/__init__.py +0 -0
  75. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/codebase/cypher_query_patterns.j2 +0 -0
  76. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/codebase/cypher_system.j2 +0 -0
  77. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/codebase/enhanced_query_context.j2 +0 -0
  78. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/codebase/partials/cypher_rules.j2 +0 -0
  79. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/codebase/partials/graph_schema.j2 +0 -0
  80. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/codebase/partials/temporal_context.j2 +0 -0
  81. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/history/__init__.py +0 -0
  82. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/history/summarization.j2 +0 -0
  83. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/loader.py +0 -0
  84. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/prompts/user/research.j2 +0 -0
  85. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/py.typed +0 -0
  86. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/sdk/__init__.py +0 -0
  87. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/sdk/codebase.py +0 -0
  88. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/sdk/exceptions.py +0 -0
  89. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/sdk/models.py +0 -0
  90. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/sdk/services.py +0 -0
  91. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/tui/__init__.py +0 -0
  92. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/tui/app.py +0 -0
  93. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/tui/components/prompt_input.py +0 -0
  94. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/tui/components/spinner.py +0 -0
  95. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/tui/components/splash.py +0 -0
  96. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/tui/components/vertical_tail.py +0 -0
  97. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/tui/screens/provider_config.py +0 -0
  98. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/tui/screens/splash.py +0 -0
  99. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/tui/styles.tcss +0 -0
  100. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/utils/__init__.py +0 -0
  101. {shotgun_sh-0.1.0.dev3 → shotgun_sh-0.1.0.dev4}/src/shotgun/utils/file_system_utils.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: shotgun-sh
3
- Version: 0.1.0.dev3
3
+ Version: 0.1.0.dev4
4
4
  Summary: AI-powered research, planning, and task management CLI tool
5
5
  Project-URL: Homepage, https://shotgun.sh/
6
6
  Project-URL: Repository, https://github.com/shotgun-sh/shotgun
@@ -22,13 +22,13 @@ Classifier: Programming Language :: Python :: 3.12
22
22
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
23
23
  Classifier: Topic :: Utilities
24
24
  Requires-Python: >=3.10
25
+ Requires-Dist: anthropic>=0.39.0
26
+ Requires-Dist: google-generativeai>=0.8.0
25
27
  Requires-Dist: httpx>=0.27.0
26
28
  Requires-Dist: jinja2>=3.1.0
27
29
  Requires-Dist: kuzu>=0.7.0
28
- Requires-Dist: openinference-instrumentation-pydantic-ai
29
- Requires-Dist: opentelemetry-api
30
- Requires-Dist: opentelemetry-exporter-otlp
31
- Requires-Dist: opentelemetry-sdk
30
+ Requires-Dist: logfire[pydantic-ai]>=2.0.0
31
+ Requires-Dist: openai>=1.0.0
32
32
  Requires-Dist: pydantic-ai>=0.0.14
33
33
  Requires-Dist: rich>=13.0.0
34
34
  Requires-Dist: textual-dev>=1.7.0
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "shotgun-sh"
3
- version = "0.1.0.dev3"
3
+ version = "0.1.0.dev4"
4
4
  description = "AI-powered research, planning, and task management CLI tool"
5
5
  readme = "README.md"
6
6
  license = { text = "MIT" }
@@ -28,10 +28,7 @@ dependencies = [
28
28
  "pydantic-ai>=0.0.14",
29
29
  "httpx>=0.27.0",
30
30
  "jinja2>=3.1.0",
31
- "openinference-instrumentation-pydantic-ai",
32
- "opentelemetry-sdk",
33
- "opentelemetry-exporter-otlp",
34
- "opentelemetry-api",
31
+ "logfire[pydantic-ai]>=2.0.0",
35
32
  "textual>=6.1.0",
36
33
  "textual-dev>=1.7.0",
37
34
  "kuzu>=0.7.0",
@@ -42,6 +39,9 @@ dependencies = [
42
39
  "tree-sitter-go>=0.23.0",
43
40
  "tree-sitter-rust>=0.23.0",
44
41
  "watchdog>=4.0.0",
42
+ "openai>=1.0.0",
43
+ "anthropic>=0.39.0",
44
+ "google-generativeai>=0.8.0",
45
45
  ]
46
46
 
47
47
  [project.urls]
@@ -170,7 +170,8 @@ def create_base_agent(
170
170
  provider_name.upper(),
171
171
  model_config.name,
172
172
  )
173
- model = model_config.pydantic_model_name
173
+ # Use the Model instance directly (has API key baked in)
174
+ model = model_config.model_instance
174
175
 
175
176
  # Create deps with model config and codebase service
176
177
  codebase_service = get_codebase_service()
"""Pydantic models for configuration."""

from enum import Enum

from pydantic import BaseModel, Field, PrivateAttr, SecretStr
from pydantic_ai.models import Model


class ProviderType(str, Enum):
    """Provider types for AI services."""

    OPENAI = "openai"
    ANTHROPIC = "anthropic"
    GOOGLE = "google"


class ModelSpec(BaseModel):
    """Static specification for a model - just metadata."""

    # Model identifier (e.g., "gpt-5", "claude-opus-4-1")
    name: str
    provider: ProviderType
    max_input_tokens: int
    max_output_tokens: int


class ModelConfig(ModelSpec):
    """A fully configured model with API key and settings.

    Inherits the metadata fields (name, provider, token limits) from
    ModelSpec instead of redeclaring them, so the spec schema is defined
    in exactly one place. Constructor keywords are unchanged.
    """

    api_key: str
    # Lazily created pydantic-ai Model; not part of the pydantic schema.
    _model_instance: Model | None = PrivateAttr(default=None)

    class Config:
        # pydantic_ai.models.Model is not a pydantic type; required for the
        # private attribute above.
        arbitrary_types_allowed = True

    @property
    def model_instance(self) -> Model:
        """Lazy load the Model instance (cached per ModelConfig)."""
        if self._model_instance is None:
            # Local import to avoid a circular import with .provider,
            # which imports ModelConfig from this module.
            from .provider import get_or_create_model

            self._model_instance = get_or_create_model(
                self.provider, self.name, self.api_key
            )
        return self._model_instance

    @property
    def pydantic_model_name(self) -> str:
        """Compute the full Pydantic AI model identifier. For backward compatibility."""
        provider_prefix = {
            ProviderType.OPENAI: "openai",
            ProviderType.ANTHROPIC: "anthropic",
            ProviderType.GOOGLE: "google-gla",
        }
        return f"{provider_prefix[self.provider]}:{self.name}"


# Model specifications registry (static metadata)
MODEL_SPECS: dict[str, ModelSpec] = {
    "gpt-5": ModelSpec(
        name="gpt-5",
        provider=ProviderType.OPENAI,
        max_input_tokens=400_000,
        max_output_tokens=128_000,
    ),
    "gpt-4o": ModelSpec(
        name="gpt-4o",
        provider=ProviderType.OPENAI,
        max_input_tokens=128_000,
        max_output_tokens=16_000,
    ),
    "claude-opus-4-1": ModelSpec(
        name="claude-opus-4-1",
        provider=ProviderType.ANTHROPIC,
        max_input_tokens=200_000,
        max_output_tokens=32_000,
    ),
    "claude-3-5-sonnet-latest": ModelSpec(
        name="claude-3-5-sonnet-latest",
        provider=ProviderType.ANTHROPIC,
        max_input_tokens=200_000,
        max_output_tokens=20_000,
    ),
    "gemini-2.5-pro": ModelSpec(
        name="gemini-2.5-pro",
        provider=ProviderType.GOOGLE,
        max_input_tokens=1_000_000,
        max_output_tokens=64_000,
    ),
}


class OpenAIConfig(BaseModel):
    """Configuration for OpenAI provider."""

    api_key: SecretStr | None = None
    model_name: str = "gpt-5"


class AnthropicConfig(BaseModel):
    """Configuration for Anthropic provider."""

    api_key: SecretStr | None = None
    model_name: str = "claude-opus-4-1"


class GoogleConfig(BaseModel):
    """Configuration for Google provider."""

    api_key: SecretStr | None = None
    model_name: str = "gemini-2.5-pro"


class ShotgunConfig(BaseModel):
    """Main configuration for Shotgun CLI."""

    openai: OpenAIConfig = Field(default_factory=OpenAIConfig)
    anthropic: AnthropicConfig = Field(default_factory=AnthropicConfig)
    google: GoogleConfig = Field(default_factory=GoogleConfig)
    default_provider: ProviderType = Field(
        default=ProviderType.OPENAI, description="Default AI provider to use"
    )
"""Provider management for LLM configuration."""

import os

from pydantic import SecretStr
from pydantic_ai.models import Model
from pydantic_ai.models.anthropic import AnthropicModel
from pydantic_ai.models.google import GoogleModel
from pydantic_ai.models.openai import OpenAIChatModel
from pydantic_ai.providers.anthropic import AnthropicProvider
from pydantic_ai.providers.google import GoogleProvider
from pydantic_ai.providers.openai import OpenAIProvider

from shotgun.logging_config import get_logger

from .manager import get_config_manager
from .models import MODEL_SPECS, ModelConfig, ProviderType

logger = get_logger(__name__)

# Global cache for Model instances (singleton pattern).
# NOTE(review): keyed on the raw api_key string; keys live in process memory
# only, but avoid logging cache keys.
_model_cache: dict[tuple[ProviderType, str, str], Model] = {}

# Per-provider wiring: (attribute on ShotgunConfig, env-var fallback,
# display name used in error messages). Adding a provider means adding
# one row here instead of a new copy-pasted branch below.
_PROVIDER_SETTINGS: dict[ProviderType, tuple[str, str, str]] = {
    ProviderType.OPENAI: ("openai", "OPENAI_API_KEY", "OpenAI"),
    ProviderType.ANTHROPIC: ("anthropic", "ANTHROPIC_API_KEY", "Anthropic"),
    ProviderType.GOOGLE: ("google", "GEMINI_API_KEY", "Gemini"),
}


def get_or_create_model(provider: ProviderType, model_name: str, api_key: str) -> Model:
    """Get or create a singleton Model instance.

    Args:
        provider: Provider type
        model_name: Name of the model
        api_key: API key for the provider

    Returns:
        Cached or newly created Model instance

    Raises:
        ValueError: If provider is not supported
    """
    cache_key = (provider, model_name, api_key)

    if cache_key in _model_cache:
        logger.debug("Reusing cached %s model instance: %s", provider.value, model_name)
        return _model_cache[cache_key]

    logger.debug("Creating new %s model instance: %s", provider.value, model_name)

    model: Model
    if provider == ProviderType.OPENAI:
        model = OpenAIChatModel(model_name, provider=OpenAIProvider(api_key=api_key))
    elif provider == ProviderType.ANTHROPIC:
        model = AnthropicModel(model_name, provider=AnthropicProvider(api_key=api_key))
    elif provider == ProviderType.GOOGLE:
        model = GoogleModel(model_name, provider=GoogleProvider(api_key=api_key))
    else:
        raise ValueError(f"Unsupported provider: {provider}")

    _model_cache[cache_key] = model
    return model


def get_provider_model(provider: ProviderType | None = None) -> ModelConfig:
    """Get a fully configured ModelConfig with API key and Model instance.

    Args:
        provider: Provider to get model for. If None, uses default provider.
            A plain string value (e.g. "openai") is also accepted and coerced.

    Returns:
        ModelConfig with API key configured and lazy Model instance

    Raises:
        ValueError: If provider is not configured properly or model not found
    """
    config = get_config_manager().load()

    # Convert string to ProviderType enum if needed; fall back to the
    # configured default provider when none was given.
    if isinstance(provider, ProviderType):
        provider_enum = provider
    elif provider:
        provider_enum = ProviderType(provider)
    else:
        provider_enum = config.default_provider

    try:
        config_attr, env_var, display_name = _PROVIDER_SETTINGS[provider_enum]
    except KeyError:
        raise ValueError(f"Unsupported provider: {provider_enum}") from None

    provider_config = getattr(config, config_attr)

    api_key = _get_api_key(provider_config.api_key, env_var)
    if not api_key:
        raise ValueError(
            f"{display_name} API key not configured. "
            f"Set via environment variable {env_var} or config."
        )

    # Look up the static model spec for the configured model name.
    model_name = provider_config.model_name
    if model_name not in MODEL_SPECS:
        raise ValueError(f"Model '{model_name}' not found")
    spec = MODEL_SPECS[model_name]

    # Create fully configured ModelConfig
    return ModelConfig(
        name=spec.name,
        provider=spec.provider,
        max_input_tokens=spec.max_input_tokens,
        max_output_tokens=spec.max_output_tokens,
        api_key=api_key,
    )


def _get_api_key(config_key: SecretStr | None, env_var: str) -> str | None:
    """Get API key from config or environment variable.

    Args:
        config_key: API key from configuration
        env_var: Environment variable name to check

    Returns:
        API key string or None
    """
    if config_key is not None:
        return config_key.get_secret_value()

    return os.getenv(env_var)
@@ -23,7 +23,7 @@ from .common import (
23
23
  run_agent,
24
24
  )
25
25
  from .models import AgentDeps, AgentRuntimeOptions
26
- from .tools import web_search_tool
26
+ from .tools import get_available_web_search_tools
27
27
 
28
28
  logger = get_logger(__name__)
29
29
 
@@ -60,11 +60,22 @@ def create_research_agent(
60
60
  Tuple of (Configured Pydantic AI agent for research tasks, Agent dependencies)
61
61
  """
62
62
  logger.debug("Initializing research agent")
63
+
64
+ # Get available web search tools based on configured API keys
65
+ web_search_tools = get_available_web_search_tools()
66
+ if web_search_tools:
67
+ logger.info(
68
+ "Research agent configured with %d web search tool(s)",
69
+ len(web_search_tools),
70
+ )
71
+ else:
72
+ logger.warning("Research agent configured without web search tools")
73
+
63
74
  agent, deps = create_base_agent(
64
75
  _build_research_agent_system_prompt,
65
76
  agent_runtime_options,
66
77
  load_codebase_understanding_tools=True,
67
- additional_tools=[web_search_tool],
78
+ additional_tools=web_search_tools,
68
79
  provider=provider,
69
80
  )
70
81
  return agent, deps
@@ -9,10 +9,18 @@ from .codebase import (
9
9
  )
10
10
  from .file_management import append_file, read_file, write_file
11
11
  from .user_interaction import ask_user
12
- from .web_search import web_search_tool
12
+ from .web_search import (
13
+ anthropic_web_search_tool,
14
+ gemini_web_search_tool,
15
+ get_available_web_search_tools,
16
+ openai_web_search_tool,
17
+ )
13
18
 
14
19
  __all__ = [
15
- "web_search_tool",
20
+ "openai_web_search_tool",
21
+ "anthropic_web_search_tool",
22
+ "gemini_web_search_tool",
23
+ "get_available_web_search_tools",
16
24
  "ask_user",
17
25
  "read_file",
18
26
  "write_file",
"""Web search tools for Pydantic AI agents.

Provides web search capabilities for multiple LLM providers:
- OpenAI: Uses Responses API with web_search tool
- Anthropic: Uses Messages API with web_search_20250305 tool
- Gemini: Uses grounding with Google Search
"""

from collections.abc import Callable

from shotgun.agents.config.models import ProviderType
from shotgun.logging_config import get_logger

from .anthropic import anthropic_web_search_tool
from .gemini import gemini_web_search_tool
from .openai import openai_web_search_tool
from .utils import is_provider_available

logger = get_logger(__name__)

# Type alias for web search tools
WebSearchTool = Callable[[str], str]


def get_available_web_search_tools() -> list[WebSearchTool]:
    """Get list of available web search tools based on configured API keys.

    Returns:
        List of web search tool functions that have API keys configured
    """
    # (provider, tool, display label) — checked in a fixed order so the
    # returned list is deterministic.
    candidates: list[tuple[ProviderType, WebSearchTool, str]] = [
        (ProviderType.OPENAI, openai_web_search_tool, "OpenAI"),
        (ProviderType.ANTHROPIC, anthropic_web_search_tool, "Anthropic"),
        (ProviderType.GOOGLE, gemini_web_search_tool, "Gemini"),
    ]

    tools: list[WebSearchTool] = []
    for provider, tool, label in candidates:
        if is_provider_available(provider):
            logger.debug("✅ %s web search tool available", label)
            tools.append(tool)

    if not tools:
        logger.warning("⚠️ No web search tools available - no API keys configured")
    else:
        logger.info("🔍 %d web search tool(s) available", len(tools))

    return tools


__all__ = [
    "openai_web_search_tool",
    "anthropic_web_search_tool",
    "gemini_web_search_tool",
    "get_available_web_search_tools",
    "is_provider_available",
    "WebSearchTool",
]
"""Anthropic web search tool implementation."""

import anthropic
from opentelemetry import trace

from shotgun.agents.config import get_provider_model
from shotgun.agents.config.models import ProviderType
from shotgun.logging_config import get_logger

logger = get_logger(__name__)


def anthropic_web_search_tool(query: str) -> str:
    """Perform a web search using Anthropic's Claude API.

    This tool uses Anthropic's web search capabilities to find current
    information about the given query.

    Args:
        query: The search query

    Returns:
        Search results as a formatted string (or an error message string
        when the API key is missing or the request fails)
    """
    logger.debug("🔧 Invoking Anthropic web_search_tool with query: %s", query)

    span = trace.get_current_span()
    span.set_attribute("input.value", f"**Query:** {query}\n")

    logger.debug("📡 Executing Anthropic web search with prompt: %s", query)

    # Resolve the API key via the centralized provider configuration.
    try:
        api_key = get_provider_model(ProviderType.ANTHROPIC).api_key
    except ValueError as e:
        error_msg = f"Anthropic API key not configured: {str(e)}"
        logger.error("❌ %s", error_msg)
        span.set_attribute("output.value", f"**Error:**\n {error_msg}\n")
        return error_msg

    client = anthropic.Anthropic(api_key=api_key)

    # Call the Messages API with the server-side web search tool enabled.
    try:
        response = client.messages.create(
            model="claude-3-5-sonnet-latest",
            max_tokens=8192,  # Increased from 4096 for more comprehensive results
            messages=[{"role": "user", "content": f"Search for: {query}"}],
            tools=[
                {
                    "type": "web_search_20250305",
                    "name": "web_search",
                }
            ],
            tool_choice={"type": "tool", "name": "web_search"},
        )

        # Collect text content; tool-use blocks are summarized with a marker.
        pieces: list[str] = []
        for item in getattr(response, "content", None) or []:
            if hasattr(item, "text"):
                pieces.append(item.text)
            elif getattr(item, "tool_use", None):
                pieces.append(f"Search performed for: {query}\n")

        result_text = "".join(pieces) or "No content returned from search"

        logger.debug("📄 Anthropic web search result: %d characters", len(result_text))
        logger.debug(
            "🔍 Result preview: %s...",
            result_text[:100] if result_text else "No result",
        )

        span.set_attribute("output.value", f"**Results:**\n {result_text}\n")

        return result_text
    except Exception as e:
        error_msg = f"Error performing Anthropic web search: {str(e)}"
        logger.error("❌ Anthropic web search failed: %s", str(e))
        logger.debug("💥 Full error details: %s", error_msg)
        span.set_attribute("output.value", f"**Error:**\n {error_msg}\n")
        return error_msg
"""Gemini web search tool implementation."""

import google.generativeai as genai
from opentelemetry import trace

from shotgun.agents.config import get_provider_model
from shotgun.agents.config.models import ProviderType
from shotgun.logging_config import get_logger

logger = get_logger(__name__)


def gemini_web_search_tool(query: str) -> str:
    """Perform a web search using Google's Gemini API with grounding.

    This tool uses Gemini's Google Search grounding to find current
    information about the given query.

    Args:
        query: The search query

    Returns:
        Search results as a formatted string (or an error message string
        when the API key is missing or the request fails)
    """
    logger.debug("🔧 Invoking Gemini web_search_tool with query: %s", query)

    span = trace.get_current_span()
    span.set_attribute("input.value", f"**Query:** {query}\n")

    logger.debug("📡 Executing Gemini web search with prompt: %s", query)

    # Resolve the API key via the centralized provider configuration.
    try:
        api_key = get_provider_model(ProviderType.GOOGLE).api_key
    except ValueError as e:
        error_msg = f"Gemini API key not configured: {str(e)}"
        logger.error("❌ %s", error_msg)
        span.set_attribute("output.value", f"**Error:**\n {error_msg}\n")
        return error_msg

    genai.configure(api_key=api_key)  # type: ignore[attr-defined]

    # Create model without built-in tools to avoid conflict with Pydantic AI
    # Using prompt-based search approach instead
    model = genai.GenerativeModel("gemini-2.5-pro")  # type: ignore[attr-defined]

    # Create a search-optimized prompt that leverages Gemini's knowledge
    search_prompt = f"""Please provide current and accurate information about the following query:

Query: {query}

Instructions:
- Provide comprehensive, factual information
- Include relevant details and context
- Focus on current and recent information
- Be specific and accurate in your response"""

    # Generate response using the model's knowledge
    try:
        response = model.generate_content(
            search_prompt,
            generation_config=genai.GenerationConfig(  # type: ignore[attr-defined]
                temperature=0.3,
                max_output_tokens=8192,  # Explicit limit for comprehensive results
            ),
        )

        result_text = response.text or "No content returned from search"

        logger.debug("📄 Gemini web search result: %d characters", len(result_text))
        logger.debug(
            "🔍 Result preview: %s...",
            result_text[:100] if result_text else "No result",
        )

        span.set_attribute("output.value", f"**Results:**\n {result_text}\n")

        return result_text
    except Exception as e:
        error_msg = f"Error performing Gemini web search: {str(e)}"
        logger.error("❌ Gemini web search failed: %s", str(e))
        logger.debug("💥 Full error details: %s", error_msg)
        span.set_attribute("output.value", f"**Error:**\n {error_msg}\n")
        return error_msg