cisco-ai-skill-scanner 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100) hide show
  1. cisco_ai_skill_scanner-1.0.0.dist-info/METADATA +253 -0
  2. cisco_ai_skill_scanner-1.0.0.dist-info/RECORD +100 -0
  3. cisco_ai_skill_scanner-1.0.0.dist-info/WHEEL +4 -0
  4. cisco_ai_skill_scanner-1.0.0.dist-info/entry_points.txt +4 -0
  5. cisco_ai_skill_scanner-1.0.0.dist-info/licenses/LICENSE +17 -0
  6. skillanalyzer/__init__.py +45 -0
  7. skillanalyzer/_version.py +34 -0
  8. skillanalyzer/api/__init__.py +25 -0
  9. skillanalyzer/api/api.py +34 -0
  10. skillanalyzer/api/api_cli.py +78 -0
  11. skillanalyzer/api/api_server.py +634 -0
  12. skillanalyzer/api/router.py +527 -0
  13. skillanalyzer/cli/__init__.py +25 -0
  14. skillanalyzer/cli/cli.py +816 -0
  15. skillanalyzer/config/__init__.py +26 -0
  16. skillanalyzer/config/config.py +149 -0
  17. skillanalyzer/config/config_parser.py +122 -0
  18. skillanalyzer/config/constants.py +85 -0
  19. skillanalyzer/core/__init__.py +24 -0
  20. skillanalyzer/core/analyzers/__init__.py +75 -0
  21. skillanalyzer/core/analyzers/aidefense_analyzer.py +872 -0
  22. skillanalyzer/core/analyzers/base.py +53 -0
  23. skillanalyzer/core/analyzers/behavioral/__init__.py +30 -0
  24. skillanalyzer/core/analyzers/behavioral/alignment/__init__.py +45 -0
  25. skillanalyzer/core/analyzers/behavioral/alignment/alignment_llm_client.py +240 -0
  26. skillanalyzer/core/analyzers/behavioral/alignment/alignment_orchestrator.py +216 -0
  27. skillanalyzer/core/analyzers/behavioral/alignment/alignment_prompt_builder.py +422 -0
  28. skillanalyzer/core/analyzers/behavioral/alignment/alignment_response_validator.py +136 -0
  29. skillanalyzer/core/analyzers/behavioral/alignment/threat_vulnerability_classifier.py +198 -0
  30. skillanalyzer/core/analyzers/behavioral_analyzer.py +453 -0
  31. skillanalyzer/core/analyzers/cross_skill_analyzer.py +490 -0
  32. skillanalyzer/core/analyzers/llm_analyzer.py +440 -0
  33. skillanalyzer/core/analyzers/llm_prompt_builder.py +270 -0
  34. skillanalyzer/core/analyzers/llm_provider_config.py +215 -0
  35. skillanalyzer/core/analyzers/llm_request_handler.py +284 -0
  36. skillanalyzer/core/analyzers/llm_response_parser.py +81 -0
  37. skillanalyzer/core/analyzers/meta_analyzer.py +845 -0
  38. skillanalyzer/core/analyzers/static.py +1105 -0
  39. skillanalyzer/core/analyzers/trigger_analyzer.py +341 -0
  40. skillanalyzer/core/analyzers/virustotal_analyzer.py +463 -0
  41. skillanalyzer/core/exceptions.py +77 -0
  42. skillanalyzer/core/loader.py +377 -0
  43. skillanalyzer/core/models.py +300 -0
  44. skillanalyzer/core/reporters/__init__.py +26 -0
  45. skillanalyzer/core/reporters/json_reporter.py +65 -0
  46. skillanalyzer/core/reporters/markdown_reporter.py +209 -0
  47. skillanalyzer/core/reporters/sarif_reporter.py +246 -0
  48. skillanalyzer/core/reporters/table_reporter.py +195 -0
  49. skillanalyzer/core/rules/__init__.py +19 -0
  50. skillanalyzer/core/rules/patterns.py +165 -0
  51. skillanalyzer/core/rules/yara_scanner.py +157 -0
  52. skillanalyzer/core/scanner.py +437 -0
  53. skillanalyzer/core/static_analysis/__init__.py +27 -0
  54. skillanalyzer/core/static_analysis/cfg/__init__.py +21 -0
  55. skillanalyzer/core/static_analysis/cfg/builder.py +439 -0
  56. skillanalyzer/core/static_analysis/context_extractor.py +742 -0
  57. skillanalyzer/core/static_analysis/dataflow/__init__.py +25 -0
  58. skillanalyzer/core/static_analysis/dataflow/forward_analysis.py +715 -0
  59. skillanalyzer/core/static_analysis/interprocedural/__init__.py +21 -0
  60. skillanalyzer/core/static_analysis/interprocedural/call_graph_analyzer.py +406 -0
  61. skillanalyzer/core/static_analysis/interprocedural/cross_file_analyzer.py +190 -0
  62. skillanalyzer/core/static_analysis/parser/__init__.py +21 -0
  63. skillanalyzer/core/static_analysis/parser/python_parser.py +380 -0
  64. skillanalyzer/core/static_analysis/semantic/__init__.py +28 -0
  65. skillanalyzer/core/static_analysis/semantic/name_resolver.py +206 -0
  66. skillanalyzer/core/static_analysis/semantic/type_analyzer.py +200 -0
  67. skillanalyzer/core/static_analysis/taint/__init__.py +21 -0
  68. skillanalyzer/core/static_analysis/taint/tracker.py +252 -0
  69. skillanalyzer/core/static_analysis/types/__init__.py +36 -0
  70. skillanalyzer/data/__init__.py +30 -0
  71. skillanalyzer/data/prompts/boilerplate_protection_rule_prompt.md +26 -0
  72. skillanalyzer/data/prompts/code_alignment_threat_analysis_prompt.md +901 -0
  73. skillanalyzer/data/prompts/llm_response_schema.json +71 -0
  74. skillanalyzer/data/prompts/skill_meta_analysis_prompt.md +303 -0
  75. skillanalyzer/data/prompts/skill_threat_analysis_prompt.md +263 -0
  76. skillanalyzer/data/prompts/unified_response_schema.md +97 -0
  77. skillanalyzer/data/rules/signatures.yaml +440 -0
  78. skillanalyzer/data/yara_rules/autonomy_abuse.yara +66 -0
  79. skillanalyzer/data/yara_rules/code_execution.yara +61 -0
  80. skillanalyzer/data/yara_rules/coercive_injection.yara +115 -0
  81. skillanalyzer/data/yara_rules/command_injection.yara +54 -0
  82. skillanalyzer/data/yara_rules/credential_harvesting.yara +115 -0
  83. skillanalyzer/data/yara_rules/prompt_injection.yara +71 -0
  84. skillanalyzer/data/yara_rules/script_injection.yara +83 -0
  85. skillanalyzer/data/yara_rules/skill_discovery_abuse.yara +57 -0
  86. skillanalyzer/data/yara_rules/sql_injection.yara +73 -0
  87. skillanalyzer/data/yara_rules/system_manipulation.yara +65 -0
  88. skillanalyzer/data/yara_rules/tool_chaining_abuse.yara +60 -0
  89. skillanalyzer/data/yara_rules/transitive_trust_abuse.yara +73 -0
  90. skillanalyzer/data/yara_rules/unicode_steganography.yara +65 -0
  91. skillanalyzer/hooks/__init__.py +21 -0
  92. skillanalyzer/hooks/pre_commit.py +450 -0
  93. skillanalyzer/threats/__init__.py +25 -0
  94. skillanalyzer/threats/threats.py +480 -0
  95. skillanalyzer/utils/__init__.py +28 -0
  96. skillanalyzer/utils/command_utils.py +129 -0
  97. skillanalyzer/utils/di_container.py +154 -0
  98. skillanalyzer/utils/file_utils.py +86 -0
  99. skillanalyzer/utils/logging_config.py +96 -0
  100. skillanalyzer/utils/logging_utils.py +71 -0
@@ -0,0 +1,270 @@
1
+ # Copyright 2026 Cisco Systems, Inc.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ #
15
+ # SPDX-License-Identifier: Apache-2.0
16
+
17
+ """
18
+ LLM Prompt Builder.
19
+
20
+ Handles prompt construction with injection protection using random delimiters.
21
+ """
22
+
23
+ import secrets
24
+ from pathlib import Path
25
+
26
+ from ...core.models import Skill
27
+
28
+
29
+ class PromptBuilder:
30
+ """Builds analysis prompts with injection protection."""
31
+
32
+ def __init__(self):
33
+ """Initialize prompt builder and load prompts."""
34
+ self.protection_rules = ""
35
+ self.threat_analysis_prompt = ""
36
+ self._load_prompts()
37
+
38
+ def _load_prompts(self):
39
+ """Load analysis prompts from markdown files."""
40
+ prompts_dir = Path(__file__).parent.parent.parent / "data" / "prompts"
41
+
42
+ try:
43
+ protection_file = prompts_dir / "boilerplate_protection_rule_prompt.md"
44
+ threat_file = prompts_dir / "skill_threat_analysis_prompt.md"
45
+
46
+ if protection_file.exists():
47
+ self.protection_rules = protection_file.read_text(encoding="utf-8")
48
+ else:
49
+ print(f"Warning: Protection rules file not found at {protection_file}")
50
+ self.protection_rules = "You are a security analyst analyzing Claude Skills."
51
+
52
+ if threat_file.exists():
53
+ self.threat_analysis_prompt = threat_file.read_text(encoding="utf-8")
54
+ else:
55
+ print(f"Warning: Threat analysis prompt not found at {threat_file}")
56
+ self.threat_analysis_prompt = "Analyze for security threats."
57
+
58
+ except Exception as e:
59
+ print(f"Warning: Failed to load prompts: {e}")
60
+ self.protection_rules = "You are a security analyst analyzing Claude Skills."
61
+ self.threat_analysis_prompt = "Analyze for security threats."
62
+
63
+ def build_threat_analysis_prompt(
64
+ self,
65
+ skill_name: str,
66
+ description: str,
67
+ manifest_details: str,
68
+ instruction_body: str,
69
+ code_files: str,
70
+ referenced_files: str,
71
+ ) -> tuple[str, bool]:
72
+ """
73
+ Create threat analysis prompt with prompt injection protection.
74
+
75
+ Uses random delimiter tags to prevent prompt injection attacks.
76
+
77
+ Args:
78
+ skill_name: Name of the skill
79
+ description: Skill description
80
+ manifest_details: YAML manifest details
81
+ instruction_body: SKILL.md content
82
+ code_files: Formatted code files
83
+ referenced_files: Referenced files
84
+
85
+ Returns:
86
+ Tuple of (prompt, injection_detected)
87
+ """
88
+ # Generate random delimiter tags
89
+ random_id = secrets.token_hex(16)
90
+ start_tag = f"<!---UNTRUSTED_INPUT_START_{random_id}--->"
91
+ end_tag = f"<!---UNTRUSTED_INPUT_END_{random_id}--->"
92
+
93
+ # Build comprehensive analysis content
94
+ analysis_content = f"""Skill Name: {skill_name}
95
+ Description: {description}
96
+
97
+ YAML Manifest Details:
98
+ {manifest_details}
99
+
100
+ Instruction Body (SKILL.md markdown):
101
+ {instruction_body}
102
+
103
+ Script Files (Python/Bash):
104
+ {code_files}
105
+
106
+ Referenced Files:
107
+ {referenced_files}
108
+ """
109
+
110
+ # Check for delimiter injection (security violation)
111
+ injection_detected = start_tag in analysis_content or end_tag in analysis_content
112
+
113
+ if injection_detected:
114
+ print(f"WARNING: Potential prompt injection detected in skill {skill_name}")
115
+
116
+ # Replace placeholders with random tags
117
+ protected_rules = self.protection_rules.replace("<!---UNTRUSTED_INPUT_START--->", start_tag).replace(
118
+ "<!---UNTRUSTED_INPUT_END--->", end_tag
119
+ )
120
+
121
+ # Construct full prompt
122
+ prompt = f"""{protected_rules}
123
+
124
+ {self.threat_analysis_prompt}
125
+
126
+ {start_tag}
127
+ {analysis_content}
128
+ {end_tag}
129
+ """
130
+
131
+ return prompt.strip(), injection_detected
132
+
133
+ def format_manifest(self, manifest) -> str:
134
+ """Format YAML manifest for LLM analysis."""
135
+ lines = []
136
+ lines.append(f"- name: {manifest.name}")
137
+ lines.append(f"- description: {manifest.description}")
138
+ lines.append(f"- license: {manifest.license or 'Not specified'}")
139
+ lines.append(f"- compatibility: {manifest.compatibility or 'Not specified'}")
140
+ lines.append(
141
+ f"- allowed-tools: {', '.join(manifest.allowed_tools) if manifest.allowed_tools else 'Not specified'}"
142
+ )
143
+ if manifest.metadata:
144
+ lines.append(f"- additional metadata: {manifest.metadata}")
145
+ return "\n".join(lines)
146
+
147
+ def format_code_files(self, skill: Skill) -> str:
148
+ """Format code files for LLM analysis."""
149
+ lines = []
150
+
151
+ for skill_file in skill.get_scripts():
152
+ content = skill_file.read_content()
153
+ if content:
154
+ truncated = content[:1500]
155
+ if len(content) > 1500:
156
+ truncated += f"\n... (truncated, total {len(content)} chars)"
157
+
158
+ lines.append(f"**File: {skill_file.relative_path}**")
159
+ lines.append("```" + skill_file.file_type)
160
+ lines.append(truncated)
161
+ lines.append("```")
162
+ lines.append("")
163
+
164
+ return "\n".join(lines) if lines else "No script files found."
165
+
166
+ def _is_path_within_directory(self, path: Path, directory: Path) -> bool:
167
+ """
168
+ Check if a path is within a directory (prevents path traversal attacks).
169
+
170
+ Args:
171
+ path: The path to check (will be resolved)
172
+ directory: The directory that should contain the path
173
+
174
+ Returns:
175
+ True if the path is within the directory, False otherwise
176
+ """
177
+ try:
178
+ # Resolve both paths to absolute paths, resolving symlinks
179
+ resolved_path = path.resolve()
180
+ resolved_directory = directory.resolve()
181
+
182
+ # Check if the resolved path starts with the directory path
183
+ # Using os.path.commonpath is more robust than string comparison
184
+ return resolved_path.is_relative_to(resolved_directory)
185
+ except (ValueError, OSError):
186
+ # is_relative_to raises ValueError if paths are on different drives (Windows)
187
+ # or other path resolution issues
188
+ return False
189
+
190
+ def format_referenced_files(self, skill: Skill, max_file_size: int = 2000) -> str:
191
+ """
192
+ Format referenced files for LLM analysis, including their content.
193
+
194
+ This is critical for detecting hidden malicious payloads in referenced
195
+ instruction files (e.g., rules/logic.md containing curl commands).
196
+
197
+ SECURITY: Only reads files within the skill directory to prevent
198
+ path traversal attacks (e.g., ../../../.env exfiltration).
199
+
200
+ Args:
201
+ skill: The skill being analyzed
202
+ max_file_size: Maximum characters to include per file (default 2000)
203
+
204
+ Returns:
205
+ Formatted string with referenced file contents
206
+ """
207
+ if not skill.referenced_files:
208
+ return "No referenced files."
209
+
210
+ lines = []
211
+ lines.append(f"Files referenced in instructions: {', '.join(skill.referenced_files)}")
212
+ lines.append("")
213
+
214
+ for ref_file_path in skill.referenced_files:
215
+ # Skip paths that look like path traversal attempts
216
+ if ".." in ref_file_path or ref_file_path.startswith("/"):
217
+ lines.append(f"**Referenced File: {ref_file_path}** (blocked: path traversal attempt)")
218
+ lines.append("")
219
+ continue
220
+
221
+ # Try to find the file in the skill directory
222
+ full_path = skill.directory / ref_file_path
223
+ if not full_path.exists():
224
+ # Try alternative locations (all within skill directory)
225
+ alt_paths = [
226
+ skill.directory / "rules" / Path(ref_file_path).name,
227
+ skill.directory / "references" / ref_file_path,
228
+ skill.directory / "assets" / ref_file_path,
229
+ skill.directory / "templates" / ref_file_path,
230
+ ]
231
+ for alt in alt_paths:
232
+ if alt.exists():
233
+ full_path = alt
234
+ break
235
+
236
+ if not full_path.exists():
237
+ lines.append(f"**Referenced File: {ref_file_path}** (not found)")
238
+ lines.append("")
239
+ continue
240
+
241
+ # SECURITY: Verify the resolved path is within the skill directory
242
+ # This prevents path traversal attacks like ../../../.env
243
+ if not self._is_path_within_directory(full_path, skill.directory):
244
+ lines.append(f"**Referenced File: {ref_file_path}** (blocked: outside skill directory)")
245
+ lines.append("")
246
+ continue
247
+
248
+ try:
249
+ content = full_path.read_text(encoding="utf-8")
250
+
251
+ # Truncate if too large
252
+ truncated = content[:max_file_size]
253
+ if len(content) > max_file_size:
254
+ truncated += f"\n... (truncated, total {len(content)} chars)"
255
+
256
+ # Determine file type for syntax highlighting
257
+ suffix = full_path.suffix.lower()
258
+ file_type = "markdown" if suffix in (".md", ".markdown") else "text"
259
+
260
+ lines.append(f"**Referenced File: {ref_file_path}**")
261
+ lines.append(f"```{file_type}")
262
+ lines.append(truncated)
263
+ lines.append("```")
264
+ lines.append("")
265
+
266
+ except Exception as e:
267
+ lines.append(f"**Referenced File: {ref_file_path}** (error reading: {e})")
268
+ lines.append("")
269
+
270
+ return "\n".join(lines)
@@ -0,0 +1,215 @@
1
+ # Copyright 2026 Cisco Systems, Inc.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ #
15
+ # SPDX-License-Identifier: Apache-2.0
16
+
17
+ """
18
+ LLM Provider Configuration Handler.
19
+
20
+ Handles detection and configuration of different LLM providers
21
+ (Anthropic, OpenAI, Azure, Bedrock, Gemini).
22
+ """
23
+
24
+ import importlib.util
25
+ import os
26
+
27
+ # Check for Google GenAI availability
28
+ # Wrap in try/except because find_spec can raise ModuleNotFoundError
29
+ # if the google namespace package is in a broken state
30
+ try:
31
+ GOOGLE_GENAI_AVAILABLE = importlib.util.find_spec("google.genai") is not None
32
+ except (ImportError, ModuleNotFoundError):
33
+ GOOGLE_GENAI_AVAILABLE = False
34
+
35
+ # Check for LiteLLM availability
36
+ try:
37
+ LITELLM_AVAILABLE = importlib.util.find_spec("litellm") is not None
38
+ except (ImportError, ModuleNotFoundError):
39
+ LITELLM_AVAILABLE = False
40
+
41
+
42
+ class ProviderConfig:
43
+ """Handles LLM provider detection and configuration."""
44
+
45
+ def __init__(
46
+ self,
47
+ model: str,
48
+ api_key: str | None = None,
49
+ base_url: str | None = None,
50
+ api_version: str | None = None,
51
+ aws_region: str | None = None,
52
+ aws_profile: str | None = None,
53
+ aws_session_token: str | None = None,
54
+ ):
55
+ """
56
+ Initialize provider configuration.
57
+
58
+ Args:
59
+ model: Model identifier
60
+ api_key: API key (if None, reads from environment)
61
+ base_url: Custom base URL (for Azure)
62
+ api_version: API version (for Azure)
63
+ aws_region: AWS region (for Bedrock)
64
+ aws_profile: AWS profile name (for Bedrock)
65
+ aws_session_token: AWS session token (for Bedrock)
66
+ """
67
+ self.model = model
68
+ self.base_url = base_url
69
+ self.api_version = api_version
70
+ self.aws_region = aws_region or os.getenv("AWS_REGION", "us-east-1")
71
+ self.aws_profile = aws_profile or os.getenv("AWS_PROFILE")
72
+ self.aws_session_token = aws_session_token or os.getenv("AWS_SESSION_TOKEN")
73
+
74
+ # Detect provider type from model string
75
+ model_lower = model.lower()
76
+ self.is_bedrock = "bedrock/" in model or model_lower.startswith("bedrock/")
77
+ self.is_gemini = "gemini" in model_lower or model_lower.startswith("gemini/")
78
+ self.is_azure = model_lower.startswith("azure/") or "azure" in model_lower
79
+ self.is_vertex = model_lower.startswith("vertex_ai/") or "vertex" in model_lower
80
+ self.is_ollama = model_lower.startswith("ollama/")
81
+ self.is_openrouter = model_lower.startswith("openrouter/")
82
+
83
+ # Determine if we should use Google SDK
84
+ self.use_google_sdk = False
85
+
86
+ # Handle Vertex AI separately (uses LiteLLM, not Google SDK)
87
+ if self.is_vertex:
88
+ # Vertex AI models stay as-is for LiteLLM
89
+ if not LITELLM_AVAILABLE:
90
+ raise ImportError("LiteLLM is required for Vertex AI. Install with: pip install litellm")
91
+ self.model = model # Keep vertex_ai/ prefix for LiteLLM
92
+ elif self.is_gemini and GOOGLE_GENAI_AVAILABLE:
93
+ # Google AI Studio (uses Google SDK directly)
94
+ self.use_google_sdk = True
95
+ self.model = self._normalize_gemini_model_name(model)
96
+ elif self.is_gemini and not GOOGLE_GENAI_AVAILABLE:
97
+ raise ImportError(
98
+ "For Gemini models, either LiteLLM or google-genai is required. "
99
+ "Install with: pip install litellm or pip install google-genai"
100
+ )
101
+ elif not LITELLM_AVAILABLE:
102
+ raise ImportError("LiteLLM is required for enhanced LLM analyzer. Install with: pip install litellm")
103
+ else:
104
+ # Normalize Gemini model name for LiteLLM (Google AI Studio via LiteLLM)
105
+ if self.is_gemini and not model.startswith("gemini/"):
106
+ model_name = model.replace("gemini-", "").replace("gemini/", "")
107
+ self.model = f"gemini/{model_name}"
108
+ else:
109
+ self.model = model
110
+
111
+ # Resolve API key
112
+ self.api_key = self._resolve_api_key(api_key)
113
+
114
+ # Note: Google SDK client is created per-request, not configured globally
115
+
116
+ def _resolve_api_key(self, api_key: str | None) -> str | None:
117
+ """Resolve API key from parameter or environment variables.
118
+
119
+ Uses SKILL_SCANNER_LLM_API_KEY consistently for all providers.
120
+
121
+ Special cases:
122
+ - Vertex AI: Uses GOOGLE_APPLICATION_CREDENTIALS (service account)
123
+ - Ollama: No API key needed (local)
124
+ """
125
+ if api_key is not None:
126
+ return api_key
127
+
128
+ # Special cases with different auth mechanisms
129
+ if self.is_vertex:
130
+ # Vertex AI uses Google Cloud service account credentials
131
+ return os.getenv("GOOGLE_APPLICATION_CREDENTIALS")
132
+ elif self.is_ollama:
133
+ # Ollama is local and typically doesn't need API key
134
+ return None
135
+
136
+ # All providers (including Bedrock, Gemini, OpenAI, Anthropic, Azure):
137
+ # Use SKILL_SCANNER_LLM_API_KEY
138
+ return os.getenv("SKILL_SCANNER_LLM_API_KEY")
139
+
140
+ def _normalize_gemini_model_name(self, model: str) -> str:
141
+ """
142
+ Normalize Gemini model name for Google GenAI SDK (new SDK).
143
+
144
+ Handles various input formats:
145
+ - gemini-1.5-pro -> models/gemini-1.5-pro (or models/gemini-pro-latest)
146
+ - gemini-2.5-flash -> models/gemini-2.5-flash
147
+ - gemini/2.0-flash -> models/gemini-2.0-flash
148
+ - models/gemini-2.5-pro -> models/gemini-2.5-pro (already correct)
149
+
150
+ Args:
151
+ model: Input model name
152
+
153
+ Returns:
154
+ Normalized model name for Google SDK (with models/ prefix)
155
+ """
156
+ # Remove any "gemini/" prefix (LiteLLM format)
157
+ model_name = model.replace("gemini/", "")
158
+
159
+ # Remove models/ prefix if present (will add it back)
160
+ model_name = model_name.replace("models/", "")
161
+
162
+ # Map legacy model names to available models
163
+ model_mapping = {
164
+ "gemini-1.5-pro": "gemini-pro-latest", # Map to latest available
165
+ "gemini-1.5-flash": "gemini-flash-latest", # Map to latest available
166
+ }
167
+
168
+ if model_name in model_mapping:
169
+ model_name = model_mapping[model_name]
170
+
171
+ # If it's just a version/variant, add "gemini-" prefix
172
+ if not model_name.startswith("gemini-"):
173
+ model_name = f"gemini-{model_name}"
174
+
175
+ # Add models/ prefix for new SDK
176
+ if not model_name.startswith("models/"):
177
+ model_name = f"models/{model_name}"
178
+
179
+ return model_name
180
+
181
+ def validate(self) -> None:
182
+ """Validate that configuration is complete."""
183
+ if not self.is_bedrock and not self.api_key:
184
+ raise ValueError(f"API key required for model {self.model}")
185
+
186
+ def get_request_params(self) -> dict:
187
+ """Get request parameters for LiteLLM."""
188
+ params = {}
189
+
190
+ if self.api_key:
191
+ if self.is_gemini:
192
+ # For Google AI Studio, LiteLLM uses GEMINI_API_KEY environment variable
193
+ if not os.getenv("GEMINI_API_KEY"):
194
+ os.environ["GEMINI_API_KEY"] = self.api_key
195
+ else:
196
+ # Pass api_key for all providers including Bedrock (bearer token auth)
197
+ params["api_key"] = self.api_key
198
+
199
+ if self.base_url:
200
+ params["api_base"] = self.base_url
201
+ if self.api_version:
202
+ params["api_version"] = self.api_version
203
+
204
+ if self.is_bedrock:
205
+ # AWS Bedrock supports:
206
+ # 1. Bearer token auth via api_key (format: bedrock-api-key-*)
207
+ # 2. IAM credentials via boto3 (falls back if no bearer token)
208
+ if self.aws_region:
209
+ params["aws_region_name"] = self.aws_region
210
+ if self.aws_session_token:
211
+ params["aws_session_token"] = self.aws_session_token
212
+ if self.aws_profile:
213
+ params["aws_profile_name"] = self.aws_profile
214
+
215
+ return params