terminal-sherpa 0.1.0__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ask/main.py CHANGED
@@ -76,10 +76,11 @@ def resolve_provider(args, config_data) -> ProviderInterface:
76
76
  # Use default provider from environment variables
77
77
  default_provider = config.get_default_provider()
78
78
  if not default_provider:
79
+ keys = ["GEMINI_API_KEY", "ANTHROPIC_API_KEY", "OPENAI_API_KEY"]
79
80
  logger.error(
80
81
  "No default model configured and no API keys found. "
81
- "Please set ANTHROPIC_API_KEY or OPENAI_API_KEY environment "
82
- "variable, or set a default_provider in your config file."
82
+ f"Please set one or more of {keys} environment variables, "
83
+ "or set a default_provider in your config file."
83
84
  )
84
85
  sys.exit(1)
85
86
  logger.debug(f"Using default provider from environment: {default_provider}")
ask/providers/__init__.py CHANGED
@@ -2,6 +2,7 @@
2
2
 
3
3
  from .anthropic import AnthropicProvider
4
4
  from .base import ProviderInterface
5
+ from .gemini import GeminiProvider
5
6
  from .openai import OpenAIProvider
6
7
 
7
8
  # Provider registry - maps provider names to their classes
@@ -33,3 +34,4 @@ def list_providers() -> list[str]:
33
34
 
34
35
  register_provider("anthropic", AnthropicProvider)
35
36
  register_provider("openai", OpenAIProvider)
37
+ register_provider("gemini", GeminiProvider)
ask/providers/anthropic.py CHANGED
@@ -37,6 +37,7 @@ class AnthropicProvider(ProviderInterface):
37
37
  return response.content[0].text
38
38
  except Exception as e:
39
39
  self._handle_api_error(e)
40
+ return ""
40
41
 
41
42
  def validate_config(self) -> None:
42
43
  """Validate provider configuration and API key."""
ask/providers/gemini.py ADDED
@@ -0,0 +1,89 @@
1
+ """Anthropic provider implementation."""
2
+
3
+ import os
4
+ from typing import Any
5
+
6
+ from google import genai
7
+ from google.genai.types import GenerateContentConfig, GenerateContentResponse
8
+
9
+ from ask.config import SYSTEM_PROMPT
10
+ from ask.exceptions import APIError, AuthenticationError, RateLimitError
11
+ from ask.providers.base import ProviderInterface
12
+
13
+
14
+ class GeminiProvider(ProviderInterface):
15
+ """Gemini AI provider implementation."""
16
+
17
+ def __init__(self, config: dict[str, Any]):
18
+ """Initialize Gemini provider with configuration."""
19
+ super().__init__(config)
20
+ self.client: genai.Client | None = None
21
+
22
+ def _parse_response(self, response: GenerateContentResponse) -> str:
23
+ """Parse response from Gemini API."""
24
+ if response.candidates is None or len(response.candidates) == 0:
25
+ return ""
26
+ parts = response.candidates[0].content.parts
27
+ if parts is None:
28
+ return ""
29
+ return "".join([part.text for part in parts])
30
+
31
+ def get_bash_command(self, prompt: str) -> str:
32
+ """Generate bash command from natural language prompt."""
33
+ if self.client is None:
34
+ self.validate_config()
35
+
36
+ # After validate_config(), client should be set
37
+ assert self.client is not None, "Client should be initialized after validation"
38
+
39
+ try:
40
+ # max_tokens=self.config.get("max_tokens", 150),
41
+ # temperature=self.config.get("temperature", 0.0),
42
+ # system=self.config.get("system_prompt", SYSTEM_PROMPT),
43
+ response = self.client.models.generate_content(
44
+ model=self.config.get("model_name", "gemini-2.5-flash"),
45
+ contents=prompt,
46
+ config=GenerateContentConfig(
47
+ max_output_tokens=self.config.get("max_tokens", 150),
48
+ temperature=self.config.get("temperature", 0.0),
49
+ system_instruction=self.config.get("system_prompt", SYSTEM_PROMPT),
50
+ ),
51
+ )
52
+ return self._parse_response(response)
53
+ except Exception as e:
54
+ self._handle_api_error(e)
55
+ return ""
56
+
57
+ def validate_config(self) -> None:
58
+ """Validate provider configuration and API key."""
59
+ api_key_env = self.config.get("api_key_env", "GEMINI_API_KEY")
60
+ api_key = os.environ.get(api_key_env)
61
+
62
+ if not api_key:
63
+ raise AuthenticationError(
64
+ f"Error: {api_key_env} environment variable is required"
65
+ )
66
+
67
+ self.client = genai.Client(api_key=api_key)
68
+
69
+ def _handle_api_error(self, error: Exception):
70
+ """Handle API errors and map them to standard exceptions."""
71
+ error_str = str(error).lower()
72
+
73
+ if "authentication" in error_str or "unauthorized" in error_str:
74
+ raise AuthenticationError("Error: Invalid API key")
75
+ elif "rate limit" in error_str:
76
+ raise RateLimitError("Error: API rate limit exceeded")
77
+ else:
78
+ raise APIError(f"Error: API request failed - {error}")
79
+
80
+ @classmethod
81
+ def get_default_config(cls) -> dict[str, Any]:
82
+ """Return default configuration for Gemini provider."""
83
+ return {
84
+ "model_name": "gemini-2.5-flash",
85
+ "max_tokens": 150,
86
+ "api_key_env": "GEMINI_API_KEY",
87
+ "temperature": 0.0,
88
+ "system_prompt": SYSTEM_PROMPT,
89
+ }
ask/providers/openai.py CHANGED
@@ -62,6 +62,7 @@ class OpenAIProvider(ProviderInterface):
62
62
  return re_match.group(1)
63
63
  except Exception as e:
64
64
  self._handle_api_error(e)
65
+ return ""
65
66
 
66
67
  def validate_config(self) -> None:
67
68
  """Validate provider configuration and API key."""
terminal_sherpa-0.2.0.dist-info/METADATA CHANGED
@@ -1,35 +1,42 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: terminal-sherpa
3
- Version: 0.1.0
3
+ Version: 0.2.0
4
4
  Summary: AI-powered bash command generator
5
5
  Requires-Python: >=3.13
6
6
  Description-Content-Type: text/markdown
7
+ License-File: LICENSE
7
8
  Requires-Dist: anthropic>=0.7.0
8
9
  Requires-Dist: black>=25.1.0
10
+ Requires-Dist: cosmic-ray>=8.3.4
11
+ Requires-Dist: google-genai>=1.26.0
9
12
  Requires-Dist: loguru>=0.7.0
13
+ Requires-Dist: mutatest>=3.1.0
10
14
  Requires-Dist: openai>=1.0.0
11
15
  Requires-Dist: pytest>=8.0.0
12
16
  Requires-Dist: pytest-cov>=4.0.0
13
17
  Requires-Dist: pytest-mock>=3.12.0
14
18
  Requires-Dist: ruff>=0.12.3
19
+ Requires-Dist: setuptools>=80.9.0
20
+ Requires-Dist: taskipy>=1.14.1
15
21
  Requires-Dist: toml>=0.10.0
22
+ Dynamic: license-file
16
23
 
17
- # ask
24
+ # terminal-sherpa
18
25
 
19
26
  A lightweight AI chat interface for fellow terminal dwellers.
20
27
 
21
28
  Turn natural language into bash commands instantly.
22
29
  Stop googling syntax and start asking.
23
30
 
24
- [![codecov](https://codecov.io/github/lcford2/ask/graph/badge.svg?token=2MXHNL3RHE)](https://codecov.io/github/lcford2/ask)
31
+ [![codecov](https://codecov.io/github/lcford2/terminal-sherpa/graph/badge.svg?token=2MXHNL3RHE)](https://codecov.io/github/lcford2/terminal-sherpa)
25
32
 
26
33
  ## 🚀 Getting Started
27
34
 
28
35
  Get up and running:
29
36
 
30
37
  ```bash
31
- # Install ask
32
- uv tool install ask
38
+ # Install terminal-sherpa
39
+ pip install terminal-sherpa # installs the `ask` CLI tool
33
40
 
34
41
  # Set your API key
35
42
  export ANTHROPIC_API_KEY="your-key-here"
@@ -39,6 +46,7 @@ ask "find all .py files modified in the last week"
39
46
  ```
40
47
 
41
48
  **Example output:**
49
+
42
50
  ```bash
43
51
  find . -name "*.py" -mtime -7
44
52
  ```
@@ -46,7 +54,7 @@ find . -name "*.py" -mtime -7
46
54
  ## ✨ Features
47
55
 
48
56
  - **Natural language to bash conversion** - Describe what you want, get the command
49
- - **Multiple AI provider support** - Choose between Anthropic (Claude) and OpenAI (GPT) models
57
+ - **Multiple AI provider support** - Choose between Anthropic (Claude), OpenAI (GPT), and Google (Gemini) models
50
58
  - **Flexible configuration system** - Set defaults, customize models, and manage API keys
51
59
  - **XDG-compliant config files** - Follows standard configuration file locations
52
60
  - **Verbose logging support** - Debug and understand what's happening under the hood
@@ -54,30 +62,29 @@ find . -name "*.py" -mtime -7
54
62
  ## 📦 Installation
55
63
 
56
64
  ### Requirements
65
+
57
66
  - Python 3.13+
58
67
  - API key for Anthropic or OpenAI
59
68
 
60
69
  ### Install Methods
61
70
 
62
- **Recommended (uv):**
63
- ```bash
64
- uv tool install ask
65
- ```
66
-
67
71
  **Using pip:**
72
+
68
73
  ```bash
69
- pip install ask
74
+ pip install terminal-sherpa
70
75
  ```
71
76
 
72
77
  **From source:**
78
+
73
79
  ```bash
74
- git clone https://github.com/lcford2/ask.git
75
- cd ask
80
+ git clone https://github.com/lcford2/terminal-sherpa.git
81
+ cd terminal-sherpa
76
82
  uv sync
77
83
  uv run ask "your prompt here"
78
84
  ```
79
85
 
80
86
  **Verify installation:**
87
+
81
88
  ```bash
82
89
  ask --help
83
90
  ```
@@ -85,20 +92,23 @@ ask --help
85
92
  ## 💡 Usage
86
93
 
87
94
  ### Basic Syntax
95
+
88
96
  ```bash
89
97
  ask "your natural language prompt"
90
98
  ```
91
99
 
92
100
  ### Command Options
93
101
 
94
- | Option | Description | Example |
95
- |--------|-------------|---------|
102
+ | Option | Description | Example |
103
+ | ------------------------ | -------------------------- | --------------------------------------------------- |
96
104
  | `--model provider:model` | Specify provider and model | `ask --model anthropic:claude-3-haiku "list files"` |
97
- | `--verbose` | Enable verbose logging | `ask --verbose "compress this folder"` |
105
+ | | | `ask --model gemini:gemini-2.5-flash "list files"` |
106
+ | `--verbose` | Enable verbose logging | `ask --verbose "compress this folder"` |
98
107
 
99
108
  ### Practical Examples
100
109
 
101
110
  **File Operations:**
111
+
102
112
  ```bash
103
113
  ask "find all files larger than 100MB"
104
114
  # Example output: find . -size +100M
@@ -108,6 +118,7 @@ ask "create a backup of config.txt with timestamp"
108
118
  ```
109
119
 
110
120
  **Git Commands:**
121
+
111
122
  ```bash
112
123
  ask "show git log for last 5 commits with one line each"
113
124
  # Example output: git log --oneline -5
@@ -117,6 +128,7 @@ ask "delete all local branches that have been merged"
117
128
  ```
118
129
 
119
130
  **System Administration:**
131
+
120
132
  ```bash
121
133
  ask "check disk usage of current directory sorted by size"
122
134
  # Example output: du -sh * | sort -hr
@@ -126,6 +138,7 @@ ask "find processes using port 8080"
126
138
  ```
127
139
 
128
140
  **Text Processing:**
141
+
129
142
  ```bash
130
143
  ask "count lines in all Python files"
131
144
  # Example output: find . -name "*.py" -exec wc -l {} + | tail -1
@@ -135,6 +148,7 @@ ask "replace all tabs with spaces in file.txt"
135
148
  ```
136
149
 
137
150
  **Network Operations:**
151
+
138
152
  ```bash
139
153
  ask "download file from URL and save to downloads folder"
140
154
  # Example output: curl -o ~/Downloads/filename "https://example.com/file"
@@ -146,6 +160,7 @@ ask "check if port 443 is open on example.com"
146
160
  ## ⚙️ Configuration
147
161
 
148
162
  ### Configuration File Locations
163
+
149
164
  Ask follows XDG Base Directory Specification:
150
165
 
151
166
  1. `$XDG_CONFIG_HOME/ask/config.toml`
@@ -153,12 +168,15 @@ Ask follows XDG Base Directory Specification:
153
168
  1. `~/.ask/config.toml` (fallback)
154
169
 
155
170
  ### Environment Variables
171
+
156
172
  ```bash
157
173
  export ANTHROPIC_API_KEY="your-anthropic-key"
158
174
  export OPENAI_API_KEY="your-openai-key"
175
+ export GEMINI_API_KEY="your-gemini-key"
159
176
  ```
160
177
 
161
178
  ### Example Configuration File
179
+
162
180
  Create `~/.config/ask/config.toml`:
163
181
 
164
182
  ```toml
@@ -166,7 +184,6 @@ Create `~/.config/ask/config.toml`:
166
184
  default_model = "anthropic"
167
185
 
168
186
  [anthropic]
169
- api_key = "your-anthropic-key"
170
187
  model = "claude-3-haiku-20240307"
171
188
  max_tokens = 512
172
189
 
@@ -175,33 +192,44 @@ model = "claude-3-5-sonnet-20241022"
175
192
  max_tokens = 1024
176
193
 
177
194
  [openai]
178
- api_key = "your-openai-key"
179
195
  model = "gpt-4o"
180
196
  max_tokens = 1024
197
+
198
+ [gemini]
199
+ model = "gemini-2.5-flash"
200
+ max_tokens = 150
201
+
202
+ [gemini.pro]
203
+ model = "gemini-2.5-pro"
204
+ max_tokens = 1024
181
205
  ```
182
206
 
183
207
  ## 🤖 Supported Providers
184
208
 
185
209
  - Anthropic (Claude)
186
210
  - OpenAI (GPT)
211
+ - Google (Gemini)
187
212
 
188
- > **Note:** Get API keys from [Anthropic Console](https://console.anthropic.com/) or [OpenAI Platform](https://platform.openai.com/)
213
+ > **Note:** Get API keys from [Anthropic Console](https://console.anthropic.com/), [OpenAI Platform](https://platform.openai.com/), or [Google AI Studio](https://aistudio.google.com/)
189
214
 
190
215
  ## 🛣️ Roadmap
191
216
 
192
217
  ### Near-term
218
+
193
219
  - [ ] Shell integration and auto-completion
194
220
  - [ ] Command history and favorites
195
221
  - [ ] Safety features (command preview/confirmation)
196
222
  - [ ] Output formatting options
197
223
 
198
224
  ### Medium-term
225
+
199
226
  - [ ] Additional providers (Google, Cohere, Mistral)
200
227
  - [ ] Interactive mode for complex tasks
201
228
  - [ ] Plugin system for custom providers
202
229
  - [ ] Command validation and testing
203
230
 
204
231
  ### Long-term
232
+
205
233
  - [ ] Local model support (Ollama, llama.cpp)
206
234
  - [ ] Learning from user preferences
207
235
  - [ ] Advanced safety and sandboxing
@@ -210,19 +238,22 @@ max_tokens = 1024
210
238
  ## 🔧 Development
211
239
 
212
240
  ### Setup
241
+
213
242
  ```bash
214
- git clone https://github.com/lcford2/ask.git
243
+ git clone https://github.com/lcford2/terminal-sherpa.git
215
244
  cd ask
216
245
  uv sync
217
246
  uv run pre-commit install
218
247
  ```
219
248
 
220
249
  ### Testing
250
+
221
251
  ```bash
222
252
  uv run python -m pytest
223
253
  ```
224
254
 
225
255
  ### Contributing
256
+
226
257
  1. Fork the repository
227
258
  2. Create a feature branch
228
259
  3. Make your changes
terminal_sherpa-0.2.0.dist-info/RECORD ADDED
@@ -0,0 +1,23 @@
1
+ ask/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ ask/config.py,sha256=iHIiMKePia80Sno_XlARqa7pEyW3eZm_Bf5SlUMidiQ,2880
3
+ ask/exceptions.py,sha256=0RLMSbw6j49BEhJN7C8MYaKpuhVeitsBhTGjZmaiHis,434
4
+ ask/main.py,sha256=9mVXwncU2P4OQxE7Oxcqi376A06xluC76kiIoCCqNSc,3936
5
+ ask/providers/__init__.py,sha256=Y0NswA6O8PpE_PDWa-GZ1FNmSXrwReZ9-roUoTOksXU,1184
6
+ ask/providers/anthropic.py,sha256=3bB335ZxRu4a_j7U0tox_6sQxTf4X287GhU2-gr5ctU,2725
7
+ ask/providers/base.py,sha256=91ZbVORYWckSHNwNPiTmgfqQN0FLO9AgV6mptuAkIU0,769
8
+ ask/providers/gemini.py,sha256=c1uQg6ShNkw2AkwDJhjfki7hTBl4vOkT9v1O4ewRfbg,3408
9
+ ask/providers/openai.py,sha256=9PS1AgMr6Nb-OcYYiNYNrG384wNUS8m2lVT04K3hFV8,3683
10
+ terminal_sherpa-0.2.0.dist-info/licenses/LICENSE,sha256=xLe81eIrf0X6CnEDDJXmoXuDzkdMYM3Eq1BgHUpG1JQ,1067
11
+ test/conftest.py,sha256=pjDI0SbIhHxDqJW-BdL7s6lTqM2f8hItxWY8EjC-dL8,1548
12
+ test/test_anthropic.py,sha256=S5OQ67qIZ4VO38eJwAAwJa4JBylJhKCtmcGjCWA8WLY,5687
13
+ test/test_config.py,sha256=FrJ6bsZ6mK46e-8fQfkFGx9GgwHrNfnoI8211R0V9K8,5565
14
+ test/test_exceptions.py,sha256=tw-spMitAdYj9uW_8TjnlyVKKXFC06FR3610WGR-494,1754
15
+ test/test_gemini.py,sha256=sV8FkaU5rLfuu3lGeQdxsa-ZmNnLUYpODaKhayrasSo,8000
16
+ test/test_main.py,sha256=3gZ83nVHMSEmgHSF2UJoELfK028a4vgxLpIk2P1cH1Y,7745
17
+ test/test_openai.py,sha256=KAGQWFrXeu4P9umij7XDoxnKQ2cApv6ImuL8EiG_5W8,8388
18
+ test/test_providers.py,sha256=SejQvCZSEQ5RAfVTCtPZ-39fXnfV17n4gaSxjiHA5UM,2140
19
+ terminal_sherpa-0.2.0.dist-info/METADATA,sha256=V3EAri2cVmsjv3zxWnJxhpEK_2kXnBLxWa5QsNUALc4,6392
20
+ terminal_sherpa-0.2.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
21
+ terminal_sherpa-0.2.0.dist-info/entry_points.txt,sha256=LxG9-J__nMGmeEIi47WVGYC1LLJo1GaADH21hfxEK70,38
22
+ terminal_sherpa-0.2.0.dist-info/top_level.txt,sha256=Y7k5b2NSCkKiA_XPU-4fT_GYangD6JVDug5xwfXvmuQ,9
23
+ terminal_sherpa-0.2.0.dist-info/RECORD,,
terminal_sherpa-0.2.0.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Lucas Ford
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
test/conftest.py CHANGED
@@ -38,6 +38,13 @@ def mock_anthropic_key():
38
38
  yield
39
39
 
40
40
 
41
+ @pytest.fixture
42
+ def mock_gemini_key():
43
+ """Mock Gemini API key in environment."""
44
+ with patch.dict(os.environ, {"GEMINI_API_KEY": "test-gemini-key"}, clear=True):
45
+ yield
46
+
47
+
41
48
  @pytest.fixture
42
49
  def mock_openai_key():
43
50
  """Mock OpenAI API key in environment."""
test/test_gemini.py ADDED
@@ -0,0 +1,244 @@
1
+ """Tests for Gemini provider."""
2
+
3
+ import os
4
+ from unittest.mock import MagicMock, patch
5
+
6
+ import pytest
7
+ from google.genai.types import GenerateContentConfig
8
+
9
+ from ask.config import SYSTEM_PROMPT
10
+ from ask.exceptions import APIError, AuthenticationError, RateLimitError
11
+ from ask.providers.gemini import GeminiProvider
12
+
13
+
14
+ def test_gemini_provider_init():
15
+ """Test provider initialization."""
16
+ config = {"model_name": "gemini-2.5-flash"}
17
+ provider = GeminiProvider(config)
18
+
19
+ assert provider.config == config
20
+ assert provider.client is None
21
+
22
+
23
+ def test_validate_config_success(mock_gemini_key):
24
+ """Test successful config validation."""
25
+ config = {"api_key_env": "GEMINI_API_KEY"}
26
+ provider = GeminiProvider(config)
27
+
28
+ with patch("google.genai.Client") as mock_genai:
29
+ mock_client = MagicMock()
30
+ mock_genai.return_value = mock_client
31
+
32
+ provider.validate_config()
33
+
34
+ assert provider.client == mock_client
35
+ mock_genai.assert_called_once_with(api_key="test-gemini-key")
36
+
37
+
38
+ def test_validate_config_missing_key(mock_env_vars):
39
+ """Test missing API key error."""
40
+ config = {"api_key_env": "GEMINI_API_KEY"}
41
+ provider = GeminiProvider(config)
42
+
43
+ with pytest.raises(
44
+ AuthenticationError, match="GEMINI_API_KEY environment variable is required"
45
+ ):
46
+ provider.validate_config()
47
+
48
+
49
+ def test_validate_config_custom_env():
50
+ """Test custom environment variable."""
51
+ config = {"api_key_env": "CUSTOM_GEMINI_KEY"}
52
+ provider = GeminiProvider(config)
53
+
54
+ with patch.dict(os.environ, {"CUSTOM_GEMINI_KEY": "custom-key"}):
55
+ with patch("google.genai.Client") as mock_genai:
56
+ mock_client = MagicMock()
57
+ mock_genai.return_value = mock_client
58
+
59
+ provider.validate_config()
60
+
61
+ mock_genai.assert_called_once_with(api_key="custom-key")
62
+
63
+
64
+ def test_get_default_config():
65
+ """Test default configuration values."""
66
+ default_config = GeminiProvider.get_default_config()
67
+
68
+ assert default_config["model_name"] == "gemini-2.5-flash"
69
+ assert default_config["max_tokens"] == 150
70
+ assert default_config["api_key_env"] == "GEMINI_API_KEY"
71
+ assert default_config["temperature"] == 0.0
72
+ assert default_config["system_prompt"] == SYSTEM_PROMPT
73
+
74
+
75
+ def test_parse_response_empty_candidates():
76
+ """Test _parse_response with empty candidates."""
77
+ provider = GeminiProvider({})
78
+
79
+ # Test with None candidates
80
+ mock_response = MagicMock()
81
+ mock_response.candidates = None
82
+ result = provider._parse_response(mock_response)
83
+ assert result == ""
84
+
85
+ # Test with empty candidates list
86
+ mock_response.candidates = []
87
+ result = provider._parse_response(mock_response)
88
+ assert result == ""
89
+
90
+
91
+ def test_parse_response_none_parts():
92
+ """Test _parse_response with None parts."""
93
+ provider = GeminiProvider({})
94
+
95
+ mock_response = MagicMock()
96
+ mock_response.candidates = [MagicMock()]
97
+ mock_response.candidates[0].content = MagicMock()
98
+ mock_response.candidates[0].content.parts = None
99
+
100
+ result = provider._parse_response(mock_response)
101
+ assert result == ""
102
+
103
+
104
+ def test_parse_response_success():
105
+ """Test _parse_response with successful response."""
106
+ provider = GeminiProvider({})
107
+
108
+ mock_response = MagicMock()
109
+ mock_response.candidates = [MagicMock()]
110
+ mock_response.candidates[0].content = MagicMock()
111
+ mock_response.candidates[0].content.parts = [
112
+ MagicMock(text="part1"),
113
+ MagicMock(text="part2"),
114
+ ]
115
+
116
+ result = provider._parse_response(mock_response)
117
+ assert result == "part1part2"
118
+
119
+
120
+ def test_get_bash_command_success(mock_gemini_key):
121
+ """Test successful command generation."""
122
+ config = {"model_name": "gemini-2.5-flash", "max_tokens": 150}
123
+ provider = GeminiProvider(config)
124
+
125
+ mock_response = MagicMock()
126
+ mock_response.candidates = [MagicMock()]
127
+ mock_response.candidates[0].content = MagicMock()
128
+ mock_response.candidates[0].content.parts = [MagicMock(text="ls -la")]
129
+
130
+ with patch("google.genai.Client") as mock_genai:
131
+ mock_client = MagicMock()
132
+ mock_client.models.generate_content.return_value = mock_response
133
+ mock_genai.return_value = mock_client
134
+
135
+ result = provider.get_bash_command("list files")
136
+
137
+ assert result == "ls -la"
138
+ mock_client.models.generate_content.assert_called_once_with(
139
+ model="gemini-2.5-flash",
140
+ contents="list files",
141
+ config=GenerateContentConfig(
142
+ max_output_tokens=150,
143
+ temperature=0.0,
144
+ system_instruction=SYSTEM_PROMPT,
145
+ ),
146
+ )
147
+
148
+
149
+ def test_get_bash_command_api_exception(mock_gemini_key):
150
+ """Test get_bash_command with API exception."""
151
+ config = {"model_name": "gemini-2.5-flash"}
152
+ provider = GeminiProvider(config)
153
+
154
+ with patch("google.genai.Client") as mock_genai:
155
+ mock_client = MagicMock()
156
+ mock_client.models.generate_content.side_effect = Exception("API error")
157
+ mock_genai.return_value = mock_client
158
+
159
+ # The generic exception is mapped by _handle_api_error and re-raised as APIError
160
+ with pytest.raises(APIError, match="API request failed"):
161
+ provider.get_bash_command("test prompt")
162
+
163
+
164
+ def test_get_bash_command_auto_validate(mock_gemini_key):
165
+ """Test auto-validation behavior."""
166
+ config = {}
167
+ provider = GeminiProvider(config)
168
+
169
+ mock_response = MagicMock()
170
+ mock_response.candidates = [MagicMock()]
171
+ mock_response.candidates[0].content = MagicMock()
172
+ mock_response.candidates[0].content.parts = [MagicMock(text="ls -la")]
173
+
174
+ with patch("google.genai.Client") as mock_genai:
175
+ mock_client = MagicMock()
176
+ mock_client.models.generate_content.return_value = mock_response
177
+ mock_genai.return_value = mock_client
178
+
179
+ # Client should be None initially
180
+ assert provider.client is None
181
+
182
+ result = provider.get_bash_command("list files")
183
+
184
+ # Client should be set after auto-validation
185
+ assert provider.client is not None
186
+ assert result == "ls -la"
187
+
188
+
189
+ def test_handle_api_error_auth():
190
+ """Test authentication error mapping."""
191
+ provider = GeminiProvider({})
192
+
193
+ with pytest.raises(AuthenticationError, match="Invalid API key"):
194
+ provider._handle_api_error(Exception("authentication failed"))
195
+
196
+
197
+ def test_handle_api_error_rate_limit():
198
+ """Test rate limit error mapping."""
199
+ provider = GeminiProvider({})
200
+
201
+ with pytest.raises(RateLimitError, match="API rate limit exceeded"):
202
+ provider._handle_api_error(Exception("rate limit exceeded"))
203
+
204
+
205
+ def test_handle_api_error_generic():
206
+ """Test generic API error mapping."""
207
+ provider = GeminiProvider({})
208
+
209
+ with pytest.raises(APIError, match="API request failed"):
210
+ provider._handle_api_error(Exception("unexpected error"))
211
+
212
+
213
+ def test_config_parameter_usage(mock_gemini_key):
214
+ """Test configuration parameter usage."""
215
+ config = {
216
+ "model_name": "gemini-2.5-pro",
217
+ "max_tokens": 1024,
218
+ "temperature": 0.5,
219
+ "system_prompt": "Custom system prompt",
220
+ }
221
+ provider = GeminiProvider(config)
222
+
223
+ mock_response = MagicMock()
224
+ mock_response.candidates = [MagicMock()]
225
+ mock_response.candidates[0].content = MagicMock()
226
+ mock_response.candidates[0].content.parts = [MagicMock(text="custom response")]
227
+
228
+ with patch("google.genai.Client") as mock_genai:
229
+ mock_client = MagicMock()
230
+ mock_client.models.generate_content.return_value = mock_response
231
+ mock_genai.return_value = mock_client
232
+
233
+ result = provider.get_bash_command("test prompt")
234
+
235
+ assert result == "custom response"
236
+ mock_client.models.generate_content.assert_called_once_with(
237
+ model="gemini-2.5-pro",
238
+ contents="test prompt",
239
+ config=GenerateContentConfig(
240
+ max_output_tokens=1024,
241
+ temperature=0.5,
242
+ system_instruction="Custom system prompt",
243
+ ),
244
+ )
test/test_openai.py CHANGED
@@ -196,14 +196,6 @@ def test_handle_api_error_rate_limit():
196
196
  provider._handle_api_error(Exception("rate limit exceeded"))
197
197
 
198
198
 
199
- def test_handle_api_error_quota():
200
- """Test quota error mapping."""
201
- provider = OpenAIProvider({})
202
-
203
- with pytest.raises(RateLimitError, match="API rate limit exceeded"):
204
- provider._handle_api_error(Exception("quota exceeded"))
205
-
206
-
207
199
  def test_handle_api_error_generic():
208
200
  """Test generic API error mapping."""
209
201
  provider = OpenAIProvider({})
@@ -1,20 +0,0 @@
1
- ask/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
- ask/config.py,sha256=iHIiMKePia80Sno_XlARqa7pEyW3eZm_Bf5SlUMidiQ,2880
3
- ask/exceptions.py,sha256=0RLMSbw6j49BEhJN7C8MYaKpuhVeitsBhTGjZmaiHis,434
4
- ask/main.py,sha256=SJ084NXNvd-pO2hY8oFO__NFR1F07YxeOacBuv65crc,3867
5
- ask/providers/__init__.py,sha256=IINO0hNGarFpf62lCuVtIqMeAOC1CtR_wDWUL6_iWzI,1105
6
- ask/providers/anthropic.py,sha256=ZFlnQZaxPGHLYjAacapE-63LoSu43xYnZ30Ajmrmgw0,2703
7
- ask/providers/base.py,sha256=91ZbVORYWckSHNwNPiTmgfqQN0FLO9AgV6mptuAkIU0,769
8
- ask/providers/openai.py,sha256=R-UgVArtlpn8F4qkliQ7unNk11ekTPL0hFZCfubGYpg,3661
9
- test/conftest.py,sha256=V9ebLC-soz0hHocZLZAibzsVdvzZh4-elcTtKmZ2FyA,1363
10
- test/test_anthropic.py,sha256=S5OQ67qIZ4VO38eJwAAwJa4JBylJhKCtmcGjCWA8WLY,5687
11
- test/test_config.py,sha256=FrJ6bsZ6mK46e-8fQfkFGx9GgwHrNfnoI8211R0V9K8,5565
12
- test/test_exceptions.py,sha256=tw-spMitAdYj9uW_8TjnlyVKKXFC06FR3610WGR-494,1754
13
- test/test_main.py,sha256=3gZ83nVHMSEmgHSF2UJoELfK028a4vgxLpIk2P1cH1Y,7745
14
- test/test_openai.py,sha256=3dDwlxnKGwl5aJcKHyNgrwrciJBZ96a51fHuxr-V8FA,8633
15
- test/test_providers.py,sha256=SejQvCZSEQ5RAfVTCtPZ-39fXnfV17n4gaSxjiHA5UM,2140
16
- terminal_sherpa-0.1.0.dist-info/METADATA,sha256=hh4NTZNv3yisUjLUcsJxlOBHUiK70l0u82CQIxJrjJI,5610
17
- terminal_sherpa-0.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
18
- terminal_sherpa-0.1.0.dist-info/entry_points.txt,sha256=LxG9-J__nMGmeEIi47WVGYC1LLJo1GaADH21hfxEK70,38
19
- terminal_sherpa-0.1.0.dist-info/top_level.txt,sha256=Y7k5b2NSCkKiA_XPU-4fT_GYangD6JVDug5xwfXvmuQ,9
20
- terminal_sherpa-0.1.0.dist-info/RECORD,,