terminal-sherpa 0.1.0__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ask/config.py CHANGED
@@ -2,7 +2,7 @@
2
2
 
3
3
  import os
4
4
  from pathlib import Path
5
- from typing import Any
5
+ from typing import Any, Optional
6
6
 
7
7
  import toml
8
8
 
@@ -16,7 +16,7 @@ SYSTEM_PROMPT = (
16
16
  )
17
17
 
18
18
 
19
- def get_config_path() -> Path | None:
19
+ def get_config_path() -> Optional[Path]:
20
20
  """Find config file using XDG standard."""
21
21
  # Primary location: $XDG_CONFIG_HOME/ask/config.toml
22
22
  xdg_config_home = os.environ.get("XDG_CONFIG_HOME")
@@ -77,13 +77,13 @@ def get_provider_config(
77
77
  return provider_name, merged_config
78
78
 
79
79
 
80
- def get_default_model(config: dict[str, Any]) -> str | None:
80
+ def get_default_model(config: dict[str, Any]) -> Optional[str]:
81
81
  """Get default model from configuration."""
82
82
  global_config = config.get("ask", {})
83
83
  return global_config.get("default_model")
84
84
 
85
85
 
86
- def get_default_provider() -> str | None:
86
+ def get_default_provider() -> Optional[str]:
87
87
  """Determine fallback provider from environment variables."""
88
88
  # Check for API keys in order of preference: claude -> openai
89
89
  if os.environ.get("ANTHROPIC_API_KEY"):
ask/main.py CHANGED
@@ -76,10 +76,11 @@ def resolve_provider(args, config_data) -> ProviderInterface:
76
76
  # Use default provider from environment variables
77
77
  default_provider = config.get_default_provider()
78
78
  if not default_provider:
79
+ keys = ["GEMINI_API_KEY", "ANTHROPIC_API_KEY", "OPENAI_API_KEY"]
79
80
  logger.error(
80
81
  "No default model configured and no API keys found. "
81
- "Please set ANTHROPIC_API_KEY or OPENAI_API_KEY environment "
82
- "variable, or set a default_provider in your config file."
82
+ f"Please set one or more of {keys} environment variables, "
83
+ "or set a default_provider in your config file."
83
84
  )
84
85
  sys.exit(1)
85
86
  logger.debug(f"Using default provider from environment: {default_provider}")
ask/providers/__init__.py CHANGED
@@ -2,6 +2,7 @@
2
2
 
3
3
  from .anthropic import AnthropicProvider
4
4
  from .base import ProviderInterface
5
+ from .gemini import GeminiProvider
5
6
  from .openai import OpenAIProvider
6
7
 
7
8
  # Provider registry - maps provider names to their classes
@@ -33,3 +34,4 @@ def list_providers() -> list[str]:
33
34
 
34
35
  register_provider("anthropic", AnthropicProvider)
35
36
  register_provider("openai", OpenAIProvider)
37
+ register_provider("gemini", GeminiProvider)
@@ -1,7 +1,7 @@
1
1
  """Anthropic provider implementation."""
2
2
 
3
3
  import os
4
- from typing import Any
4
+ from typing import Any, Optional
5
5
 
6
6
  import anthropic
7
7
 
@@ -16,7 +16,7 @@ class AnthropicProvider(ProviderInterface):
16
16
  def __init__(self, config: dict[str, Any]):
17
17
  """Initialize Anthropic provider with configuration."""
18
18
  super().__init__(config)
19
- self.client: anthropic.Anthropic | None = None
19
+ self.client: Optional[anthropic.Anthropic] = None
20
20
 
21
21
  def get_bash_command(self, prompt: str) -> str:
22
22
  """Generate bash command from natural language prompt."""
@@ -37,6 +37,7 @@ class AnthropicProvider(ProviderInterface):
37
37
  return response.content[0].text
38
38
  except Exception as e:
39
39
  self._handle_api_error(e)
40
+ return ""
40
41
 
41
42
  def validate_config(self) -> None:
42
43
  """Validate provider configuration and API key."""
@@ -0,0 +1,89 @@
1
+ """Gemini provider implementation."""
2
+
3
+ import os
4
+ from typing import Any, Optional
5
+
6
+ from google import genai
7
+ from google.genai.types import GenerateContentConfig, GenerateContentResponse
8
+
9
+ from ask.config import SYSTEM_PROMPT
10
+ from ask.exceptions import APIError, AuthenticationError, RateLimitError
11
+ from ask.providers.base import ProviderInterface
12
+
13
+
14
+ class GeminiProvider(ProviderInterface):
15
+ """Gemini AI provider implementation."""
16
+
17
+ def __init__(self, config: dict[str, Any]):
18
+ """Initialize Gemini provider with configuration."""
19
+ super().__init__(config)
20
+ self.client: Optional[genai.Client] = None
21
+
22
+ def _parse_response(self, response: GenerateContentResponse) -> str:
23
+ """Parse response from Gemini API."""
24
+ if response.candidates is None or len(response.candidates) == 0:
25
+ return ""
26
+ parts = response.candidates[0].content.parts
27
+ if parts is None:
28
+ return ""
29
+ return "".join([part.text for part in parts])
30
+
31
+ def get_bash_command(self, prompt: str) -> str:
32
+ """Generate bash command from natural language prompt."""
33
+ if self.client is None:
34
+ self.validate_config()
35
+
36
+ # After validate_config(), client should be set
37
+ assert self.client is not None, "Client should be initialized after validation"
38
+
39
+ try:
40
+ # max_tokens=self.config.get("max_tokens", 150),
41
+ # temperature=self.config.get("temperature", 0.0),
42
+ # system=self.config.get("system_prompt", SYSTEM_PROMPT),
43
+ response = self.client.models.generate_content(
44
+ model=self.config.get("model_name", "gemini-2.5-flash"),
45
+ contents=prompt,
46
+ config=GenerateContentConfig(
47
+ max_output_tokens=self.config.get("max_tokens", 150),
48
+ temperature=self.config.get("temperature", 0.0),
49
+ system_instruction=self.config.get("system_prompt", SYSTEM_PROMPT),
50
+ ),
51
+ )
52
+ return self._parse_response(response)
53
+ except Exception as e:
54
+ self._handle_api_error(e)
55
+ return ""
56
+
57
+ def validate_config(self) -> None:
58
+ """Validate provider configuration and API key."""
59
+ api_key_env = self.config.get("api_key_env", "GEMINI_API_KEY")
60
+ api_key = os.environ.get(api_key_env)
61
+
62
+ if not api_key:
63
+ raise AuthenticationError(
64
+ f"Error: {api_key_env} environment variable is required"
65
+ )
66
+
67
+ self.client = genai.Client(api_key=api_key)
68
+
69
+ def _handle_api_error(self, error: Exception):
70
+ """Handle API errors and map them to standard exceptions."""
71
+ error_str = str(error).lower()
72
+
73
+ if "authentication" in error_str or "unauthorized" in error_str:
74
+ raise AuthenticationError("Error: Invalid API key")
75
+ elif "rate limit" in error_str:
76
+ raise RateLimitError("Error: API rate limit exceeded")
77
+ else:
78
+ raise APIError(f"Error: API request failed - {error}")
79
+
80
+ @classmethod
81
+ def get_default_config(cls) -> dict[str, Any]:
82
+ """Return default configuration for Gemini provider."""
83
+ return {
84
+ "model_name": "gemini-2.5-flash",
85
+ "max_tokens": 150,
86
+ "api_key_env": "GEMINI_API_KEY",
87
+ "temperature": 0.0,
88
+ "system_prompt": SYSTEM_PROMPT,
89
+ }
ask/providers/openai.py CHANGED
@@ -2,7 +2,7 @@
2
2
 
3
3
  import os
4
4
  import re
5
- from typing import Any, NoReturn
5
+ from typing import Any, NoReturn, Optional
6
6
 
7
7
  import openai
8
8
 
@@ -21,7 +21,7 @@ class OpenAIProvider(ProviderInterface):
21
21
  config: The configuration for the OpenAI provider
22
22
  """
23
23
  super().__init__(config)
24
- self.client: openai.OpenAI | None = None
24
+ self.client: Optional[openai.OpenAI] = None
25
25
 
26
26
  def get_bash_command(self, prompt: str) -> str:
27
27
  """Generate bash command from natural language prompt.
@@ -62,6 +62,7 @@ class OpenAIProvider(ProviderInterface):
62
62
  return re_match.group(1)
63
63
  except Exception as e:
64
64
  self._handle_api_error(e)
65
+ return ""
65
66
 
66
67
  def validate_config(self) -> None:
67
68
  """Validate provider configuration and API key."""
@@ -1,35 +1,55 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: terminal-sherpa
3
- Version: 0.1.0
3
+ Version: 0.3.0
4
4
  Summary: AI-powered bash command generator
5
- Requires-Python: >=3.13
5
+ Project-URL: Homepage, https://github.com/lcford2/terminal-sherpa
6
+ Project-URL: Issues, https://github.com/lcford2/terminal-sherpa/issues
7
+ Classifier: Development Status :: 4 - Beta
8
+ Classifier: Environment :: Console
9
+ Classifier: Intended Audience :: Developers
10
+ Classifier: Operating System :: OS Independent
11
+ Classifier: License :: OSI Approved :: MIT License
12
+ Classifier: Programming Language :: Python
13
+ Classifier: Programming Language :: Python :: 3.8
14
+ Classifier: Programming Language :: Python :: 3.9
15
+ Classifier: Programming Language :: Python :: 3.10
16
+ Classifier: Programming Language :: Python :: 3.11
17
+ Classifier: Programming Language :: Python :: 3.12
18
+ Classifier: Programming Language :: Python :: 3.13
19
+ Classifier: Programming Language :: Python :: 3 :: Only
20
+ Classifier: Topic :: Utilities
21
+ Classifier: Topic :: Software Development :: Libraries
22
+ Requires-Python: >=3.9
6
23
  Description-Content-Type: text/markdown
24
+ License-File: LICENSE
7
25
  Requires-Dist: anthropic>=0.7.0
8
- Requires-Dist: black>=25.1.0
26
+ Requires-Dist: google-genai>=1.26.0
9
27
  Requires-Dist: loguru>=0.7.0
10
28
  Requires-Dist: openai>=1.0.0
11
- Requires-Dist: pytest>=8.0.0
12
- Requires-Dist: pytest-cov>=4.0.0
13
- Requires-Dist: pytest-mock>=3.12.0
14
- Requires-Dist: ruff>=0.12.3
29
+ Requires-Dist: setuptools>=80.9.0
15
30
  Requires-Dist: toml>=0.10.0
31
+ Dynamic: license-file
16
32
 
17
- # ask
33
+ # terminal-sherpa
18
34
 
19
35
  A lightweight AI chat interface for fellow terminal dwellers.
20
36
 
21
37
  Turn natural language into bash commands instantly.
22
38
  Stop googling syntax and start asking.
23
39
 
24
- [![codecov](https://codecov.io/github/lcford2/ask/graph/badge.svg?token=2MXHNL3RHE)](https://codecov.io/github/lcford2/ask)
40
+ [![PyPI - Version](https://img.shields.io/pypi/v/terminal-sherpa)](https://pypi.python.org/pypi/terminal-sherpa)
41
+ [![GitHub License](https://img.shields.io/github/license/lcford2/terminal-sherpa)](https://github.com/lcford2/terminal-sherpa/blob/main/LICENSE)
42
+ [![Python Versions](https://img.shields.io/pypi/pyversions/terminal-sherpa)](https://pypi.python.org/pypi/terminal-sherpa)
43
+ [![Actions status](https://github.com/lcford2/terminal-sherpa/actions/workflows/main.yml/badge.svg)](https://github.com/lcford2/terminal-sherpa/actions)
44
+ [![codecov](https://codecov.io/github/lcford2/terminal-sherpa/graph/badge.svg?token=2MXHNL3RHE)](https://codecov.io/github/lcford2/terminal-sherpa)
25
45
 
26
46
  ## 🚀 Getting Started
27
47
 
28
48
  Get up and running:
29
49
 
30
50
  ```bash
31
- # Install ask
32
- uv tool install ask
51
+ # Install terminal-sherpa
52
+ pip install terminal-sherpa # installs the `ask` CLI tool
33
53
 
34
54
  # Set your API key
35
55
  export ANTHROPIC_API_KEY="your-key-here"
@@ -39,6 +59,7 @@ ask "find all .py files modified in the last week"
39
59
  ```
40
60
 
41
61
  **Example output:**
62
+
42
63
  ```bash
43
64
  find . -name "*.py" -mtime -7
44
65
  ```
@@ -46,7 +67,7 @@ find . -name "*.py" -mtime -7
46
67
  ## ✨ Features
47
68
 
48
69
  - **Natural language to bash conversion** - Describe what you want, get the command
49
- - **Multiple AI provider support** - Choose between Anthropic (Claude) and OpenAI (GPT) models
70
+ - **Multiple AI provider support** - Choose between Anthropic (Claude), OpenAI (GPT), and Google (Gemini) models
50
71
  - **Flexible configuration system** - Set defaults, customize models, and manage API keys
51
72
  - **XDG-compliant config files** - Follows standard configuration file locations
52
73
  - **Verbose logging support** - Debug and understand what's happening under the hood
@@ -54,30 +75,29 @@ find . -name "*.py" -mtime -7
54
75
  ## 📦 Installation
55
76
 
56
77
  ### Requirements
57
- - Python 3.13+
78
+
79
+ - Python 3.9+
58
80
  - API key for Anthropic, OpenAI, or Google Gemini
59
81
 
60
82
  ### Install Methods
61
83
 
62
- **Recommended (uv):**
63
- ```bash
64
- uv tool install ask
65
- ```
66
-
67
84
  **Using pip:**
85
+
68
86
  ```bash
69
- pip install ask
87
+ pip install terminal-sherpa
70
88
  ```
71
89
 
72
90
  **From source:**
91
+
73
92
  ```bash
74
- git clone https://github.com/lcford2/ask.git
75
- cd ask
93
+ git clone https://github.com/lcford2/terminal-sherpa.git
94
+ cd terminal-sherpa
76
95
  uv sync
77
96
  uv run ask "your prompt here"
78
97
  ```
79
98
 
80
99
  **Verify installation:**
100
+
81
101
  ```bash
82
102
  ask --help
83
103
  ```
@@ -85,20 +105,23 @@ ask --help
85
105
  ## 💡 Usage
86
106
 
87
107
  ### Basic Syntax
108
+
88
109
  ```bash
89
110
  ask "your natural language prompt"
90
111
  ```
91
112
 
92
113
  ### Command Options
93
114
 
94
- | Option | Description | Example |
95
- |--------|-------------|---------|
115
+ | Option | Description | Example |
116
+ | ------------------------ | -------------------------- | --------------------------------------------------- |
96
117
  | `--model provider:model` | Specify provider and model | `ask --model anthropic:claude-3-haiku "list files"` |
97
- | `--verbose` | Enable verbose logging | `ask --verbose "compress this folder"` |
118
+ | | | `ask --model gemini:gemini-2.5-flash "list files"` |
119
+ | `--verbose` | Enable verbose logging | `ask --verbose "compress this folder"` |
98
120
 
99
121
  ### Practical Examples
100
122
 
101
123
  **File Operations:**
124
+
102
125
  ```bash
103
126
  ask "find all files larger than 100MB"
104
127
  # Example output: find . -size +100M
@@ -108,6 +131,7 @@ ask "create a backup of config.txt with timestamp"
108
131
  ```
109
132
 
110
133
  **Git Commands:**
134
+
111
135
  ```bash
112
136
  ask "show git log for last 5 commits with one line each"
113
137
  # Example output: git log --oneline -5
@@ -117,6 +141,7 @@ ask "delete all local branches that have been merged"
117
141
  ```
118
142
 
119
143
  **System Administration:**
144
+
120
145
  ```bash
121
146
  ask "check disk usage of current directory sorted by size"
122
147
  # Example output: du -sh * | sort -hr
@@ -126,6 +151,7 @@ ask "find processes using port 8080"
126
151
  ```
127
152
 
128
153
  **Text Processing:**
154
+
129
155
  ```bash
130
156
  ask "count lines in all Python files"
131
157
  # Example output: find . -name "*.py" -exec wc -l {} + | tail -1
@@ -135,6 +161,7 @@ ask "replace all tabs with spaces in file.txt"
135
161
  ```
136
162
 
137
163
  **Network Operations:**
164
+
138
165
  ```bash
139
166
  ask "download file from URL and save to downloads folder"
140
167
  # Example output: curl -o ~/Downloads/filename "https://example.com/file"
@@ -146,6 +173,7 @@ ask "check if port 443 is open on example.com"
146
173
  ## ⚙️ Configuration
147
174
 
148
175
  ### Configuration File Locations
176
+
149
177
  Ask follows XDG Base Directory Specification:
150
178
 
151
179
  1. `$XDG_CONFIG_HOME/ask/config.toml`
@@ -153,12 +181,15 @@ Ask follows XDG Base Directory Specification:
153
181
  1. `~/.ask/config.toml` (fallback)
154
182
 
155
183
  ### Environment Variables
184
+
156
185
  ```bash
157
186
  export ANTHROPIC_API_KEY="your-anthropic-key"
158
187
  export OPENAI_API_KEY="your-openai-key"
188
+ export GEMINI_API_KEY="your-gemini-key"
159
189
  ```
160
190
 
161
191
  ### Example Configuration File
192
+
162
193
  Create `~/.config/ask/config.toml`:
163
194
 
164
195
  ```toml
@@ -166,7 +197,6 @@ Create `~/.config/ask/config.toml`:
166
197
  default_model = "anthropic"
167
198
 
168
199
  [anthropic]
169
- api_key = "your-anthropic-key"
170
200
  model = "claude-3-haiku-20240307"
171
201
  max_tokens = 512
172
202
 
@@ -175,33 +205,44 @@ model = "claude-3-5-sonnet-20241022"
175
205
  max_tokens = 1024
176
206
 
177
207
  [openai]
178
- api_key = "your-openai-key"
179
208
  model = "gpt-4o"
180
209
  max_tokens = 1024
210
+
211
+ [gemini]
212
+ model = "gemini-2.5-flash"
213
+ max_tokens = 150
214
+
215
+ [gemini.pro]
216
+ model = "gemini-2.5-pro"
217
+ max_tokens = 1024
181
218
  ```
182
219
 
183
220
  ## 🤖 Supported Providers
184
221
 
185
222
  - Anthropic (Claude)
186
223
  - OpenAI (GPT)
224
+ - Google (Gemini)
187
225
 
188
- > **Note:** Get API keys from [Anthropic Console](https://console.anthropic.com/) or [OpenAI Platform](https://platform.openai.com/)
226
+ > **Note:** Get API keys from [Anthropic Console](https://console.anthropic.com/), [OpenAI Platform](https://platform.openai.com/), or [Google AI Studio](https://aistudio.google.com/)
189
227
 
190
228
  ## 🛣️ Roadmap
191
229
 
192
230
  ### Near-term
231
+
193
232
  - [ ] Shell integration and auto-completion
194
233
  - [ ] Command history and favorites
195
234
  - [ ] Safety features (command preview/confirmation)
196
235
  - [ ] Output formatting options
197
236
 
198
237
  ### Medium-term
238
+
199
239
  - [ ] Additional providers (Google, Cohere, Mistral)
200
240
  - [ ] Interactive mode for complex tasks
201
241
  - [ ] Plugin system for custom providers
202
242
  - [ ] Command validation and testing
203
243
 
204
244
  ### Long-term
245
+
205
246
  - [ ] Local model support (Ollama, llama.cpp)
206
247
  - [ ] Learning from user preferences
207
248
  - [ ] Advanced safety and sandboxing
@@ -210,19 +251,22 @@ max_tokens = 1024
210
251
  ## 🔧 Development
211
252
 
212
253
  ### Setup
254
+
213
255
  ```bash
214
- git clone https://github.com/lcford2/ask.git
256
+ git clone https://github.com/lcford2/terminal-sherpa.git
215
257
  cd terminal-sherpa
216
258
  uv sync
217
259
  uv run pre-commit install
218
260
  ```
219
261
 
220
262
  ### Testing
263
+
221
264
  ```bash
222
265
  uv run python -m pytest
223
266
  ```
224
267
 
225
268
  ### Contributing
269
+
226
270
  1. Fork the repository
227
271
  2. Create a feature branch
228
272
  3. Make your changes
@@ -0,0 +1,23 @@
1
+ ask/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ ask/config.py,sha256=12YKck6Q9vaSESfuI7kKFbs1hkxzts2FOsxR9eK96MY,2899
3
+ ask/exceptions.py,sha256=0RLMSbw6j49BEhJN7C8MYaKpuhVeitsBhTGjZmaiHis,434
4
+ ask/main.py,sha256=9mVXwncU2P4OQxE7Oxcqi376A06xluC76kiIoCCqNSc,3936
5
+ ask/providers/__init__.py,sha256=Y0NswA6O8PpE_PDWa-GZ1FNmSXrwReZ9-roUoTOksXU,1184
6
+ ask/providers/anthropic.py,sha256=M6bcOxDbbWQrJ6kWWtYqsVk6NsZOqX7PzxyiI_yji1Q,2738
7
+ ask/providers/base.py,sha256=91ZbVORYWckSHNwNPiTmgfqQN0FLO9AgV6mptuAkIU0,769
8
+ ask/providers/gemini.py,sha256=taFZYEygiEf05XCm4AKSKL2F_BQwLrU4Y5Ac-WN5owk,3421
9
+ ask/providers/openai.py,sha256=jVyRH4FRdF_91iuK5Tga4as9zbyGKPPFW90ewGG5s5k,3696
10
+ terminal_sherpa-0.3.0.dist-info/licenses/LICENSE,sha256=xLe81eIrf0X6CnEDDJXmoXuDzkdMYM3Eq1BgHUpG1JQ,1067
11
+ test/conftest.py,sha256=pjDI0SbIhHxDqJW-BdL7s6lTqM2f8hItxWY8EjC-dL8,1548
12
+ test/test_anthropic.py,sha256=S5OQ67qIZ4VO38eJwAAwJa4JBylJhKCtmcGjCWA8WLY,5687
13
+ test/test_config.py,sha256=FrJ6bsZ6mK46e-8fQfkFGx9GgwHrNfnoI8211R0V9K8,5565
14
+ test/test_exceptions.py,sha256=tw-spMitAdYj9uW_8TjnlyVKKXFC06FR3610WGR-494,1754
15
+ test/test_gemini.py,sha256=sV8FkaU5rLfuu3lGeQdxsa-ZmNnLUYpODaKhayrasSo,8000
16
+ test/test_main.py,sha256=3gZ83nVHMSEmgHSF2UJoELfK028a4vgxLpIk2P1cH1Y,7745
17
+ test/test_openai.py,sha256=KAGQWFrXeu4P9umij7XDoxnKQ2cApv6ImuL8EiG_5W8,8388
18
+ test/test_providers.py,sha256=SejQvCZSEQ5RAfVTCtPZ-39fXnfV17n4gaSxjiHA5UM,2140
19
+ terminal_sherpa-0.3.0.dist-info/METADATA,sha256=r0ARy4RTO3vnlxn6tizVE-aEDLj79MyXLTl5ZxLp0Yk,7521
20
+ terminal_sherpa-0.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
21
+ terminal_sherpa-0.3.0.dist-info/entry_points.txt,sha256=LxG9-J__nMGmeEIi47WVGYC1LLJo1GaADH21hfxEK70,38
22
+ terminal_sherpa-0.3.0.dist-info/top_level.txt,sha256=Y7k5b2NSCkKiA_XPU-4fT_GYangD6JVDug5xwfXvmuQ,9
23
+ terminal_sherpa-0.3.0.dist-info/RECORD,,
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Lucas Ford
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
test/conftest.py CHANGED
@@ -38,6 +38,13 @@ def mock_anthropic_key():
38
38
  yield
39
39
 
40
40
 
41
+ @pytest.fixture
42
+ def mock_gemini_key():
43
+ """Mock Gemini API key in environment."""
44
+ with patch.dict(os.environ, {"GEMINI_API_KEY": "test-gemini-key"}, clear=True):
45
+ yield
46
+
47
+
41
48
  @pytest.fixture
42
49
  def mock_openai_key():
43
50
  """Mock OpenAI API key in environment."""
test/test_gemini.py ADDED
@@ -0,0 +1,244 @@
1
+ """Tests for Gemini provider."""
2
+
3
+ import os
4
+ from unittest.mock import MagicMock, patch
5
+
6
+ import pytest
7
+ from google.genai.types import GenerateContentConfig
8
+
9
+ from ask.config import SYSTEM_PROMPT
10
+ from ask.exceptions import APIError, AuthenticationError, RateLimitError
11
+ from ask.providers.gemini import GeminiProvider
12
+
13
+
14
+ def test_gemini_provider_init():
15
+ """Test provider initialization."""
16
+ config = {"model_name": "gemini-2.5-flash"}
17
+ provider = GeminiProvider(config)
18
+
19
+ assert provider.config == config
20
+ assert provider.client is None
21
+
22
+
23
+ def test_validate_config_success(mock_gemini_key):
24
+ """Test successful config validation."""
25
+ config = {"api_key_env": "GEMINI_API_KEY"}
26
+ provider = GeminiProvider(config)
27
+
28
+ with patch("google.genai.Client") as mock_genai:
29
+ mock_client = MagicMock()
30
+ mock_genai.return_value = mock_client
31
+
32
+ provider.validate_config()
33
+
34
+ assert provider.client == mock_client
35
+ mock_genai.assert_called_once_with(api_key="test-gemini-key")
36
+
37
+
38
+ def test_validate_config_missing_key(mock_env_vars):
39
+ """Test missing API key error."""
40
+ config = {"api_key_env": "GEMINI_API_KEY"}
41
+ provider = GeminiProvider(config)
42
+
43
+ with pytest.raises(
44
+ AuthenticationError, match="GEMINI_API_KEY environment variable is required"
45
+ ):
46
+ provider.validate_config()
47
+
48
+
49
+ def test_validate_config_custom_env():
50
+ """Test custom environment variable."""
51
+ config = {"api_key_env": "CUSTOM_GEMINI_KEY"}
52
+ provider = GeminiProvider(config)
53
+
54
+ with patch.dict(os.environ, {"CUSTOM_GEMINI_KEY": "custom-key"}):
55
+ with patch("google.genai.Client") as mock_genai:
56
+ mock_client = MagicMock()
57
+ mock_genai.return_value = mock_client
58
+
59
+ provider.validate_config()
60
+
61
+ mock_genai.assert_called_once_with(api_key="custom-key")
62
+
63
+
64
+ def test_get_default_config():
65
+ """Test default configuration values."""
66
+ default_config = GeminiProvider.get_default_config()
67
+
68
+ assert default_config["model_name"] == "gemini-2.5-flash"
69
+ assert default_config["max_tokens"] == 150
70
+ assert default_config["api_key_env"] == "GEMINI_API_KEY"
71
+ assert default_config["temperature"] == 0.0
72
+ assert default_config["system_prompt"] == SYSTEM_PROMPT
73
+
74
+
75
+ def test_parse_response_empty_candidates():
76
+ """Test _parse_response with empty candidates."""
77
+ provider = GeminiProvider({})
78
+
79
+ # Test with None candidates
80
+ mock_response = MagicMock()
81
+ mock_response.candidates = None
82
+ result = provider._parse_response(mock_response)
83
+ assert result == ""
84
+
85
+ # Test with empty candidates list
86
+ mock_response.candidates = []
87
+ result = provider._parse_response(mock_response)
88
+ assert result == ""
89
+
90
+
91
+ def test_parse_response_none_parts():
92
+ """Test _parse_response with None parts."""
93
+ provider = GeminiProvider({})
94
+
95
+ mock_response = MagicMock()
96
+ mock_response.candidates = [MagicMock()]
97
+ mock_response.candidates[0].content = MagicMock()
98
+ mock_response.candidates[0].content.parts = None
99
+
100
+ result = provider._parse_response(mock_response)
101
+ assert result == ""
102
+
103
+
104
+ def test_parse_response_success():
105
+ """Test _parse_response with successful response."""
106
+ provider = GeminiProvider({})
107
+
108
+ mock_response = MagicMock()
109
+ mock_response.candidates = [MagicMock()]
110
+ mock_response.candidates[0].content = MagicMock()
111
+ mock_response.candidates[0].content.parts = [
112
+ MagicMock(text="part1"),
113
+ MagicMock(text="part2"),
114
+ ]
115
+
116
+ result = provider._parse_response(mock_response)
117
+ assert result == "part1part2"
118
+
119
+
120
+ def test_get_bash_command_success(mock_gemini_key):
121
+ """Test successful command generation."""
122
+ config = {"model_name": "gemini-2.5-flash", "max_tokens": 150}
123
+ provider = GeminiProvider(config)
124
+
125
+ mock_response = MagicMock()
126
+ mock_response.candidates = [MagicMock()]
127
+ mock_response.candidates[0].content = MagicMock()
128
+ mock_response.candidates[0].content.parts = [MagicMock(text="ls -la")]
129
+
130
+ with patch("google.genai.Client") as mock_genai:
131
+ mock_client = MagicMock()
132
+ mock_client.models.generate_content.return_value = mock_response
133
+ mock_genai.return_value = mock_client
134
+
135
+ result = provider.get_bash_command("list files")
136
+
137
+ assert result == "ls -la"
138
+ mock_client.models.generate_content.assert_called_once_with(
139
+ model="gemini-2.5-flash",
140
+ contents="list files",
141
+ config=GenerateContentConfig(
142
+ max_output_tokens=150,
143
+ temperature=0.0,
144
+ system_instruction=SYSTEM_PROMPT,
145
+ ),
146
+ )
147
+
148
+
149
+ def test_get_bash_command_api_exception(mock_gemini_key):
150
+ """Test get_bash_command with API exception."""
151
+ config = {"model_name": "gemini-2.5-flash"}
152
+ provider = GeminiProvider(config)
153
+
154
+ with patch("google.genai.Client") as mock_genai:
155
+ mock_client = MagicMock()
156
+ mock_client.models.generate_content.side_effect = Exception("API error")
157
+ mock_genai.return_value = mock_client
158
+
159
+ # This should trigger the exception handling and return empty string
160
+ with pytest.raises(APIError, match="API request failed"):
161
+ provider.get_bash_command("test prompt")
162
+
163
+
164
+ def test_get_bash_command_auto_validate(mock_gemini_key):
165
+ """Test auto-validation behavior."""
166
+ config = {}
167
+ provider = GeminiProvider(config)
168
+
169
+ mock_response = MagicMock()
170
+ mock_response.candidates = [MagicMock()]
171
+ mock_response.candidates[0].content = MagicMock()
172
+ mock_response.candidates[0].content.parts = [MagicMock(text="ls -la")]
173
+
174
+ with patch("google.genai.Client") as mock_genai:
175
+ mock_client = MagicMock()
176
+ mock_client.models.generate_content.return_value = mock_response
177
+ mock_genai.return_value = mock_client
178
+
179
+ # Client should be None initially
180
+ assert provider.client is None
181
+
182
+ result = provider.get_bash_command("list files")
183
+
184
+ # Client should be set after auto-validation
185
+ assert provider.client is not None
186
+ assert result == "ls -la"
187
+
188
+
189
+ def test_handle_api_error_auth():
190
+ """Test authentication error mapping."""
191
+ provider = GeminiProvider({})
192
+
193
+ with pytest.raises(AuthenticationError, match="Invalid API key"):
194
+ provider._handle_api_error(Exception("authentication failed"))
195
+
196
+
197
+ def test_handle_api_error_rate_limit():
198
+ """Test rate limit error mapping."""
199
+ provider = GeminiProvider({})
200
+
201
+ with pytest.raises(RateLimitError, match="API rate limit exceeded"):
202
+ provider._handle_api_error(Exception("rate limit exceeded"))
203
+
204
+
205
+ def test_handle_api_error_generic():
206
+ """Test generic API error mapping."""
207
+ provider = GeminiProvider({})
208
+
209
+ with pytest.raises(APIError, match="API request failed"):
210
+ provider._handle_api_error(Exception("unexpected error"))
211
+
212
+
213
+ def test_config_parameter_usage(mock_gemini_key):
214
+ """Test configuration parameter usage."""
215
+ config = {
216
+ "model_name": "gemini-2.5-pro",
217
+ "max_tokens": 1024,
218
+ "temperature": 0.5,
219
+ "system_prompt": "Custom system prompt",
220
+ }
221
+ provider = GeminiProvider(config)
222
+
223
+ mock_response = MagicMock()
224
+ mock_response.candidates = [MagicMock()]
225
+ mock_response.candidates[0].content = MagicMock()
226
+ mock_response.candidates[0].content.parts = [MagicMock(text="custom response")]
227
+
228
+ with patch("google.genai.Client") as mock_genai:
229
+ mock_client = MagicMock()
230
+ mock_client.models.generate_content.return_value = mock_response
231
+ mock_genai.return_value = mock_client
232
+
233
+ result = provider.get_bash_command("test prompt")
234
+
235
+ assert result == "custom response"
236
+ mock_client.models.generate_content.assert_called_once_with(
237
+ model="gemini-2.5-pro",
238
+ contents="test prompt",
239
+ config=GenerateContentConfig(
240
+ max_output_tokens=1024,
241
+ temperature=0.5,
242
+ system_instruction="Custom system prompt",
243
+ ),
244
+ )
test/test_openai.py CHANGED
@@ -196,14 +196,6 @@ def test_handle_api_error_rate_limit():
196
196
  provider._handle_api_error(Exception("rate limit exceeded"))
197
197
 
198
198
 
199
- def test_handle_api_error_quota():
200
- """Test quota error mapping."""
201
- provider = OpenAIProvider({})
202
-
203
- with pytest.raises(RateLimitError, match="API rate limit exceeded"):
204
- provider._handle_api_error(Exception("quota exceeded"))
205
-
206
-
207
199
  def test_handle_api_error_generic():
208
200
  """Test generic API error mapping."""
209
201
  provider = OpenAIProvider({})
@@ -1,20 +0,0 @@
1
- ask/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
- ask/config.py,sha256=iHIiMKePia80Sno_XlARqa7pEyW3eZm_Bf5SlUMidiQ,2880
3
- ask/exceptions.py,sha256=0RLMSbw6j49BEhJN7C8MYaKpuhVeitsBhTGjZmaiHis,434
4
- ask/main.py,sha256=SJ084NXNvd-pO2hY8oFO__NFR1F07YxeOacBuv65crc,3867
5
- ask/providers/__init__.py,sha256=IINO0hNGarFpf62lCuVtIqMeAOC1CtR_wDWUL6_iWzI,1105
6
- ask/providers/anthropic.py,sha256=ZFlnQZaxPGHLYjAacapE-63LoSu43xYnZ30Ajmrmgw0,2703
7
- ask/providers/base.py,sha256=91ZbVORYWckSHNwNPiTmgfqQN0FLO9AgV6mptuAkIU0,769
8
- ask/providers/openai.py,sha256=R-UgVArtlpn8F4qkliQ7unNk11ekTPL0hFZCfubGYpg,3661
9
- test/conftest.py,sha256=V9ebLC-soz0hHocZLZAibzsVdvzZh4-elcTtKmZ2FyA,1363
10
- test/test_anthropic.py,sha256=S5OQ67qIZ4VO38eJwAAwJa4JBylJhKCtmcGjCWA8WLY,5687
11
- test/test_config.py,sha256=FrJ6bsZ6mK46e-8fQfkFGx9GgwHrNfnoI8211R0V9K8,5565
12
- test/test_exceptions.py,sha256=tw-spMitAdYj9uW_8TjnlyVKKXFC06FR3610WGR-494,1754
13
- test/test_main.py,sha256=3gZ83nVHMSEmgHSF2UJoELfK028a4vgxLpIk2P1cH1Y,7745
14
- test/test_openai.py,sha256=3dDwlxnKGwl5aJcKHyNgrwrciJBZ96a51fHuxr-V8FA,8633
15
- test/test_providers.py,sha256=SejQvCZSEQ5RAfVTCtPZ-39fXnfV17n4gaSxjiHA5UM,2140
16
- terminal_sherpa-0.1.0.dist-info/METADATA,sha256=hh4NTZNv3yisUjLUcsJxlOBHUiK70l0u82CQIxJrjJI,5610
17
- terminal_sherpa-0.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
18
- terminal_sherpa-0.1.0.dist-info/entry_points.txt,sha256=LxG9-J__nMGmeEIi47WVGYC1LLJo1GaADH21hfxEK70,38
19
- terminal_sherpa-0.1.0.dist-info/top_level.txt,sha256=Y7k5b2NSCkKiA_XPU-4fT_GYangD6JVDug5xwfXvmuQ,9
20
- terminal_sherpa-0.1.0.dist-info/RECORD,,