claude-dev-cli 0.13.3__py3-none-any.whl → 0.16.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of claude-dev-cli might be problematic. See the registry's advisory page for this release for more details.

@@ -9,7 +9,7 @@ Features:
9
9
  - Interactive and single-shot modes
10
10
  """
11
11
 
12
- __version__ = "0.13.3"
12
+ __version__ = "0.16.1"
13
13
  __author__ = "Julio"
14
14
  __license__ = "MIT"
15
15
 
claude_dev_cli/cli.py CHANGED
@@ -479,37 +479,103 @@ def config() -> None:
479
479
 
480
480
 
481
481
  @config.command('add')
482
+ @click.argument('provider', type=click.Choice(['anthropic', 'openai', 'ollama'], case_sensitive=False), default='anthropic')
482
483
  @click.argument('name')
483
- @click.option('--api-key', help='API key (or set {NAME}_ANTHROPIC_API_KEY env var)')
484
+ @click.option('--api-key', help='API key (or set {NAME}_<PROVIDER>_API_KEY env var; not needed for ollama)')
484
485
  @click.option('--description', help='Description of this API config')
485
486
  @click.option('--default', is_flag=True, help='Set as default API config')
487
+ @click.option('--base-url', help='Custom API base URL (for Azure, proxies, or local Ollama server)')
486
488
  @click.pass_context
487
489
  def config_add(
488
490
  ctx: click.Context,
491
+ provider: str,
489
492
  name: str,
490
493
  api_key: Optional[str],
491
494
  description: Optional[str],
492
- default: bool
495
+ default: bool,
496
+ base_url: Optional[str]
493
497
  ) -> None:
494
- """Add a new API configuration."""
498
+ """Add a new provider configuration.
499
+
500
+ PROVIDER: Provider type (anthropic, openai, or ollama)
501
+ NAME: Configuration name
502
+
503
+ Examples:
504
+ cdc config add anthropic personal --default
505
+ cdc config add openai work-openai --api-key sk-...
506
+ cdc config add ollama local --default
507
+ cdc config add ollama remote --base-url http://server:11434
508
+ """
495
509
  console = ctx.obj['console']
496
510
 
497
511
  try:
512
+ from claude_dev_cli.providers.factory import ProviderFactory
513
+
514
+ # Check if provider is available
515
+ if not ProviderFactory.is_provider_available(provider):
516
+ console.print(f"[red]Error: {provider} provider not available[/red]")
517
+ if provider == 'openai':
518
+ console.print("Install with: pip install 'claude-dev-cli[openai]'")
519
+ elif provider == 'ollama':
520
+ console.print("Install with: pip install 'claude-dev-cli[ollama]'")
521
+ sys.exit(1)
522
+
498
523
  config = Config()
499
- config.add_api_config(
524
+
525
+ # Get API key from environment if not provided (skip for ollama)
526
+ if api_key is None and provider not in ['ollama']:
527
+ env_var = f"{name.upper()}_{provider.upper()}_API_KEY"
528
+ api_key = os.environ.get(env_var)
529
+ if not api_key:
530
+ # Try generic env var for provider
531
+ generic_env = f"{provider.upper()}_API_KEY"
532
+ api_key = os.environ.get(generic_env)
533
+ if not api_key:
534
+ raise ValueError(
535
+ f"API key not provided and {env_var} environment variable not set"
536
+ )
537
+
538
+ # Check if name already exists
539
+ api_configs = config._data.get("api_configs", [])
540
+ for cfg in api_configs:
541
+ if cfg["name"] == name:
542
+ raise ValueError(f"Config with name '{name}' already exists")
543
+
544
+ # Store API key in secure storage (if provided)
545
+ if api_key:
546
+ config.secure_storage.store_key(name, api_key)
547
+
548
+ # If this is the first config or make_default is True, set as default
549
+ if default or not api_configs:
550
+ for cfg in api_configs:
551
+ cfg["default"] = False
552
+
553
+ # Create provider config
554
+ from claude_dev_cli.config import ProviderConfig
555
+ provider_config = ProviderConfig(
500
556
  name=name,
501
- api_key=api_key,
557
+ provider=provider,
558
+ api_key="", # Empty string indicates key is in secure storage (or not needed)
559
+ base_url=base_url,
502
560
  description=description,
503
- make_default=default
561
+ default=default or not api_configs
504
562
  )
505
- console.print(f"[green]✓[/green] Added API config: {name}")
506
563
 
507
- # Show storage method
508
- storage_method = config.secure_storage.get_storage_method()
509
- if storage_method == "keyring":
510
- console.print("[dim]🔐 Stored securely in system keyring[/dim]")
564
+ api_configs.append(provider_config.model_dump())
565
+ config._data["api_configs"] = api_configs
566
+ config._save_config()
567
+
568
+ console.print(f"[green]✓[/green] Added {provider} config: {name}")
569
+
570
+ # Show storage method (if API key was stored)
571
+ if api_key:
572
+ storage_method = config.secure_storage.get_storage_method()
573
+ if storage_method == "keyring":
574
+ console.print("[dim]🔐 Stored securely in system keyring[/dim]")
575
+ else:
576
+ console.print("[dim]🔒 Stored in encrypted file (keyring unavailable)[/dim]")
511
577
  else:
512
- console.print("[dim]🔒 Stored in encrypted file (keyring unavailable)[/dim]")
578
+ console.print("[dim]ℹ️ No API key needed for local provider[/dim]")
513
579
  except Exception as e:
514
580
  console.print(f"[red]Error: {e}[/red]")
515
581
  sys.exit(1)
@@ -575,7 +641,8 @@ def config_list(ctx: click.Context) -> None:
575
641
 
576
642
  for cfg in api_configs:
577
643
  default_marker = " [bold green](default)[/bold green]" if cfg.default else ""
578
- console.print(f"• {cfg.name}{default_marker}")
644
+ provider = getattr(cfg, 'provider', 'anthropic') # Default to anthropic for backward compatibility
645
+ console.print(f"• {cfg.name}{default_marker} [dim]({provider})[/dim]")
579
646
  if cfg.description:
580
647
  console.print(f" {cfg.description}")
581
648
  console.print(f" API Key: {cfg.api_key[:15]}...")
@@ -2439,6 +2506,157 @@ def template_use(ctx: click.Context, name: str, api: Optional[str], model: Optio
2439
2506
  sys.exit(1)
2440
2507
 
2441
2508
 
2509
+ @main.group()
2510
+ def ollama() -> None:
2511
+ """Manage Ollama local models."""
2512
+ pass
2513
+
2514
+
2515
+ @ollama.command('list')
2516
+ @click.option('-a', '--api', help='Ollama config to use (default: local ollama)')
2517
+ @click.pass_context
2518
+ def ollama_list(ctx: click.Context, api: Optional[str]) -> None:
2519
+ """List available Ollama models."""
2520
+ console = ctx.obj['console']
2521
+
2522
+ try:
2523
+ from claude_dev_cli.providers.ollama import OllamaProvider
2524
+ from claude_dev_cli.providers.base import ProviderConnectionError
2525
+ from claude_dev_cli.config import Config, ProviderConfig
2526
+ from rich.table import Table
2527
+
2528
+ # Get config or use default local
2529
+ config = Config()
2530
+ provider_config = None
2531
+ if api:
2532
+ api_config = config.get_provider_config(api)
2533
+ if not api_config or api_config.provider != 'ollama':
2534
+ console.print(f"[red]Error: '{api}' is not an ollama config[/red]")
2535
+ sys.exit(1)
2536
+ provider_config = api_config
2537
+ else:
2538
+ # Use default local ollama
2539
+ provider_config = ProviderConfig(
2540
+ name="local",
2541
+ provider="ollama",
2542
+ base_url="http://localhost:11434"
2543
+ )
2544
+
2545
+ provider = OllamaProvider(provider_config)
2546
+
2547
+ with console.status("[bold blue]Fetching models from Ollama..."):
2548
+ models = provider.list_models()
2549
+
2550
+ if not models:
2551
+ console.print("[yellow]No models found.[/yellow]")
2552
+ console.print("\nPull a model with: ollama pull mistral")
2553
+ console.print("Or use: cdc ollama pull mistral")
2554
+ return
2555
+
2556
+ table = Table(show_header=True, header_style="bold magenta")
2557
+ table.add_column("Model", style="cyan")
2558
+ table.add_column("Display Name")
2559
+ table.add_column("Context", justify="right")
2560
+ table.add_column("Cost", justify="right")
2561
+ table.add_column("Capabilities")
2562
+
2563
+ for model in models:
2564
+ capabilities = ", ".join(model.capabilities)
2565
+ table.add_row(
2566
+ model.model_id,
2567
+ model.display_name,
2568
+ f"{model.context_window:,}",
2569
+ "[green]FREE[/green]",
2570
+ capabilities
2571
+ )
2572
+
2573
+ console.print(table)
2574
+ console.print(f"\n[dim]Found {len(models)} model(s)[/dim]")
2575
+
2576
+ except ProviderConnectionError:
2577
+ console.print("[red]Error: Cannot connect to Ollama[/red]")
2578
+ console.print("\nMake sure Ollama is running:")
2579
+ console.print(" ollama serve")
2580
+ console.print("\nOr install Ollama from: https://ollama.ai")
2581
+ sys.exit(1)
2582
+ except ImportError:
2583
+ console.print("[red]Error: Ollama provider not installed[/red]")
2584
+ console.print("Install with: pip install 'claude-dev-cli[ollama]'")
2585
+ sys.exit(1)
2586
+ except Exception as e:
2587
+ console.print(f"[red]Error: {e}[/red]")
2588
+ sys.exit(1)
2589
+
2590
+
2591
+ @ollama.command('pull')
2592
+ @click.argument('model')
2593
+ @click.pass_context
2594
+ def ollama_pull(ctx: click.Context, model: str) -> None:
2595
+ """Pull an Ollama model.
2596
+
2597
+ Examples:
2598
+ cdc ollama pull mistral
2599
+ cdc ollama pull codellama
2600
+ cdc ollama pull mixtral
2601
+ """
2602
+ console = ctx.obj['console']
2603
+
2604
+ console.print(f"[yellow]Pulling {model} via Ollama CLI...[/yellow]")
2605
+ console.print("[dim]This will use the 'ollama pull' command directly[/dim]\n")
2606
+
2607
+ import subprocess
2608
+ try:
2609
+ # Use ollama CLI directly - it shows progress
2610
+ result = subprocess.run(
2611
+ ['ollama', 'pull', model],
2612
+ check=True
2613
+ )
2614
+
2615
+ if result.returncode == 0:
2616
+ console.print(f"\n[green]✓[/green] Successfully pulled {model}")
2617
+ console.print(f"\nUse it with: cdc ask -m {model} 'your question'")
2618
+ except FileNotFoundError:
2619
+ console.print("[red]Error: ollama command not found[/red]")
2620
+ console.print("\nInstall Ollama from: https://ollama.ai")
2621
+ sys.exit(1)
2622
+ except subprocess.CalledProcessError:
2623
+ console.print(f"[red]Error: Failed to pull {model}[/red]")
2624
+ sys.exit(1)
2625
+
2626
+
2627
+ @ollama.command('show')
2628
+ @click.argument('model')
2629
+ @click.pass_context
2630
+ def ollama_show(ctx: click.Context, model: str) -> None:
2631
+ """Show details about an Ollama model.
2632
+
2633
+ Examples:
2634
+ cdc ollama show mistral
2635
+ cdc ollama show codellama
2636
+ """
2637
+ console = ctx.obj['console']
2638
+
2639
+ import subprocess
2640
+ try:
2641
+ # Use ollama CLI for detailed info
2642
+ result = subprocess.run(
2643
+ ['ollama', 'show', model],
2644
+ capture_output=True,
2645
+ text=True,
2646
+ check=True
2647
+ )
2648
+
2649
+ console.print(result.stdout)
2650
+ except FileNotFoundError:
2651
+ console.print("[red]Error: ollama command not found[/red]")
2652
+ console.print("\nInstall Ollama from: https://ollama.ai")
2653
+ sys.exit(1)
2654
+ except subprocess.CalledProcessError:
2655
+ console.print(f"[red]Error: Model '{model}' not found[/red]")
2656
+ console.print(f"\nPull it first: cdc ollama pull {model}")
2657
+ sys.exit(1)
2658
+
2659
+
2442
2660
  @main.group()
2443
2661
  def warp() -> None:
2444
2662
  """Warp terminal integration."""
claude_dev_cli/config.py CHANGED
@@ -31,25 +31,43 @@ class SummarizationConfig(BaseModel):
31
31
 
32
32
 
33
33
  class APIConfig(BaseModel):
34
- """Configuration for a Claude API key."""
34
+ """Configuration for a Claude API key.
35
+
36
+ DEPRECATED: Use ProviderConfig instead. Maintained for backward compatibility.
37
+ """
35
38
 
36
39
  name: str
37
40
  api_key: str
38
41
  description: Optional[str] = None
39
42
  default: bool = False
40
43
  default_model_profile: Optional[str] = None # Default model profile for this API
44
+ # Added for provider compatibility
45
+ provider: str = "anthropic" # Always anthropic for APIConfig
46
+
47
+
48
+ class ProviderConfig(BaseModel):
49
+ """Configuration for an AI provider (Anthropic, OpenAI, Ollama, etc.)."""
50
+
51
+ name: str # User-friendly name (e.g., "personal-claude", "work-openai")
52
+ provider: str # Provider type: "anthropic", "openai", "ollama", "lmstudio"
53
+ api_key: Optional[str] = None # Not needed for local providers
54
+ base_url: Optional[str] = None # Custom endpoint URL (for local/enterprise)
55
+ description: Optional[str] = None
56
+ default: bool = False
57
+ default_model_profile: Optional[str] = None
41
58
 
42
59
 
43
60
  class ModelProfile(BaseModel):
44
61
  """Model profile with pricing information."""
45
62
 
46
63
  name: str # User-friendly alias (e.g., "fast", "smart", "powerful")
47
- model_id: str # Actual Claude model ID
64
+ model_id: str # Provider-specific model ID
48
65
  description: Optional[str] = None
49
66
  input_price_per_mtok: float # Input cost per million tokens (USD)
50
67
  output_price_per_mtok: float # Output cost per million tokens (USD)
51
68
  use_cases: List[str] = Field(default_factory=list) # Task types
52
- api_config_name: Optional[str] = None # Tied to specific API config, or None for global
69
+ provider: str = "anthropic" # Provider type: "anthropic", "openai", "ollama"
70
+ api_config_name: Optional[str] = None # Tied to specific API/provider config, or None for global
53
71
 
54
72
 
55
73
  class ProjectProfile(BaseModel):
@@ -168,36 +186,104 @@ class Config:
168
186
  json.dump(data, f, indent=2)
169
187
 
170
188
  def _get_default_model_profiles(self) -> List[Dict]:
171
- """Get default model profiles with current Anthropic pricing."""
172
- return [
189
+ """Get default model profiles for all providers."""
190
+ profiles = [
191
+ # Anthropic (Claude) profiles
173
192
  {
174
193
  "name": "fast",
175
194
  "model_id": "claude-3-5-haiku-20241022",
176
- "description": "Fast and economical for simple tasks",
195
+ "description": "Fast and economical for simple tasks (Claude)",
177
196
  "input_price_per_mtok": 0.80,
178
197
  "output_price_per_mtok": 4.00,
179
198
  "use_cases": ["quick", "simple", "classification"],
199
+ "provider": "anthropic",
180
200
  "api_config_name": None
181
201
  },
182
202
  {
183
203
  "name": "smart",
184
204
  "model_id": "claude-sonnet-4-5-20250929",
185
- "description": "Balanced performance and cost for most tasks",
205
+ "description": "Balanced performance and cost for most tasks (Claude)",
186
206
  "input_price_per_mtok": 3.00,
187
207
  "output_price_per_mtok": 15.00,
188
208
  "use_cases": ["general", "coding", "analysis"],
209
+ "provider": "anthropic",
189
210
  "api_config_name": None
190
211
  },
191
212
  {
192
213
  "name": "powerful",
193
214
  "model_id": "claude-opus-4-20250514",
194
- "description": "Maximum capability for complex tasks",
215
+ "description": "Maximum capability for complex tasks (Claude)",
195
216
  "input_price_per_mtok": 15.00,
196
217
  "output_price_per_mtok": 75.00,
197
218
  "use_cases": ["complex", "research", "creative"],
219
+ "provider": "anthropic",
198
220
  "api_config_name": None
199
- }
221
+ },
222
+ # OpenAI (GPT) profiles - only added if OpenAI provider available
223
+ {
224
+ "name": "fast-openai",
225
+ "model_id": "gpt-3.5-turbo",
226
+ "description": "Fast and economical for simple tasks (OpenAI)",
227
+ "input_price_per_mtok": 0.50,
228
+ "output_price_per_mtok": 1.50,
229
+ "use_cases": ["quick", "simple", "chat"],
230
+ "provider": "openai",
231
+ "api_config_name": None
232
+ },
233
+ {
234
+ "name": "smart-openai",
235
+ "model_id": "gpt-4-turbo",
236
+ "description": "Balanced performance and cost for most tasks (OpenAI)",
237
+ "input_price_per_mtok": 10.00,
238
+ "output_price_per_mtok": 30.00,
239
+ "use_cases": ["general", "coding", "analysis"],
240
+ "provider": "openai",
241
+ "api_config_name": None
242
+ },
243
+ {
244
+ "name": "powerful-openai",
245
+ "model_id": "gpt-4",
246
+ "description": "High capability for complex tasks (OpenAI)",
247
+ "input_price_per_mtok": 30.00,
248
+ "output_price_per_mtok": 60.00,
249
+ "use_cases": ["complex", "research", "reasoning"],
250
+ "provider": "openai",
251
+ "api_config_name": None
252
+ },
253
+ # Ollama (Local) profiles - zero cost!
254
+ {
255
+ "name": "fast-local",
256
+ "model_id": "mistral",
257
+ "description": "Fast local inference with Mistral 7B (zero cost)",
258
+ "input_price_per_mtok": 0.0,
259
+ "output_price_per_mtok": 0.0,
260
+ "use_cases": ["quick", "chat", "general"],
261
+ "provider": "ollama",
262
+ "api_config_name": None
263
+ },
264
+ {
265
+ "name": "smart-local",
266
+ "model_id": "mixtral",
267
+ "description": "Powerful local inference with Mixtral 8x7B (zero cost)",
268
+ "input_price_per_mtok": 0.0,
269
+ "output_price_per_mtok": 0.0,
270
+ "use_cases": ["general", "analysis", "chat"],
271
+ "provider": "ollama",
272
+ "api_config_name": None
273
+ },
274
+ {
275
+ "name": "code-local",
276
+ "model_id": "codellama",
277
+ "description": "Code-focused local inference with Code Llama (zero cost)",
278
+ "input_price_per_mtok": 0.0,
279
+ "output_price_per_mtok": 0.0,
280
+ "use_cases": ["coding", "refactoring", "debugging"],
281
+ "provider": "ollama",
282
+ "api_config_name": None
283
+ },
200
284
  ]
285
+
286
+ return profiles
201
287
 
202
288
  def _auto_migrate_keys(self) -> None:
203
289
  """Automatically migrate plaintext API keys to secure storage."""
claude_dev_cli/core.py CHANGED
@@ -3,17 +3,22 @@
3
3
  import json
4
4
  from datetime import datetime
5
5
  from pathlib import Path
6
- from typing import Optional, Dict, Any, List
7
- from anthropic import Anthropic
6
+ from typing import Optional, Dict, Any, List, Union
8
7
 
9
- from claude_dev_cli.config import Config
8
+ from claude_dev_cli.config import Config, APIConfig, ProviderConfig
9
+ from claude_dev_cli.providers.factory import ProviderFactory
10
+ from claude_dev_cli.providers.base import AIProvider
10
11
 
11
12
 
12
13
  class ClaudeClient:
13
- """Claude API client with multi-key routing and usage tracking."""
14
+ """AI client with multi-provider support and routing.
15
+
16
+ Backward compatible wrapper around provider system.
17
+ Uses AIProvider abstraction to support Anthropic, OpenAI, Ollama, etc.
18
+ """
14
19
 
15
20
  def __init__(self, config: Optional[Config] = None, api_config_name: Optional[str] = None):
16
- """Initialize Claude client.
21
+ """Initialize AI client.
17
22
 
18
23
  API routing hierarchy (highest to lowest priority):
19
24
  1. Explicit api_config_name parameter
@@ -35,7 +40,10 @@ class ClaudeClient:
35
40
  "No API configuration found. Run 'cdc config add' to set up an API key."
36
41
  )
37
42
 
38
- self.client = Anthropic(api_key=self.api_config.api_key)
43
+ # Create provider using factory pattern
44
+ # APIConfig is treated as ProviderConfig with provider="anthropic"
45
+ self.provider = ProviderFactory.create(self.api_config)
46
+
39
47
  self.model = self.config.get_model()
40
48
  self.max_tokens = self.config.get_max_tokens()
41
49
 
@@ -86,7 +94,7 @@ class ClaudeClient:
86
94
  temperature: float = 1.0,
87
95
  stream: bool = False
88
96
  ) -> str:
89
- """Make a call to Claude API.
97
+ """Make a call to AI provider.
90
98
 
91
99
  Args:
92
100
  model: Model ID or profile name (e.g., 'fast', 'smart', 'powerful')
@@ -100,34 +108,25 @@ class ClaudeClient:
100
108
  if project_profile and project_profile.system_prompt and not system_prompt:
101
109
  system_prompt = project_profile.system_prompt
102
110
 
103
- kwargs: Dict[str, Any] = {
104
- "model": resolved_model,
105
- "max_tokens": max_tokens,
106
- "temperature": temperature,
107
- "messages": [{"role": "user", "content": prompt}]
108
- }
109
-
110
- if system_prompt:
111
- kwargs["system"] = system_prompt
112
-
113
- start_time = datetime.utcnow()
114
- response = self.client.messages.create(**kwargs)
115
- end_time = datetime.utcnow()
116
-
117
- # Log usage
118
- self._log_usage(
111
+ # Call provider
112
+ response = self.provider.call(
119
113
  prompt=prompt,
120
- response=response,
114
+ system_prompt=system_prompt,
121
115
  model=resolved_model,
122
- duration_ms=int((end_time - start_time).total_seconds() * 1000),
123
- api_config_name=self.api_config.name
116
+ max_tokens=max_tokens,
117
+ temperature=temperature
124
118
  )
125
119
 
126
- # Extract text from response
127
- text_blocks = [
128
- block.text for block in response.content if hasattr(block, 'text')
129
- ]
130
- return '\n'.join(text_blocks)
120
+ # Log usage
121
+ usage = self.provider.get_last_usage()
122
+ if usage:
123
+ self._log_usage(
124
+ prompt=prompt,
125
+ usage=usage,
126
+ api_config_name=self.api_config.name
127
+ )
128
+
129
+ return response
131
130
 
132
131
  def call_streaming(
133
132
  self,
@@ -137,7 +136,7 @@ class ClaudeClient:
137
136
  max_tokens: Optional[int] = None,
138
137
  temperature: float = 1.0
139
138
  ):
140
- """Make a streaming call to Claude API.
139
+ """Make a streaming call to AI provider.
141
140
 
142
141
  Args:
143
142
  model: Model ID or profile name (e.g., 'fast', 'smart', 'powerful')
@@ -151,37 +150,33 @@ class ClaudeClient:
151
150
  if project_profile and project_profile.system_prompt and not system_prompt:
152
151
  system_prompt = project_profile.system_prompt
153
152
 
154
- kwargs: Dict[str, Any] = {
155
- "model": resolved_model,
156
- "max_tokens": max_tokens,
157
- "temperature": temperature,
158
- "messages": [{"role": "user", "content": prompt}]
159
- }
160
-
161
- if system_prompt:
162
- kwargs["system"] = system_prompt
163
-
164
- with self.client.messages.stream(**kwargs) as stream:
165
- for text in stream.text_stream:
166
- yield text
153
+ # Use provider's streaming method
154
+ for text in self.provider.call_streaming(
155
+ prompt=prompt,
156
+ system_prompt=system_prompt,
157
+ model=resolved_model,
158
+ max_tokens=max_tokens,
159
+ temperature=temperature
160
+ ):
161
+ yield text
167
162
 
168
163
  def _log_usage(
169
164
  self,
170
165
  prompt: str,
171
- response: Any,
172
- model: str,
173
- duration_ms: int,
166
+ usage: Any, # UsageInfo from provider
174
167
  api_config_name: str
175
168
  ) -> None:
176
169
  """Log API usage to file."""
177
170
  log_entry = {
178
- "timestamp": datetime.utcnow().isoformat(),
171
+ "timestamp": usage.timestamp.isoformat(),
179
172
  "api_config": api_config_name,
180
- "model": model,
173
+ "model": usage.model,
181
174
  "prompt_preview": prompt[:100],
182
- "input_tokens": response.usage.input_tokens,
183
- "output_tokens": response.usage.output_tokens,
184
- "duration_ms": duration_ms,
175
+ "input_tokens": usage.input_tokens,
176
+ "output_tokens": usage.output_tokens,
177
+ "duration_ms": usage.duration_ms,
178
+ "cost_usd": usage.cost_usd,
179
+ "provider": self.provider.provider_name
185
180
  }
186
181
 
187
182
  with open(self.config.usage_log, 'a') as f:
@@ -0,0 +1,28 @@
1
+ """AI Provider abstraction layer for claude-dev-cli.
2
+
3
+ This package provides a unified interface for multiple AI providers:
4
+ - Anthropic (Claude)
5
+ - OpenAI (GPT-4, GPT-3.5) - coming in v0.15.0
6
+ - Ollama (Local models) - coming in v0.16.0
7
+ - LM Studio (Local models) - coming in v0.16.0
8
+ """
9
+
10
+ from claude_dev_cli.providers.base import (
11
+ AIProvider,
12
+ ModelInfo,
13
+ UsageInfo,
14
+ ProviderError,
15
+ InsufficientCreditsError,
16
+ ProviderConnectionError,
17
+ ModelNotFoundError,
18
+ )
19
+
20
+ __all__ = [
21
+ "AIProvider",
22
+ "ModelInfo",
23
+ "UsageInfo",
24
+ "ProviderError",
25
+ "InsufficientCreditsError",
26
+ "ProviderConnectionError",
27
+ "ModelNotFoundError",
28
+ ]