agentic-python-coder 2.0.1__tar.gz → 2.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57) hide show
  1. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/.gitignore +4 -0
  2. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/PKG-INFO +24 -17
  3. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/README.md +23 -16
  4. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/__init__.py +10 -4
  5. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/cli.py +13 -10
  6. agentic_python_coder-2.1.0/coder/src/agentic_python_coder/llm.py +190 -0
  7. agentic_python_coder-2.1.0/coder/src/agentic_python_coder/models/deepseek31.json +7 -0
  8. agentic_python_coder-2.1.0/coder/src/agentic_python_coder/models/gemini25.json +8 -0
  9. agentic_python_coder-2.1.0/coder/src/agentic_python_coder/models/gpt5.json +6 -0
  10. agentic_python_coder-2.1.0/coder/src/agentic_python_coder/models/grok41.json +7 -0
  11. agentic_python_coder-2.1.0/coder/src/agentic_python_coder/models/opus45.json +6 -0
  12. agentic_python_coder-2.1.0/coder/src/agentic_python_coder/models/qwen3.json +7 -0
  13. agentic_python_coder-2.1.0/coder/src/agentic_python_coder/models/sonnet45.json +6 -0
  14. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/pyproject.toml +1 -1
  15. agentic_python_coder-2.0.1/coder/src/agentic_python_coder/llm.py +0 -230
  16. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/LICENSE +0 -0
  17. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/.gitignore +0 -0
  18. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/prompts/system.md +0 -0
  19. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/prompts/system_todo.md +0 -0
  20. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/agent.py +0 -0
  21. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/examples/__init__.py +0 -0
  22. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/examples/clingo/README.md +0 -0
  23. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/examples/clingo/clingo.md +0 -0
  24. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/examples/clingo/sample_tasks/bird_reasoning.md +0 -0
  25. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/examples/clingo/sample_tasks/diagnosis.md +0 -0
  26. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/examples/clingo/sample_tasks/simple_coloring.md +0 -0
  27. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/examples/clingo/sample_tasks/stable_marriage.md +0 -0
  28. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/examples/clingo/sample_tasks/sudoku_mini.md +0 -0
  29. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/examples/cpmpy/README.md +0 -0
  30. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/examples/cpmpy/cpmpy.md +0 -0
  31. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/examples/cpmpy/sample_tasks/magic_square.md +0 -0
  32. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/examples/cpmpy/sample_tasks/n_queens.md +0 -0
  33. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/examples/regex/README.md +0 -0
  34. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/examples/regex/regex.md +0 -0
  35. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/examples/regex/sample_tasks/email_extraction.md +0 -0
  36. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/examples/regex/sample_tasks/phone_validation.md +0 -0
  37. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/examples/regex/sample_tasks/url_parsing.md +0 -0
  38. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/kernel.py +0 -0
  39. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/project_md.py +0 -0
  40. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/runner.py +0 -0
  41. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/coder/src/agentic_python_coder/tools.py +0 -0
  42. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/examples/clingo/README.md +0 -0
  43. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/examples/clingo/clingo.md +0 -0
  44. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/examples/clingo/sample_tasks/bird_reasoning.md +0 -0
  45. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/examples/clingo/sample_tasks/diagnosis.md +0 -0
  46. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/examples/clingo/sample_tasks/simple_coloring.md +0 -0
  47. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/examples/clingo/sample_tasks/stable_marriage.md +0 -0
  48. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/examples/clingo/sample_tasks/sudoku_mini.md +0 -0
  49. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/examples/cpmpy/README.md +0 -0
  50. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/examples/cpmpy/cpmpy.md +0 -0
  51. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/examples/cpmpy/sample_problems/magic_square.md +0 -0
  52. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/examples/cpmpy/sample_problems/n_queens.md +0 -0
  53. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/examples/regex/README.md +0 -0
  54. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/examples/regex/regex.md +0 -0
  55. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/examples/regex/sample_tasks/email_extraction.md +0 -0
  56. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/examples/regex/sample_tasks/phone_validation.md +0 -0
  57. {agentic_python_coder-2.0.1 → agentic_python_coder-2.1.0}/examples/regex/sample_tasks/url_parsing.md +0 -0
@@ -164,7 +164,11 @@ TRASH/
164
164
  LOCAL/
165
165
  ASP/
166
166
  CPBENCH/
167
+ CPMPY/
167
168
  PAPER/
169
+ PAPER-ASP/
170
+ ZEBRA/
171
+ .mcp.json
168
172
  examples/cpmpy/cpmpy_v*.md
169
173
  examples/cpmpy/cpmpy.md.backup-*
170
174
  examples/clingo/clingo_v*.md
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: agentic-python-coder
3
- Version: 2.0.1
3
+ Version: 2.1.0
4
4
  Summary: A lightweight Python coding agent that writes, executes, and iterates on code through natural language instructions
5
5
  Author: Stefan Szeider
6
6
  License: Apache-2.0
@@ -105,7 +105,7 @@ import agentic_python_coder as coder
105
105
  messages, stats, log_path = coder.solve_task(
106
106
  "Write a fibonacci function",
107
107
  working_directory="/tmp/workspace",
108
- model="sonnet",
108
+ model="sonnet45",
109
109
  quiet=True, # Suppress console output
110
110
  )
111
111
 
@@ -128,7 +128,7 @@ from agentic_python_coder import solve_task
128
128
  messages, stats, log_path = solve_task(
129
129
  task="Your task description",
130
130
  working_directory=".", # Where to run and save files
131
- model=None, # Model alias: "sonnet", "opus", "deepseek", etc.
131
+ model=None, # Model name: "sonnet45", "opus45", or JSON file
132
132
  system_prompt=None, # Custom system prompt (string)
133
133
  system_prompt_path=None, # Path to system prompt file
134
134
  project_prompt=None, # Domain-specific context
@@ -158,7 +158,7 @@ from agentic_python_coder import create_coding_agent, run_agent, get_final_respo
158
158
  agent = create_coding_agent(
159
159
  working_directory="/tmp/workspace",
160
160
  system_prompt="You are a Python expert.",
161
- model="deepseek",
161
+ model="deepseek31",
162
162
  with_packages=["pandas"],
163
163
  )
164
164
 
@@ -175,14 +175,17 @@ print(get_final_response(messages2))
175
175
  Get a configured LangChain LLM instance for custom use.
176
176
 
177
177
  ```python
178
- from agentic_python_coder import get_openrouter_llm, MODEL_REGISTRY
178
+ from agentic_python_coder import get_openrouter_llm, list_available_models
179
179
 
180
- # Get LLM by alias
181
- llm = get_openrouter_llm(model="sonnet")
180
+ # Get LLM by model name
181
+ llm = get_openrouter_llm(model="sonnet45")
182
182
 
183
183
  # See available models
184
- print(MODEL_REGISTRY.keys())
185
- # dict_keys(['deepseek', 'sonnet', 'opus', 'default', 'grok', 'qwen', 'gemini', 'gpt'])
184
+ print(list_available_models())
185
+ # ['deepseek31', 'gemini25', 'gpt5', 'grok41', 'opus45', 'qwen3', 'sonnet45']
186
+
187
+ # Use a custom model JSON file
188
+ llm = get_openrouter_llm(model="./mymodel.json")
186
189
  ```
187
190
 
188
191
  ---
@@ -212,7 +215,7 @@ coder -i
212
215
  | `--version`, `-V` | Show version and exit |
213
216
  | `--init [TEMPLATE]` | Initialize example templates (cpmpy, clingo, regex, or all) |
214
217
  | `--task`, `-t FILE` | Load task from markdown file |
215
- | `--model MODEL` | Model to use (default: sonnet) |
218
+ | `--model MODEL` | Model name or JSON file (default: sonnet45) |
216
219
  | `--project`, `-p FILE` | Project template for domain-specific prompts |
217
220
  | `--with PACKAGE` | Add packages dynamically (repeatable) |
218
221
  | `--dir`, `-d DIR` | Working directory |
@@ -225,13 +228,17 @@ coder -i
225
228
  ### Model Selection
226
229
 
227
230
  ```bash
228
- coder --model sonnet "task" # Claude Sonnet 4.5 (default)
229
- coder --model opus "task" # Claude Opus 4.5
230
- coder --model deepseek "task" # DeepSeek v3.1
231
- coder --model grok "task" # X.AI Grok
232
- coder --model qwen "task" # Qwen3 Coder
233
- coder --model gemini "task" # Gemini Pro 2.5
234
- coder --model gpt "task" # GPT-5
231
+ # Built-in models (versioned names)
232
+ coder --model sonnet45 "task" # Claude Sonnet 4.5 (default)
233
+ coder --model opus45 "task" # Claude Opus 4.5
234
+ coder --model deepseek31 "task" # DeepSeek v3.1
235
+ coder --model grok41 "task" # X.AI Grok 4.1
236
+ coder --model qwen3 "task" # Qwen3 Coder
237
+ coder --model gemini25 "task" # Gemini Pro 2.5
238
+ coder --model gpt5 "task" # GPT-5
239
+
240
+ # Custom model (JSON file)
241
+ coder --model ./mymodel.json "task"
235
242
  ```
236
243
 
237
244
  ### Project Templates
@@ -70,7 +70,7 @@ import agentic_python_coder as coder
70
70
  messages, stats, log_path = coder.solve_task(
71
71
  "Write a fibonacci function",
72
72
  working_directory="/tmp/workspace",
73
- model="sonnet",
73
+ model="sonnet45",
74
74
  quiet=True, # Suppress console output
75
75
  )
76
76
 
@@ -93,7 +93,7 @@ from agentic_python_coder import solve_task
93
93
  messages, stats, log_path = solve_task(
94
94
  task="Your task description",
95
95
  working_directory=".", # Where to run and save files
96
- model=None, # Model alias: "sonnet", "opus", "deepseek", etc.
96
+ model=None, # Model name: "sonnet45", "opus45", or JSON file
97
97
  system_prompt=None, # Custom system prompt (string)
98
98
  system_prompt_path=None, # Path to system prompt file
99
99
  project_prompt=None, # Domain-specific context
@@ -123,7 +123,7 @@ from agentic_python_coder import create_coding_agent, run_agent, get_final_respo
123
123
  agent = create_coding_agent(
124
124
  working_directory="/tmp/workspace",
125
125
  system_prompt="You are a Python expert.",
126
- model="deepseek",
126
+ model="deepseek31",
127
127
  with_packages=["pandas"],
128
128
  )
129
129
 
@@ -140,14 +140,17 @@ print(get_final_response(messages2))
140
140
  Get a configured LangChain LLM instance for custom use.
141
141
 
142
142
  ```python
143
- from agentic_python_coder import get_openrouter_llm, MODEL_REGISTRY
143
+ from agentic_python_coder import get_openrouter_llm, list_available_models
144
144
 
145
- # Get LLM by alias
146
- llm = get_openrouter_llm(model="sonnet")
145
+ # Get LLM by model name
146
+ llm = get_openrouter_llm(model="sonnet45")
147
147
 
148
148
  # See available models
149
- print(MODEL_REGISTRY.keys())
150
- # dict_keys(['deepseek', 'sonnet', 'opus', 'default', 'grok', 'qwen', 'gemini', 'gpt'])
149
+ print(list_available_models())
150
+ # ['deepseek31', 'gemini25', 'gpt5', 'grok41', 'opus45', 'qwen3', 'sonnet45']
151
+
152
+ # Use a custom model JSON file
153
+ llm = get_openrouter_llm(model="./mymodel.json")
151
154
  ```
152
155
 
153
156
  ---
@@ -177,7 +180,7 @@ coder -i
177
180
  | `--version`, `-V` | Show version and exit |
178
181
  | `--init [TEMPLATE]` | Initialize example templates (cpmpy, clingo, regex, or all) |
179
182
  | `--task`, `-t FILE` | Load task from markdown file |
180
- | `--model MODEL` | Model to use (default: sonnet) |
183
+ | `--model MODEL` | Model name or JSON file (default: sonnet45) |
181
184
  | `--project`, `-p FILE` | Project template for domain-specific prompts |
182
185
  | `--with PACKAGE` | Add packages dynamically (repeatable) |
183
186
  | `--dir`, `-d DIR` | Working directory |
@@ -190,13 +193,17 @@ coder -i
190
193
  ### Model Selection
191
194
 
192
195
  ```bash
193
- coder --model sonnet "task" # Claude Sonnet 4.5 (default)
194
- coder --model opus "task" # Claude Opus 4.5
195
- coder --model deepseek "task" # DeepSeek v3.1
196
- coder --model grok "task" # X.AI Grok
197
- coder --model qwen "task" # Qwen3 Coder
198
- coder --model gemini "task" # Gemini Pro 2.5
199
- coder --model gpt "task" # GPT-5
196
+ # Built-in models (versioned names)
197
+ coder --model sonnet45 "task" # Claude Sonnet 4.5 (default)
198
+ coder --model opus45 "task" # Claude Opus 4.5
199
+ coder --model deepseek31 "task" # DeepSeek v3.1
200
+ coder --model grok41 "task" # X.AI Grok 4.1
201
+ coder --model qwen3 "task" # Qwen3 Coder
202
+ coder --model gemini25 "task" # Gemini Pro 2.5
203
+ coder --model gpt5 "task" # GPT-5
204
+
205
+ # Custom model (JSON file)
206
+ coder --model ./mymodel.json "task"
200
207
  ```
201
208
 
202
209
  ### Project Templates
@@ -1,6 +1,6 @@
1
1
  """Python Coding Agent - A minimal coding assistant using LangGraph and OpenRouter."""
2
2
 
3
- __version__ = "2.0.1"
3
+ __version__ = "2.1.0"
4
4
 
5
5
  # High-level API (recommended for most users)
6
6
  from agentic_python_coder.runner import solve_task
@@ -14,7 +14,12 @@ from agentic_python_coder.agent import (
14
14
  )
15
15
 
16
16
  # LLM utilities
17
- from agentic_python_coder.llm import get_openrouter_llm, MODEL_REGISTRY, MODEL_STRING
17
+ from agentic_python_coder.llm import (
18
+ get_openrouter_llm,
19
+ load_model_config,
20
+ list_available_models,
21
+ DEFAULT_MODEL,
22
+ )
18
23
 
19
24
  __all__ = [
20
25
  # Version
@@ -28,6 +33,7 @@ __all__ = [
28
33
  "DEFAULT_STEP_LIMIT",
29
34
  # LLM
30
35
  "get_openrouter_llm",
31
- "MODEL_REGISTRY",
32
- "MODEL_STRING",
36
+ "load_model_config",
37
+ "list_available_models",
38
+ "DEFAULT_MODEL",
33
39
  ]
@@ -4,7 +4,6 @@ import argparse
4
4
  import os
5
5
  import sys
6
6
  import re
7
- import shutil
8
7
  from pathlib import Path
9
8
  from typing import Optional, Dict, Any
10
9
  from importlib import resources
@@ -24,7 +23,7 @@ from agentic_python_coder.project_md import (
24
23
  check_packages_available,
25
24
  create_project_prompt,
26
25
  )
27
- from agentic_python_coder.llm import MODEL_STRING, MODEL_REGISTRY
26
+ from agentic_python_coder.llm import DEFAULT_MODEL, list_available_models, load_model_config
28
27
  from agentic_python_coder import __version__
29
28
 
30
29
 
@@ -107,7 +106,7 @@ def parse_args():
107
106
  help="Path to task file (creates {basename}_code.py and {basename}.jsonl)",
108
107
  )
109
108
 
110
- parser.add_argument("--model", help=f"Model to use (default: {MODEL_STRING})")
109
+ parser.add_argument("--model", help=f"Model name or JSON file (default: {DEFAULT_MODEL})")
111
110
 
112
111
  parser.add_argument(
113
112
  "--interactive", "-i", action="store_true", help="Interactive mode"
@@ -239,13 +238,17 @@ def validate_packages(packages):
239
238
 
240
239
 
241
240
  def validate_model(model):
242
- """Validate model name."""
243
- if model and model not in MODEL_REGISTRY and "/" not in model:
244
- available = sorted([m for m in MODEL_REGISTRY.keys() if m != "default"])
245
- print(f"Error: Unknown model: '{model}'")
246
- print("\nAvailable models:")
247
- for m in available:
241
+ """Validate model name or JSON file."""
242
+ if not model:
243
+ return
244
+ try:
245
+ load_model_config(model)
246
+ except FileNotFoundError as e:
247
+ print(f"Error: {e}")
248
+ print("\nAvailable built-in models:")
249
+ for m in list_available_models():
248
250
  print(f" - {m}")
251
+ print("\nOr provide a path to a custom model JSON file.")
249
252
  sys.exit(1)
250
253
 
251
254
 
@@ -434,7 +437,7 @@ def main():
434
437
  if not args.quiet:
435
438
  if args.with_packages:
436
439
  print(f"Dynamic packages: {', '.join(args.with_packages)}")
437
- print(f"Creating agent with model: {args.model or MODEL_STRING}")
440
+ print(f"Creating agent with model: {args.model or DEFAULT_MODEL}")
438
441
 
439
442
  task_basename = task_file_path.stem if task_file_path else None
440
443
 
@@ -0,0 +1,190 @@
1
+ """LLM configuration for OpenRouter."""
2
+
3
+ import json
4
+ import os
5
+ import sys
6
+ from pathlib import Path
7
+ from typing import Any
8
+
9
+ from dotenv import load_dotenv
10
+ from langchain_openai import ChatOpenAI
11
+
12
+ # Default model name (without .json)
13
+ DEFAULT_MODEL = "sonnet45"
14
+
15
+
16
def get_api_key() -> str:
    """Return the OpenRouter API key from the environment or user config.

    Before the lookup, ``~/.config/coder/.env`` is merged into the process
    environment (without overriding variables already set in the shell), so
    a shell-exported key always wins over the config file.

    Returns:
        API key string

    Raises:
        ValueError: If no API key found
    """
    # Merge the user's config-file env vars; shell environment takes priority.
    user_env_file = Path.home() / ".config" / "coder" / ".env"
    if user_env_file.exists():
        load_dotenv(dotenv_path=user_env_file, override=False)

    key = os.getenv("OPENROUTER_API_KEY")
    if key:
        return key

    # No key anywhere: explain how to configure one, then fail loudly.
    print("Warning: No API key found. Set up with:", file=sys.stderr)
    print(" mkdir -p ~/.config/coder", file=sys.stderr)
    print(" echo 'OPENROUTER_API_KEY=sk-or-...' > ~/.config/coder/.env", file=sys.stderr)
    print("\nOr use: --api-key sk-or-...", file=sys.stderr)
    raise ValueError("OPENROUTER_API_KEY not configured")
41
+
42
+
43
def _load_json_file(path: Path, model: str) -> dict[str, Any]:
    """Parse *path* as JSON and check it carries the mandatory 'path' key.

    Args:
        path: Location of the JSON config on disk.
        model: Model name or path, used only in error messages.

    Returns:
        Parsed configuration dict.

    Raises:
        ValueError: On malformed JSON or a missing 'path' entry.
    """
    text = path.read_text(encoding="utf-8")
    try:
        config = json.loads(text)
    except json.JSONDecodeError as e:
        raise ValueError(f"Invalid JSON in model config '{model}': {e}") from e

    # Every model config must name the OpenRouter model path.
    if "path" not in config:
        raise ValueError(f"Model config '{model}' missing required key: 'path'")

    return config


def load_model_config(model: str) -> dict[str, Any]:
    """Resolve *model* to a configuration dict loaded from a JSON file.

    Lookup order:
    1. If model ends with .json, treat as explicit path
    2. Local file: ./{model}.json
    3. Bundled default: <package>/models/{model}.json

    Args:
        model: Model name (e.g., "sonnet45") or path to JSON file

    Returns:
        Model configuration dict

    Raises:
        FileNotFoundError: If model config not found
        ValueError: If JSON is invalid or missing required keys
    """
    # Case 1: caller handed us an explicit JSON file path.
    if model.endswith(".json"):
        explicit = Path(model).expanduser()
        if not explicit.exists():
            raise FileNotFoundError(f"Model config not found: {model}")
        return _load_json_file(explicit, model)

    # Cases 2 and 3: a bare name — current directory wins over the
    # configs shipped inside the package.
    candidates = (
        Path(f"./{model}.json"),
        Path(__file__).parent / "models" / f"{model}.json",
    )
    for candidate in candidates:
        if candidate.exists():
            return _load_json_file(candidate, model)

    # Nothing matched: report the bundled names so the user can pick one.
    available = list_available_models()
    raise FileNotFoundError(
        f"Model '{model}' not found. Available: {', '.join(available)}"
    )


def list_available_models() -> list[str]:
    """List all available model names (bundled defaults).

    Returns:
        List of model names (without .json extension), sorted.
    """
    models_dir = Path(__file__).parent / "models"
    if not models_dir.exists():
        return []
    return sorted(entry.stem for entry in models_dir.glob("*.json"))
110
+ return sorted(p.stem for p in models_dir.glob("*.json"))
111
+
112
+
113
def get_openrouter_llm(
    model: str = DEFAULT_MODEL,
    api_key: str | None = None,
    verbose: bool = False,
) -> ChatOpenAI:
    """Build a ChatOpenAI client pointed at the OpenRouter endpoint.

    Args:
        model: Model name (e.g., "sonnet45") or path to JSON file
        api_key: Optional API key
        verbose: If True, print model info to console

    Returns:
        Fully configured ChatOpenAI instance

    Raises:
        FileNotFoundError: If model config not found
        ValueError: If API key not configured
    """
    # Resolve the model's JSON config and its OpenRouter model path.
    config = load_model_config(model)
    openrouter_path = config["path"]

    if verbose:
        print(f"Using model: {openrouter_path}")
        if os.getenv("CODER_VERBOSE"):
            # Dump every tuning knob from the config except the model path.
            for option, setting in config.items():
                if option != "path":
                    print(f" {option}: {setting}")

    # Fall back to env/config-file lookup when no key is passed in.
    resolved_key = api_key or get_api_key()

    streaming = config.get("streaming", True)
    # stream_options is only valid when streaming; keep model_kwargs present
    # either way so later entries (e.g. top_k) have somewhere to go.
    passthrough = {"stream_options": {"include_usage": True}} if streaming else {}

    llm_kwargs: dict[str, Any] = {
        "model": openrouter_path,
        "openai_api_key": resolved_key,
        "openai_api_base": "https://openrouter.ai/api/v1",
        "default_headers": {
            "HTTP-Referer": "https://github.com/szeider/agentic-python-coder",
            "X-Title": "Agentic Python Coder",
        },
        "streaming": streaming,
        "model_kwargs": passthrough,
    }

    if config.get("no_sampling_params"):
        # Models such as GPT-5 reject temperature/top_p/penalty parameters;
        # forward only max_tokens.
        if "max_tokens" in config:
            llm_kwargs["max_tokens"] = config["max_tokens"]
    else:
        # Standard sampling parameters, copied through when present.
        for option in (
            "temperature",
            "max_tokens",
            "top_p",
            "frequency_penalty",
            "presence_penalty",
        ):
            if option in config:
                llm_kwargs[option] = config[option]
        if "top_k" in config:
            # Pass via model_kwargs for OpenRouter compatibility.
            llm_kwargs["model_kwargs"]["top_k"] = config["top_k"]

    # Some providers (e.g. slow free tiers) need an explicit timeout.
    if "request_timeout" in config:
        llm_kwargs["request_timeout"] = config["request_timeout"]

    return ChatOpenAI(**llm_kwargs)
@@ -0,0 +1,7 @@
1
+ {
2
+ "path": "deepseek/deepseek-chat-v3.1",
3
+ "temperature": 0.2,
4
+ "max_tokens": 8192,
5
+ "streaming": true,
6
+ "top_p": 1.0
7
+ }
@@ -0,0 +1,8 @@
1
+ {
2
+ "path": "google/gemini-2.5-pro",
3
+ "temperature": 0.3,
4
+ "max_tokens": 2048,
5
+ "streaming": true,
6
+ "top_p": 0.9,
7
+ "request_timeout": 60
8
+ }
@@ -0,0 +1,6 @@
1
+ {
2
+ "path": "openai/gpt-5",
3
+ "max_tokens": 3000,
4
+ "streaming": true,
5
+ "no_sampling_params": true
6
+ }
@@ -0,0 +1,7 @@
1
+ {
2
+ "path": "x-ai/grok-4.1-fast",
3
+ "temperature": 0.15,
4
+ "max_tokens": 2000,
5
+ "streaming": true,
6
+ "top_p": 0.9
7
+ }
@@ -0,0 +1,6 @@
1
+ {
2
+ "path": "anthropic/claude-opus-4.5",
3
+ "temperature": 0.0,
4
+ "max_tokens": 16384,
5
+ "streaming": true
6
+ }
@@ -0,0 +1,7 @@
1
+ {
2
+ "path": "qwen/qwen3-coder",
3
+ "temperature": 0.15,
4
+ "max_tokens": 2048,
5
+ "streaming": true,
6
+ "top_p": 0.9
7
+ }
@@ -0,0 +1,6 @@
1
+ {
2
+ "path": "anthropic/claude-sonnet-4.5",
3
+ "temperature": 0.0,
4
+ "max_tokens": 16384,
5
+ "streaming": true
6
+ }
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "agentic-python-coder"
3
- version = "2.0.1"
3
+ version = "2.1.0"
4
4
  description = "A lightweight Python coding agent that writes, executes, and iterates on code through natural language instructions"
5
5
  readme = "README.md"
6
6
  requires-python = ">=3.13,<3.14"
@@ -1,230 +0,0 @@
1
- """LLM configuration for OpenRouter."""
2
-
3
- import os
4
- from typing import Optional
5
- from langchain_openai import ChatOpenAI
6
- from dotenv import load_dotenv
7
- from pathlib import Path
8
-
9
- # Model aliases to full OpenRouter paths
10
- MODEL_REGISTRY = {
11
- "deepseek": "deepseek/deepseek-chat-v3.1",
12
- "sonnet": "anthropic/claude-sonnet-4.5",
13
- "opus": "anthropic/claude-opus-4.5",
14
- "default": "anthropic/claude-sonnet-4.5",
15
- "grok": "x-ai/grok-4.1-fast",
16
- "qwen": "qwen/qwen3-coder",
17
- "gemini": "google/gemini-2.5-pro",
18
- "gpt": "openai/gpt-5",
19
- }
20
-
21
- # Model-specific configurations
22
- MODEL_CONFIGS = {
23
- "deepseek/deepseek-chat-v3.1": {
24
- "temperature": 0.2, # Low for deterministic code generation with tool calls
25
- "max_tokens": 8192, # Beta API supports up to 8192
26
- "streaming": True,
27
- "context_window": 128000, # Full context window
28
- "top_p": 1.0, # Keep at 1.0 with low temperature
29
- "frequency_penalty": 0, # Don't use for coding
30
- "presence_penalty": 0, # Don't use for coding
31
- "model_kwargs": {
32
- "stream_options": {"include_usage": True},
33
- },
34
- },
35
- "anthropic/claude-sonnet-4.5": {
36
- "temperature": 0.0, # Default for deterministic output
37
- "streaming": True,
38
- "max_tokens": 16384, # Sonnet 4.5 supports up to 64K
39
- "model_kwargs": {"stream_options": {"include_usage": True}},
40
- },
41
- "anthropic/claude-opus-4.5": {
42
- "temperature": 0.0,
43
- "streaming": True,
44
- "max_tokens": 16384, # Opus 4.5 supports up to 32K output
45
- "model_kwargs": {"stream_options": {"include_usage": True}},
46
- },
47
- # Other model configurations
48
- "x-ai/grok-code-fast-1": {
49
- "temperature": 0.15,
50
- "max_tokens": 2000,
51
- "streaming": True,
52
- "context_window": 256000,
53
- "top_p": 0.9,
54
- # Note: Grok doesn't accept frequency_penalty or presence_penalty
55
- "model_kwargs": {
56
- "stream_options": {"include_usage": True}
57
- # Reasoning mode may be added later when supported
58
- },
59
- },
60
- "qwen/qwen3-coder": {
61
- "temperature": 0.15,
62
- "max_tokens": 2048,
63
- "streaming": True,
64
- "context_window": 256000,
65
- "top_p": 0.9,
66
- "model_kwargs": {
67
- "stream_options": {"include_usage": True}
68
- # Provider filtering would be added via headers if needed
69
- },
70
- },
71
- "google/gemini-2.5-pro": {
72
- "temperature": 0.3,
73
- "max_tokens": 2048, # Will be mapped to max_output_tokens
74
- "streaming": True,
75
- "context_window": 1048576,
76
- "top_p": 0.9,
77
- # Note: No top_k or set to 64 (Google's fixed value)
78
- "request_timeout": 60, # Handle slow responses on free tier
79
- "model_kwargs": {"stream_options": {"include_usage": True}},
80
- },
81
- "openai/gpt-5": {
82
- # NO temperature, top_p, or penalty parameters for GPT-5
83
- "max_tokens": 3000,
84
- "streaming": True,
85
- "context_window": 400000,
86
- "model_kwargs": {
87
- "stream_options": {"include_usage": True},
88
- "parallel_tool_calls": False,
89
- # Additional parameters can be added when supported
90
- },
91
- },
92
- }
93
-
94
- # Default configuration for unknown models
95
- DEFAULT_CONFIG = {
96
- "temperature": 0.0,
97
- "max_tokens": 4096,
98
- "streaming": True,
99
- "context_window": 32000,
100
- "model_kwargs": {"stream_options": {"include_usage": True}},
101
- }
102
-
103
- # Keep old constant for backward compatibility
104
- MODEL_STRING = "anthropic/claude-sonnet-4.5"
105
-
106
-
107
- def get_api_key() -> str:
108
- """Get API key from environment or config file.
109
-
110
- Returns:
111
- API key string
112
-
113
- Raises:
114
- ValueError: If no API key found
115
- """
116
- # Load from ~/.config/coder/.env
117
- config_env = Path.home() / ".config" / "coder" / ".env"
118
- if config_env.exists():
119
- load_dotenv(dotenv_path=config_env, override=True)
120
-
121
- # Get API key from environment
122
- api_key = os.getenv("OPENROUTER_API_KEY")
123
-
124
- if not api_key:
125
- print("Warning: No API key found. Set up with:")
126
- print(" mkdir -p ~/.config/coder")
127
- print(" echo 'OPENROUTER_API_KEY=sk-or-...' > ~/.config/coder/.env")
128
- print("\nOr use: --api-key sk-or-...")
129
- raise ValueError("OPENROUTER_API_KEY not configured")
130
-
131
- return api_key
132
-
133
-
134
- def get_openrouter_llm(
135
- model: str = "default",
136
- temperature: Optional[float] = None,
137
- api_key: Optional[str] = None,
138
- verbose: bool = False,
139
- ) -> ChatOpenAI:
140
- """Create a fully configured OpenRouter LLM instance.
141
- Special handling for GPT-5 which doesn't accept sampling parameters.
142
-
143
- Args:
144
- model: Model alias (e.g., "deepseek", "claude") or full path
145
- temperature: Optional temperature override
146
- api_key: Optional API key
147
- verbose: If True, print model info to console (default False for library use)
148
-
149
- Returns:
150
- Fully configured ChatOpenAI instance
151
-
152
- Raises:
153
- ValueError: If model alias is not recognized
154
- """
155
- # Handle direct model path (backward compatibility)
156
- if "/" in model:
157
- model_path = model
158
- # Get config for this path or use default
159
- config = MODEL_CONFIGS.get(model_path, DEFAULT_CONFIG.copy())
160
- else:
161
- # Resolve alias to full path
162
- if model not in MODEL_REGISTRY:
163
- available = ", ".join(sorted(MODEL_REGISTRY.keys()))
164
- raise ValueError(f"Unknown model: '{model}'. Available models: {available}")
165
-
166
- model_path = MODEL_REGISTRY[model]
167
-
168
- # Get hardcoded config for this model
169
- config = MODEL_CONFIGS.get(model_path, DEFAULT_CONFIG.copy())
170
-
171
- # Print model info only if verbose
172
- if verbose and model != "default":
173
- print(f"Using model: {model_path}")
174
- if os.getenv("CODER_VERBOSE"):
175
- # Special handling for GPT-5 which has no temperature
176
- if model_path == "openai/gpt-5":
177
- print(f" Max tokens: {config.get('max_tokens', 'default')}")
178
- print(f" Streaming: {config['streaming']}")
179
- else:
180
- print(f" Temperature: {config.get('temperature', 'default')}")
181
- print(f" Max tokens: {config.get('max_tokens', 'default')}")
182
- print(f" Streaming: {config['streaming']}")
183
-
184
- # Get API key
185
- if not api_key:
186
- api_key = get_api_key()
187
-
188
- # Create base kwargs
189
- llm_kwargs = {
190
- "model": model_path,
191
- "openai_api_key": api_key,
192
- "openai_api_base": "https://openrouter.ai/api/v1",
193
- "default_headers": {
194
- "HTTP-Referer": "https://github.com/szeider/agentic-python-coder",
195
- "X-Title": "Agentic Python Coder",
196
- },
197
- "streaming": config["streaming"],
198
- "model_kwargs": config.get("model_kwargs", {}),
199
- }
200
-
201
- # Special case for GPT-5: NO sampling parameters
202
- if model_path == "openai/gpt-5":
203
- # Only add max_tokens for GPT-5
204
- if "max_tokens" in config:
205
- llm_kwargs["max_tokens"] = config["max_tokens"]
206
- else:
207
- # All other models get standard parameters
208
- llm_kwargs["temperature"] = config.get("temperature", 0.0)
209
- if temperature is not None: # Allow override
210
- llm_kwargs["temperature"] = temperature
211
-
212
- # Add optional parameters
213
- if "max_tokens" in config:
214
- llm_kwargs["max_tokens"] = config["max_tokens"]
215
- if "top_p" in config:
216
- llm_kwargs["top_p"] = config["top_p"]
217
- if "top_k" in config:
218
- llm_kwargs["top_k"] = config["top_k"]
219
- if "frequency_penalty" in config:
220
- llm_kwargs["frequency_penalty"] = config["frequency_penalty"]
221
- if "presence_penalty" in config:
222
- llm_kwargs["presence_penalty"] = config["presence_penalty"]
223
-
224
- # Add request_timeout for models that need it (e.g., Gemini)
225
- if "request_timeout" in config:
226
- llm_kwargs["request_timeout"] = config["request_timeout"]
227
-
228
- llm = ChatOpenAI(**llm_kwargs)
229
-
230
- return llm