prompture 0.0.31.dev1__tar.gz → 0.0.32.dev1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/.github/scripts/update_docs_version.py +16 -21
  2. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/PKG-INFO +21 -3
  3. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/README.md +20 -2
  4. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/__init__.py +19 -0
  5. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/core.py +84 -0
  6. prompture-0.0.32.dev1/prompture/discovery.py +149 -0
  7. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture.egg-info/PKG-INFO +21 -3
  8. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture.egg-info/SOURCES.txt +1 -0
  9. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/.env.copy +0 -0
  10. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/.github/FUNDING.yml +0 -0
  11. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/.github/scripts/update_wrapper_version.py +0 -0
  12. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/.github/workflows/dev.yml +0 -0
  13. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/.github/workflows/documentation.yml +0 -0
  14. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/.github/workflows/publish.yml +0 -0
  15. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/LICENSE +0 -0
  16. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/MANIFEST.in +0 -0
  17. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/VERSION +0 -0
  18. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/docs/source/_static/custom.css +0 -0
  19. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/docs/source/api/core.rst +0 -0
  20. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/docs/source/api/drivers.rst +0 -0
  21. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/docs/source/api/field_definitions.rst +0 -0
  22. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/docs/source/api/index.rst +0 -0
  23. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/docs/source/api/runner.rst +0 -0
  24. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/docs/source/api/tools.rst +0 -0
  25. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/docs/source/api/validator.rst +0 -0
  26. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/docs/source/conf.py +0 -0
  27. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/docs/source/contributing.rst +0 -0
  28. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/docs/source/examples.rst +0 -0
  29. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/docs/source/field_definitions_reference.rst +0 -0
  30. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/docs/source/index.rst +0 -0
  31. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/docs/source/installation.rst +0 -0
  32. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/docs/source/quickstart.rst +0 -0
  33. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/docs/source/toon_input_guide.rst +0 -0
  34. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/packages/README.md +0 -0
  35. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/packages/llm_to_json/README.md +0 -0
  36. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/packages/llm_to_json/llm_to_json/__init__.py +0 -0
  37. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/packages/llm_to_json/pyproject.toml +0 -0
  38. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/packages/llm_to_json/test.py +0 -0
  39. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/packages/llm_to_toon/README.md +0 -0
  40. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/packages/llm_to_toon/llm_to_toon/__init__.py +0 -0
  41. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/packages/llm_to_toon/pyproject.toml +0 -0
  42. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/packages/llm_to_toon/test.py +0 -0
  43. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/cli.py +0 -0
  44. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/driver.py +0 -0
  45. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/drivers/__init__.py +0 -0
  46. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/drivers/azure_driver.py +0 -0
  47. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/drivers/claude_driver.py +0 -0
  48. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/drivers/google_driver.py +0 -0
  49. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/drivers/grok_driver.py +0 -0
  50. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/drivers/groq_driver.py +0 -0
  51. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/drivers/hugging_driver.py +0 -0
  52. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/drivers/lmstudio_driver.py +0 -0
  53. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/drivers/local_http_driver.py +0 -0
  54. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/drivers/ollama_driver.py +0 -0
  55. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/drivers/openai_driver.py +0 -0
  56. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/drivers/openrouter_driver.py +0 -0
  57. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/field_definitions.py +0 -0
  58. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/runner.py +0 -0
  59. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/settings.py +0 -0
  60. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/tools.py +0 -0
  61. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture/validator.py +0 -0
  62. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture.egg-info/dependency_links.txt +0 -0
  63. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture.egg-info/entry_points.txt +0 -0
  64. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture.egg-info/requires.txt +0 -0
  65. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/prompture.egg-info/top_level.txt +0 -0
  66. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/requirements.txt +0 -0
  67. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/setup.cfg +0 -0
  68. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/setup.py +0 -0
  69. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/test.py +0 -0
  70. {prompture-0.0.31.dev1 → prompture-0.0.32.dev1}/test_version_diagnosis.py +0 -0
.github/scripts/update_docs_version.py

@@ -142,39 +142,34 @@ def update_index_rst(index_file, version):
         content = index_file.read_text(encoding='utf-8')
         lines = content.splitlines(keepends=True)
 
-        # Pattern to match the version line (line 20, 0-indexed as 19)
+        # Pattern to match the version line
         pattern = re.compile(
            r'^(\s*Prompture is currently in development \(version )'
            r'[^)]+'
            r'(\)\. APIs may change between versions\.\s*)$'
        )
 
-        # Update line 20 (index 19)
-        if len(lines) >= 20:
-            line_idx = 19  # Line 20 is at index 19
-            original_line = lines[line_idx]
-
-            # Check if the line matches the expected pattern
-            if pattern.match(original_line):
+        updated = False
+        for i, line in enumerate(lines):
+            if pattern.match(line):
                 # Replace with new version
                 new_line = pattern.sub(
                     rf'\g<1>{version}\g<2>',
-                    original_line
+                    line
                 )
-                lines[line_idx] = new_line
-
-                # Write back to file
-                index_file.write_text(''.join(lines), encoding='utf-8')
-                print(f"✓ Updated version in {index_file}")
-                print(f" Old: {original_line.strip()}")
+                lines[i] = new_line
+                print(f"✓ Updated version in {index_file} at line {i+1}")
+                print(f" Old: {line.strip()}")
                 print(f" New: {new_line.strip()}")
-                return True
-            else:
-                print(f"✗ Line 20 does not match expected pattern", file=sys.stderr)
-                print(f" Found: {original_line.strip()}", file=sys.stderr)
-                return False
+                updated = True
+                break
+
+        if updated:
+            # Write back to file
+            index_file.write_text(''.join(lines), encoding='utf-8')
+            return True
         else:
-            print(f"✗ File has fewer than 20 lines", file=sys.stderr)
+            print(f"✗ Version pattern not found in {index_file}", file=sys.stderr)
            return False
 
    except Exception as e:
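The rewrite above drops the hard-coded assumption that the version note sits on line 20 of index.rst and instead scans every line for the sentence pattern, so the script keeps working if the docs are reorganized. A minimal standalone sketch of the same logic — the sample lines are hypothetical, the regex is copied from the script:

```python
import re

# Same pattern the script compiles: group 1 is the prefix, group 2 the suffix,
# and [^)]+ matches whatever version currently sits between the parentheses.
pattern = re.compile(
    r'^(\s*Prompture is currently in development \(version )'
    r'[^)]+'
    r'(\)\. APIs may change between versions\.\s*)$'
)

# Hypothetical index.rst lines -- the note can now live anywhere in the file.
lines = [
    "Welcome to Prompture\n",
    "   Prompture is currently in development (version 0.0.31.dev1). APIs may change between versions.\n",
]

for i, line in enumerate(lines):
    if pattern.match(line):
        # \g<1> and \g<2> re-emit the matched prefix/suffix around the new version.
        lines[i] = pattern.sub(r'\g<1>0.0.32.dev1\g<2>', line)
        print(f"Updated line {i+1}: {lines[i].strip()}")
        break
```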
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: prompture
-Version: 0.0.31.dev1
+Version: 0.0.32.dev1
 Summary: Ask LLMs to return structured JSON and run cross-model tests. API-first.
 Home-page: https://github.com/jhd3197/prompture
 Author: Juan Denis
@@ -61,8 +61,7 @@ Dynamic: summary
 - ✅ **Usage & cost** → Token + $ tracking on every call (`usage` from driver meta)
 - ✅ **AI cleanup** → Optional LLM pass to fix malformed JSON
 - ✅ **Batch testing** → Define suites and compare models (spec-driven)
-- 🧪 **Experimental TOON output** → Request Token-Oriented Object Notation when you need ultra-compact text (see [analysis](toon_token_analysis.md))
-
+- 🧪 **Experimental TOON output** → Request Token-Oriented Object Notation when you need ultra-compact text
 <br>
 
 > [!TIP]
@@ -122,6 +121,25 @@ export LMSTUDIO_ENDPOINT=...
 
 ---
 
+## 🔍 Model Discovery
+
+Prompture can auto-detect available models from your configured environment. This is especially useful for local setups (like Ollama) or when you want to see which models are available to your application.
+
+```python
+from prompture import get_available_models
+
+# Returns a list of strings like ["openai/gpt-4o", "ollama/llama3:latest", ...]
+models = get_available_models()
+
+for model in models:
+    print(f"Found: {model}")
+```
+
+- **Static Drivers** (OpenAI, Claude, Azure, etc.): Returns models listed in the driver's `MODEL_PRICING` configuration if the driver is configured (API key present).
+- **Dynamic Drivers** (Ollama): Queries the local endpoint (e.g., `http://localhost:11434/api/tags`) to fetch currently installed models.
+
+---
+
 ## Quickstart: Pydantic in one line (auto driver)
 
 Use `extract_with_model` for a single LLM call that fills your Pydantic model.
README.md

@@ -18,8 +18,7 @@
 - ✅ **Usage & cost** → Token + $ tracking on every call (`usage` from driver meta)
 - ✅ **AI cleanup** → Optional LLM pass to fix malformed JSON
 - ✅ **Batch testing** → Define suites and compare models (spec-driven)
-- 🧪 **Experimental TOON output** → Request Token-Oriented Object Notation when you need ultra-compact text (see [analysis](toon_token_analysis.md))
-
+- 🧪 **Experimental TOON output** → Request Token-Oriented Object Notation when you need ultra-compact text
 <br>
 
 > [!TIP]
@@ -79,6 +78,25 @@ export LMSTUDIO_ENDPOINT=...
 
 ---
 
+## 🔍 Model Discovery
+
+Prompture can auto-detect available models from your configured environment. This is especially useful for local setups (like Ollama) or when you want to see which models are available to your application.
+
+```python
+from prompture import get_available_models
+
+# Returns a list of strings like ["openai/gpt-4o", "ollama/llama3:latest", ...]
+models = get_available_models()
+
+for model in models:
+    print(f"Found: {model}")
+```
+
+- **Static Drivers** (OpenAI, Claude, Azure, etc.): Returns models listed in the driver's `MODEL_PRICING` configuration if the driver is configured (API key present).
+- **Dynamic Drivers** (Ollama): Queries the local endpoint (e.g., `http://localhost:11434/api/tags`) to fetch currently installed models.
+
+---
+
 ## Quickstart: Pydantic in one line (auto driver)
 
 Use `extract_with_model` for a single LLM call that fills your Pydantic model.
prompture/__init__.py

@@ -11,7 +11,9 @@ from .core import (
     stepwise_extract_with_model,
     extract_from_data,
     extract_from_pandas,
+    render_output,
 )
+from .drivers import get_driver, get_driver_for_model, OpenAIDriver, LocalHTTPDriver, OllamaDriver, ClaudeDriver, LMStudioDriver, AzureDriver, GoogleDriver, GroqDriver, OpenRouterDriver, GrokDriver
 from .tools import clean_json_text, clean_toon_text
 from .field_definitions import (
     FIELD_DEFINITIONS, get_field_definition, get_required_fields, get_field_names,
@@ -21,6 +23,7 @@ from .field_definitions import (
 )
 from .runner import run_suite_from_spec
 from .validator import validate_against_schema
+from .discovery import get_available_models
 
 # Load environment variables from .env file
 load_dotenv()
@@ -54,6 +57,7 @@ __all__ = [
     # TOON Data Extraction Functions
     "extract_from_data",
     "extract_from_pandas",
+    "render_output",
     # Field Definitions
     "FIELD_DEFINITIONS",
     "get_field_definition",
@@ -70,4 +74,19 @@ __all__ = [
     # Enum Field Support
     "validate_enum_value",
     "normalize_enum_value",
+    # Drivers
+    "get_driver",
+    "get_driver_for_model",
+    "OpenAIDriver",
+    "LocalHTTPDriver",
+    "OllamaDriver",
+    "ClaudeDriver",
+    "LMStudioDriver",
+    "AzureDriver",
+    "GoogleDriver",
+    "GroqDriver",
+    "OpenRouterDriver",
+    "GrokDriver",
+    # Discovery
+    "get_available_models",
 ]
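For reference, everything added to `__all__` above now resolves from the package root instead of `prompture.drivers`; a quick sketch of the new import surface:

```python
# All of these are re-exported at the package root as of 0.0.32.dev1:
from prompture import (
    get_driver,
    get_driver_for_model,
    OpenAIDriver,
    OllamaDriver,
    ClaudeDriver,
    get_available_models,  # new discovery helper (prompture.discovery)
    render_output,         # new raw-output helper (prompture.core)
)
```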
prompture/core.py

@@ -129,6 +129,90 @@ def clean_json_text_with_ai(driver: Driver, text: str, model_name: str = "", opt
     cleaned = clean_json_text(raw)
     return cleaned
 
+
+def render_output(
+    driver: Driver,
+    content_prompt: str,
+    output_format: Literal["text", "html", "markdown"] = "text",
+    model_name: str = "",
+    options: Dict[str, Any] = {},
+) -> Dict[str, Any]:
+    """Sends a prompt to the driver and returns the raw output in the requested format.
+
+    This function is designed for "no fluff" output, instructing the LLM to return
+    only the requested content without conversational filler or markdown fences
+    (unless markdown is requested).
+
+    Args:
+        driver: Adapter that implements generate(prompt, options).
+        content_prompt: Main prompt content.
+        output_format: Desired format ("text", "html", "markdown").
+        model_name: Optional model identifier used in usage metadata.
+        options: Additional options to pass to the driver.
+
+    Returns:
+        A dictionary containing:
+        - text: the raw text output.
+        - usage: token usage and cost information from the driver's meta object.
+        - output_format: the format of the output.
+
+    Raises:
+        ValueError: If an unsupported output format is provided.
+    """
+    if output_format not in ("text", "html", "markdown"):
+        raise ValueError(f"Unsupported output_format '{output_format}'. Use 'text', 'html', or 'markdown'.")
+
+    instruct = ""
+    if output_format == "text":
+        instruct = (
+            "Return ONLY the raw text content. Do not use markdown formatting, "
+            "code fences, or conversational filler. Just the text."
+        )
+    elif output_format == "html":
+        instruct = (
+            "Return ONLY valid HTML code. Do not wrap it in markdown code fences "
+            "(like ```html ... ```). Do not include conversational filler."
+        )
+    elif output_format == "markdown":
+        instruct = (
+            "Return valid markdown content. You may use standard markdown formatting."
+        )
+
+    full_prompt = f"{content_prompt}\n\nSYSTEM INSTRUCTION: {instruct}"
+
+    # If specific options are needed for certain formats, they could be added here
+    # For now, we pass options through
+
+    resp = driver.generate(full_prompt, options)
+    raw = resp.get("text", "")
+
+    # Clean up potential markdown fences if the model disobeyed for text/html
+    if output_format in ("text", "html"):
+        # Simple cleanup for common fences if they appear despite instructions
+        cleaned = raw.strip()
+        if cleaned.startswith("```") and cleaned.endswith("```"):
+            # Remove first line (fence + optional language) and last line (fence)
+            lines = cleaned.splitlines()
+            if len(lines) >= 2:
+                cleaned = "\n".join(lines[1:-1])
+        raw = cleaned
+
+    usage = {
+        **resp.get("meta", {}),
+        "raw_response": resp,
+        "total_tokens": resp.get("meta", {}).get("total_tokens", 0),
+        "prompt_tokens": resp.get("meta", {}).get("prompt_tokens", 0),
+        "completion_tokens": resp.get("meta", {}).get("completion_tokens", 0),
+        "cost": resp.get("meta", {}).get("cost", 0.0),
+        "model_name": model_name or getattr(driver, "model", "")
+    }
+
+    return {
+        "text": raw,
+        "usage": usage,
+        "output_format": output_format
+    }
+
 def ask_for_json(
     driver: Driver,
     content_prompt: str,
prompture/discovery.py (new file)

@@ -0,0 +1,149 @@
+"""Discovery module for auto-detecting available models."""
+import os
+import requests
+import logging
+from typing import List, Dict, Any, Set
+
+from .drivers import (
+    DRIVER_REGISTRY,
+    OpenAIDriver,
+    AzureDriver,
+    ClaudeDriver,
+    GoogleDriver,
+    GroqDriver,
+    OpenRouterDriver,
+    GrokDriver,
+    OllamaDriver,
+    LMStudioDriver,
+    LocalHTTPDriver,
+)
+from .settings import settings
+
+logger = logging.getLogger(__name__)
+
+def get_available_models() -> List[str]:
+    """
+    Auto-detects all available models based on configured drivers and environment variables.
+
+    Iterates through supported providers and checks if they are configured (e.g. API key present).
+    For static drivers, returns models from their MODEL_PRICING keys.
+    For dynamic drivers (like Ollama), attempts to fetch available models from the endpoint.
+
+    Returns:
+        A list of unique model strings in the format "provider/model_id".
+    """
+    available_models: Set[str] = set()
+
+    # Map of provider name to driver class
+    # We need to map the registry keys to the actual classes to check MODEL_PRICING
+    # and instantiate for dynamic checks if needed.
+    provider_classes = {
+        "openai": OpenAIDriver,
+        "azure": AzureDriver,
+        "claude": ClaudeDriver,
+        "google": GoogleDriver,
+        "groq": GroqDriver,
+        "openrouter": OpenRouterDriver,
+        "grok": GrokDriver,
+        "ollama": OllamaDriver,
+        "lmstudio": LMStudioDriver,
+        "local_http": LocalHTTPDriver,
+    }
+
+    for provider, driver_cls in provider_classes.items():
+        try:
+            # 1. Check if the provider is configured (has API key or endpoint)
+            # We can check this by looking at the settings or env vars that the driver uses.
+            # A simple way is to try to instantiate it with defaults, but that might fail if keys are missing.
+            # Instead, let's check the specific requirements for each known provider.
+
+            is_configured = False
+
+            if provider == "openai":
+                if settings.openai_api_key or os.getenv("OPENAI_API_KEY"):
+                    is_configured = True
+            elif provider == "azure":
+                if (settings.azure_api_key or os.getenv("AZURE_API_KEY")) and \
+                   (settings.azure_api_endpoint or os.getenv("AZURE_API_ENDPOINT")) and \
+                   (settings.azure_deployment_id or os.getenv("AZURE_DEPLOYMENT_ID")):
+                    is_configured = True
+            elif provider == "claude":
+                if settings.claude_api_key or os.getenv("CLAUDE_API_KEY"):
+                    is_configured = True
+            elif provider == "google":
+                if settings.google_api_key or os.getenv("GOOGLE_API_KEY"):
+                    is_configured = True
+            elif provider == "groq":
+                if settings.groq_api_key or os.getenv("GROQ_API_KEY"):
+                    is_configured = True
+            elif provider == "openrouter":
+                if settings.openrouter_api_key or os.getenv("OPENROUTER_API_KEY"):
+                    is_configured = True
+            elif provider == "grok":
+                if settings.grok_api_key or os.getenv("GROK_API_KEY"):
+                    is_configured = True
+            elif provider == "ollama":
+                # Ollama is always considered "configured" as it defaults to localhost
+                # We will check connectivity later
+                is_configured = True
+            elif provider == "lmstudio":
+                # LM Studio is similar to Ollama, defaults to localhost
+                is_configured = True
+            elif provider == "local_http":
+                if settings.local_http_endpoint or os.getenv("LOCAL_HTTP_ENDPOINT"):
+                    is_configured = True
+
+            if not is_configured:
+                continue
+
+            # 2. Static Detection: Get models from MODEL_PRICING
+            if hasattr(driver_cls, "MODEL_PRICING"):
+                pricing = driver_cls.MODEL_PRICING
+                for model_id in pricing.keys():
+                    # Skip "default" or generic keys if they exist
+                    if model_id == "default":
+                        continue
+
+                    # For Azure, the model_id in pricing is usually the base model name,
+                    # but the user needs to use the deployment ID.
+                    # However, our Azure driver implementation uses the deployment_id from init
+                    # as the "model" for the request, but expects the user to pass a model name
+                    # that maps to pricing?
+                    # Looking at AzureDriver:
+                    #   kwargs = {"model": self.deployment_id, ...}
+                    #   model = options.get("model", self.model) -> used for pricing lookup
+                    # So we should list the keys in MODEL_PRICING as available "models"
+                    # even though for Azure specifically it's a bit weird because of deployment IDs.
+                    # But for general discovery, listing supported models is correct.
+
+                    available_models.add(f"{provider}/{model_id}")
+
+            # 3. Dynamic Detection: Specific logic for Ollama
+            if provider == "ollama":
+                try:
+                    endpoint = settings.ollama_endpoint or os.getenv("OLLAMA_ENDPOINT", "http://localhost:11434/api/generate")
+                    # We need the base URL for tags, usually http://localhost:11434/api/tags
+                    # The configured endpoint might be .../api/generate or .../api/chat
+                    base_url = endpoint.split("/api/")[0]
+                    tags_url = f"{base_url}/api/tags"
+
+                    resp = requests.get(tags_url, timeout=2)
+                    if resp.status_code == 200:
+                        data = resp.json()
+                        models = data.get("models", [])
+                        for model in models:
+                            name = model.get("name")
+                            if name:
+                                # Ollama model names often include tags like "llama3:latest"
+                                # We can keep them as is.
+                                available_models.add(f"ollama/{name}")
+                except Exception as e:
+                    logger.debug(f"Failed to fetch Ollama models: {e}")
+
+            # Future: Add dynamic detection for LM Studio if they have an endpoint for listing models
+
+        except Exception as e:
+            logger.warning(f"Error detecting models for provider {provider}: {e}")
+            continue
+
+    return sorted(list(available_models))
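Because `get_available_models` returns sorted `"provider/model_id"` strings, callers can group or filter the results with plain string handling. An illustrative sketch:

```python
from collections import defaultdict
from typing import Dict, List

from prompture import get_available_models

# Group discovered models by provider prefix, e.g. "openai/gpt-4o" -> "openai".
by_provider: Dict[str, List[str]] = defaultdict(list)
for entry in get_available_models():
    provider, _, model_id = entry.partition("/")
    by_provider[provider].append(model_id)

for provider, models in sorted(by_provider.items()):
    print(f"{provider}: {len(models)} model(s)")
    for model_id in models:
        print(f"  - {model_id}")
```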
prompture.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: prompture
-Version: 0.0.31.dev1
+Version: 0.0.32.dev1
 Summary: Ask LLMs to return structured JSON and run cross-model tests. API-first.
 Home-page: https://github.com/jhd3197/prompture
 Author: Juan Denis
@@ -61,8 +61,7 @@ Dynamic: summary
 - ✅ **Usage & cost** → Token + $ tracking on every call (`usage` from driver meta)
 - ✅ **AI cleanup** → Optional LLM pass to fix malformed JSON
 - ✅ **Batch testing** → Define suites and compare models (spec-driven)
-- 🧪 **Experimental TOON output** → Request Token-Oriented Object Notation when you need ultra-compact text (see [analysis](toon_token_analysis.md))
-
+- 🧪 **Experimental TOON output** → Request Token-Oriented Object Notation when you need ultra-compact text
 <br>
 
 > [!TIP]
@@ -122,6 +121,25 @@ export LMSTUDIO_ENDPOINT=...
 
 ---
 
+## 🔍 Model Discovery
+
+Prompture can auto-detect available models from your configured environment. This is especially useful for local setups (like Ollama) or when you want to see which models are available to your application.
+
+```python
+from prompture import get_available_models
+
+# Returns a list of strings like ["openai/gpt-4o", "ollama/llama3:latest", ...]
+models = get_available_models()
+
+for model in models:
+    print(f"Found: {model}")
+```
+
+- **Static Drivers** (OpenAI, Claude, Azure, etc.): Returns models listed in the driver's `MODEL_PRICING` configuration if the driver is configured (API key present).
+- **Dynamic Drivers** (Ollama): Queries the local endpoint (e.g., `http://localhost:11434/api/tags`) to fetch currently installed models.
+
+---
+
 ## Quickstart: Pydantic in one line (auto driver)
 
 Use `extract_with_model` for a single LLM call that fills your Pydantic model.
prompture.egg-info/SOURCES.txt

@@ -41,6 +41,7 @@ packages/llm_to_toon/llm_to_toon/__init__.py
 prompture/__init__.py
 prompture/cli.py
 prompture/core.py
+prompture/discovery.py
 prompture/driver.py
 prompture/field_definitions.py
 prompture/runner.py