prompture 0.0.47.dev1__py3-none-any.whl → 0.0.47.dev3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -122,8 +122,17 @@ class AsyncOpenRouterDriver(CostMixin, AsyncDriver):
             "model_name": model,
         }
 
-        text = resp["choices"][0]["message"]["content"]
-        return {"text": text, "meta": meta}
+        message = resp["choices"][0]["message"]
+        text = message.get("content") or ""
+        reasoning_content = message.get("reasoning_content")
+
+        if not text and reasoning_content:
+            text = reasoning_content
+
+        result: dict[str, Any] = {"text": text, "meta": meta}
+        if reasoning_content is not None:
+            result["reasoning_content"] = reasoning_content
+        return result
 
     # ------------------------------------------------------------------
     # Tool use
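
Every driver below repeats this same fallback: `text` falls back to `reasoning_content` when `content` comes back empty, and the result dict gains an optional `reasoning_content` key. A standalone sketch of the pattern against a mocked OpenAI-style payload (the payload values are illustrative; the logic mirrors the hunk above):

```python
from typing import Any

# Mocked OpenAI-compatible response; the values are illustrative only.
resp: dict[str, Any] = {
    "choices": [
        {"message": {"content": "", "reasoning_content": "Rayleigh scattering dominates..."}}
    ]
}

message = resp["choices"][0]["message"]
text = message.get("content") or ""
reasoning_content = message.get("reasoning_content")

if not text and reasoning_content:  # reasoning models may leave content empty
    text = reasoning_content

result: dict[str, Any] = {"text": text, "meta": {}}
if reasoning_content is not None:
    result["reasoning_content"] = reasoning_content

assert result["text"] == "Rayleigh scattering dominates..."
```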
@@ -196,18 +205,23 @@ class AsyncOpenRouterDriver(CostMixin, AsyncDriver):
             args = json.loads(tc["function"]["arguments"])
         except (json.JSONDecodeError, TypeError):
             args = {}
-        tool_calls_out.append({
-            "id": tc["id"],
-            "name": tc["function"]["name"],
-            "arguments": args,
-        })
+        tool_calls_out.append(
+            {
+                "id": tc["id"],
+                "name": tc["function"]["name"],
+                "arguments": args,
+            }
+        )
 
-        return {
+        result: dict[str, Any] = {
             "text": text,
             "meta": meta,
             "tool_calls": tool_calls_out,
             "stop_reason": stop_reason,
         }
+        if choice["message"].get("reasoning_content") is not None:
+            result["reasoning_content"] = choice["message"]["reasoning_content"]
+        return result
 
     # ------------------------------------------------------------------
     # Streaming
@@ -238,21 +252,25 @@ class AsyncOpenRouterDriver(CostMixin, AsyncDriver):
             data["temperature"] = opts["temperature"]
 
         full_text = ""
+        full_reasoning = ""
         prompt_tokens = 0
         completion_tokens = 0
 
-        async with httpx.AsyncClient() as client, client.stream(
-            "POST",
-            f"{self.base_url}/chat/completions",
-            headers=self.headers,
-            json=data,
-            timeout=120,
-        ) as response:
+        async with (
+            httpx.AsyncClient() as client,
+            client.stream(
+                "POST",
+                f"{self.base_url}/chat/completions",
+                headers=self.headers,
+                json=data,
+                timeout=120,
+            ) as response,
+        ):
             response.raise_for_status()
             async for line in response.aiter_lines():
                 if not line or not line.startswith("data: "):
                     continue
-                payload = line[len("data: "):]
+                payload = line[len("data: ") :]
                 if payload.strip() == "[DONE]":
                     break
                 try:
@@ -270,6 +288,11 @@ class AsyncOpenRouterDriver(CostMixin, AsyncDriver):
                 if choices:
                     delta = choices[0].get("delta", {})
                     content = delta.get("content", "")
+                    reasoning_chunk = delta.get("reasoning_content") or ""
+                    if reasoning_chunk:
+                        full_reasoning += reasoning_chunk
+                    if not content and reasoning_chunk:
+                        content = reasoning_chunk
                     if content:
                         full_text += content
                         yield {"type": "delta", "text": content}
@@ -277,7 +300,7 @@ class AsyncOpenRouterDriver(CostMixin, AsyncDriver):
         total_tokens = prompt_tokens + completion_tokens
         total_cost = self._calculate_cost("openrouter", model, prompt_tokens, completion_tokens)
 
-        yield {
+        done_chunk: dict[str, Any] = {
             "type": "done",
             "text": full_text,
             "meta": {
@@ -289,3 +312,6 @@ class AsyncOpenRouterDriver(CostMixin, AsyncDriver):
                 "model_name": model,
             },
         }
+        if full_reasoning:
+            done_chunk["reasoning_content"] = full_reasoning
+        yield done_chunk
@@ -154,8 +154,17 @@ class GrokDriver(CostMixin, Driver):
             "model_name": model,
         }
 
-        text = resp["choices"][0]["message"]["content"]
-        return {"text": text, "meta": meta}
+        message = resp["choices"][0]["message"]
+        text = message.get("content") or ""
+        reasoning_content = message.get("reasoning_content")
+
+        if not text and reasoning_content:
+            text = reasoning_content
+
+        result: dict[str, Any] = {"text": text, "meta": meta}
+        if reasoning_content is not None:
+            result["reasoning_content"] = reasoning_content
+        return result
 
     # ------------------------------------------------------------------
     # Tool use
@@ -227,15 +236,20 @@ class GrokDriver(CostMixin, Driver):
             args = json.loads(tc["function"]["arguments"])
         except (json.JSONDecodeError, TypeError):
             args = {}
-        tool_calls_out.append({
-            "id": tc["id"],
-            "name": tc["function"]["name"],
-            "arguments": args,
-        })
-
-        return {
+        tool_calls_out.append(
+            {
+                "id": tc["id"],
+                "name": tc["function"]["name"],
+                "arguments": args,
+            }
+        )
+
+        result: dict[str, Any] = {
             "text": text,
             "meta": meta,
             "tool_calls": tool_calls_out,
             "stop_reason": stop_reason,
         }
+        if choice["message"].get("reasoning_content") is not None:
+            result["reasoning_content"] = choice["message"]["reasoning_content"]
+        return result
@@ -122,8 +122,16 @@ class GroqDriver(CostMixin, Driver):
         }
 
         # Extract generated text
-        text = resp.choices[0].message.content
-        return {"text": text, "meta": meta}
+        text = resp.choices[0].message.content or ""
+        reasoning_content = getattr(resp.choices[0].message, "reasoning_content", None)
+
+        if not text and reasoning_content:
+            text = reasoning_content
+
+        result: dict[str, Any] = {"text": text, "meta": meta}
+        if reasoning_content is not None:
+            result["reasoning_content"] = reasoning_content
+        return result
 
     # ------------------------------------------------------------------
     # Tool use
@@ -186,15 +194,21 @@ class GroqDriver(CostMixin, Driver):
             args = json.loads(tc.function.arguments)
         except (json.JSONDecodeError, TypeError):
             args = {}
-        tool_calls_out.append({
-            "id": tc.id,
-            "name": tc.function.name,
-            "arguments": args,
-        })
-
-        return {
+        tool_calls_out.append(
+            {
+                "id": tc.id,
+                "name": tc.function.name,
+                "arguments": args,
+            }
+        )
+
+        result: dict[str, Any] = {
             "text": text,
             "meta": meta,
             "tool_calls": tool_calls_out,
             "stop_reason": stop_reason,
         }
+        reasoning_content = getattr(choice.message, "reasoning_content", None)
+        if reasoning_content is not None:
+            result["reasoning_content"] = reasoning_content
+        return result
@@ -123,7 +123,13 @@ class LMStudioDriver(Driver):
             raise RuntimeError(f"LM Studio request failed: {e}") from e
 
         # Extract text
-        text = response_data["choices"][0]["message"]["content"]
+        message = response_data["choices"][0]["message"]
+        text = message.get("content") or ""
+        reasoning_content = message.get("reasoning_content")
+
+        # Reasoning models (e.g. DeepSeek R1) may return content in reasoning_content
+        if not text and reasoning_content:
+            text = reasoning_content
 
         # Meta info
         usage = response_data.get("usage", {})
@@ -140,7 +146,10 @@ class LMStudioDriver(Driver):
             "model_name": merged_options.get("model", self.model),
         }
 
-        return {"text": text, "meta": meta}
+        result: dict[str, Any] = {"text": text, "meta": meta}
+        if reasoning_content is not None:
+            result["reasoning_content"] = reasoning_content
+        return result
 
     # -- Model management (LM Studio 0.4.0+) ----------------------------------
 
@@ -228,10 +228,11 @@ class MoonshotDriver(CostMixin, Driver):
 
         message = resp["choices"][0]["message"]
         text = message.get("content") or ""
+        reasoning_content = message.get("reasoning_content")
 
         # Reasoning models may return content in reasoning_content when content is empty
-        if not text and message.get("reasoning_content"):
-            text = message["reasoning_content"]
+        if not text and reasoning_content:
+            text = reasoning_content
 
         # Structured output fallback: if we used json_schema mode and got an
         # empty response, retry with json_object mode and schema in the prompt.
@@ -275,8 +276,9 @@ class MoonshotDriver(CostMixin, Driver):
             resp = fb_resp
             fb_message = fb_resp["choices"][0]["message"]
             text = fb_message.get("content") or ""
-            if not text and fb_message.get("reasoning_content"):
-                text = fb_message["reasoning_content"]
+            reasoning_content = fb_message.get("reasoning_content")
+            if not text and reasoning_content:
+                text = reasoning_content
 
         total_cost = self._calculate_cost("moonshot", model, prompt_tokens, completion_tokens)
 
@@ -289,7 +291,10 @@ class MoonshotDriver(CostMixin, Driver):
             "model_name": model,
         }
 
-        return {"text": text, "meta": meta}
+        result: dict[str, Any] = {"text": text, "meta": meta}
+        if reasoning_content is not None:
+            result["reasoning_content"] = reasoning_content
+        return result
 
     # ------------------------------------------------------------------
     # Tool use
@@ -364,11 +369,12 @@ class MoonshotDriver(CostMixin, Driver):
         }
 
         choice = resp["choices"][0]
-        text = choice["message"].get("content") or ""
+        message = choice["message"]
+        text = message.get("content") or ""
         stop_reason = choice.get("finish_reason")
 
         tool_calls_out: list[dict[str, Any]] = []
-        for tc in choice["message"].get("tool_calls", []):
+        for tc in message.get("tool_calls", []):
             try:
                 args = json.loads(tc["function"]["arguments"])
             except (json.JSONDecodeError, TypeError):
@@ -381,13 +387,21 @@ class MoonshotDriver(CostMixin, Driver):
                 }
             )
 
-        return {
+        result: dict[str, Any] = {
             "text": text,
             "meta": meta,
             "tool_calls": tool_calls_out,
             "stop_reason": stop_reason,
         }
 
+        # Preserve reasoning_content for reasoning models so the
+        # conversation loop can include it when sending the assistant
+        # message back (Moonshot requires it on subsequent requests).
+        if message.get("reasoning_content") is not None:
+            result["reasoning_content"] = message["reasoning_content"]
+
+        return result
+
     # ------------------------------------------------------------------
     # Streaming
     # ------------------------------------------------------------------
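
The comment added here captures the one behavioral requirement in this hunk: Moonshot expects the reasoning to be echoed back with the assistant turn on the next request. A minimal illustrative sketch of how a conversation loop might carry it forward; the helper and message layout are assumptions, not Prompture's actual loop:

```python
from typing import Any

def assistant_message(result: dict[str, Any]) -> dict[str, Any]:
    # Hypothetical helper: build the assistant turn for the follow-up
    # request, carrying reasoning_content along when the driver surfaced it.
    msg: dict[str, Any] = {"role": "assistant", "content": result["text"]}
    if "reasoning_content" in result:
        msg["reasoning_content"] = result["reasoning_content"]
    return msg

# e.g. after a tool-call round, using the result shape returned above
driver_result = {
    "text": "",
    "meta": {},
    "tool_calls": [],
    "stop_reason": "tool_calls",
    "reasoning_content": "The user wants current weather, so call get_weather...",
}
history = [{"role": "user", "content": "What's the weather in Paris?"}]
history.append(assistant_message(driver_result))
```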
@@ -430,6 +444,7 @@ class MoonshotDriver(CostMixin, Driver):
         response.raise_for_status()
 
         full_text = ""
+        full_reasoning = ""
         prompt_tokens = 0
         completion_tokens = 0
 
@@ -453,9 +468,11 @@ class MoonshotDriver(CostMixin, Driver):
             if choices:
                 delta = choices[0].get("delta", {})
                 content = delta.get("content") or ""
-                # Reasoning models stream thinking via reasoning_content
-                if not content:
-                    content = delta.get("reasoning_content") or ""
+                reasoning_chunk = delta.get("reasoning_content") or ""
+                if reasoning_chunk:
+                    full_reasoning += reasoning_chunk
+                if not content and reasoning_chunk:
+                    content = reasoning_chunk
                 if content:
                     full_text += content
                     yield {"type": "delta", "text": content}
@@ -463,7 +480,7 @@ class MoonshotDriver(CostMixin, Driver):
         total_tokens = prompt_tokens + completion_tokens
         total_cost = self._calculate_cost("moonshot", model, prompt_tokens, completion_tokens)
 
-        yield {
+        done_chunk: dict[str, Any] = {
             "type": "done",
             "text": full_text,
             "meta": {
@@ -475,3 +492,6 @@ class MoonshotDriver(CostMixin, Driver):
                 "model_name": model,
             },
         }
+        if full_reasoning:
+            done_chunk["reasoning_content"] = full_reasoning
+        yield done_chunk
@@ -181,8 +181,18 @@ class OpenRouterDriver(CostMixin, Driver):
             "model_name": model,
         }
 
-        text = resp["choices"][0]["message"]["content"]
-        return {"text": text, "meta": meta}
+        message = resp["choices"][0]["message"]
+        text = message.get("content") or ""
+        reasoning_content = message.get("reasoning_content")
+
+        # Reasoning models may return content in reasoning_content when content is empty
+        if not text and reasoning_content:
+            text = reasoning_content
+
+        result: dict[str, Any] = {"text": text, "meta": meta}
+        if reasoning_content is not None:
+            result["reasoning_content"] = reasoning_content
+        return result
 
     # ------------------------------------------------------------------
     # Tool use
@@ -257,18 +267,23 @@ class OpenRouterDriver(CostMixin, Driver):
             args = json.loads(tc["function"]["arguments"])
         except (json.JSONDecodeError, TypeError):
             args = {}
-        tool_calls_out.append({
-            "id": tc["id"],
-            "name": tc["function"]["name"],
-            "arguments": args,
-        })
+        tool_calls_out.append(
+            {
+                "id": tc["id"],
+                "name": tc["function"]["name"],
+                "arguments": args,
+            }
+        )
 
-        return {
+        result: dict[str, Any] = {
             "text": text,
             "meta": meta,
             "tool_calls": tool_calls_out,
             "stop_reason": stop_reason,
         }
+        if choice["message"].get("reasoning_content") is not None:
+            result["reasoning_content"] = choice["message"]["reasoning_content"]
+        return result
 
     # ------------------------------------------------------------------
     # Streaming
@@ -311,13 +326,14 @@ class OpenRouterDriver(CostMixin, Driver):
         response.raise_for_status()
 
         full_text = ""
+        full_reasoning = ""
         prompt_tokens = 0
         completion_tokens = 0
 
         for line in response.iter_lines(decode_unicode=True):
             if not line or not line.startswith("data: "):
                 continue
-            payload = line[len("data: "):]
+            payload = line[len("data: ") :]
             if payload.strip() == "[DONE]":
                 break
             try:
@@ -335,6 +351,11 @@ class OpenRouterDriver(CostMixin, Driver):
             if choices:
                 delta = choices[0].get("delta", {})
                 content = delta.get("content", "")
+                reasoning_chunk = delta.get("reasoning_content") or ""
+                if reasoning_chunk:
+                    full_reasoning += reasoning_chunk
+                if not content and reasoning_chunk:
+                    content = reasoning_chunk
                 if content:
                     full_text += content
                     yield {"type": "delta", "text": content}
@@ -342,7 +363,7 @@ class OpenRouterDriver(CostMixin, Driver):
         total_tokens = prompt_tokens + completion_tokens
         total_cost = self._calculate_cost("openrouter", model, prompt_tokens, completion_tokens)
 
-        yield {
+        done_chunk: dict[str, Any] = {
             "type": "done",
             "text": full_text,
             "meta": {
@@ -354,3 +375,6 @@ class OpenRouterDriver(CostMixin, Driver):
                 "model_name": model,
             },
         }
+        if full_reasoning:
+            done_chunk["reasoning_content"] = full_reasoning
+        yield done_chunk
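
Both streaming drivers now accumulate `full_reasoning` separately and attach it to the final chunk. A sketch of consuming that chunk shape; the generator below merely mimics the keys emitted above (the text values are made up):

```python
from collections.abc import Iterator
from typing import Any

def fake_stream() -> Iterator[dict[str, Any]]:
    # Mimics the chunk shapes the streaming methods yield above.
    yield {"type": "delta", "text": "Because of "}
    yield {"type": "delta", "text": "Rayleigh scattering."}
    yield {
        "type": "done",
        "text": "Because of Rayleigh scattering.",
        "meta": {"model_name": "example-model"},
        "reasoning_content": "The question concerns sky color...",  # set only if reasoning streamed
    }

full = ""
for chunk in fake_stream():
    if chunk["type"] == "delta":
        full += chunk["text"]
    elif chunk["type"] == "done":
        assert chunk["text"] == full
        reasoning = chunk.get("reasoning_content")  # absent for non-reasoning models
```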
prompture/simulated_tools.py ADDED
@@ -0,0 +1,115 @@
+"""Prompt-based tool calling for drivers without native tool use support.
+
+When a driver lacks ``supports_tool_use`` the conversation classes can
+fall back to *simulated* tool calling: the available tools are described
+in the system prompt, the model is asked to respond with a structured
+JSON object (either a tool call or a final answer), and Prompture
+parses + dispatches accordingly.
+"""
+
+from __future__ import annotations
+
+import json
+import logging
+from typing import Any
+
+from .tools import clean_json_text
+from .tools_schema import ToolRegistry
+
+logger = logging.getLogger("prompture.simulated_tools")
+
+
+def build_tool_prompt(tools: ToolRegistry) -> str:
+    """Build a plain-text prompt section describing all registered tools.
+
+    The returned string should be appended to the system prompt so the
+    model knows which tools are available and how to call them.
+    """
+    lines = [
+        "You have access to the following tools:",
+        "",
+        tools.to_prompt_format(),
+        "",
+        "To use a tool, respond with ONLY a JSON object in this exact format:",
+        '{"type": "tool_call", "name": "<tool_name>", "arguments": {<args>}}',
+        "",
+        "When you have the final answer (after using tools or if no tool is needed), "
+        "respond with ONLY a JSON object in this format:",
+        '{"type": "final_answer", "content": "<your answer>"}',
+        "",
+        "IMPORTANT: Your entire response must be a single JSON object. "
+        "Do not include any other text, markdown, or explanation outside the JSON.",
+    ]
+    return "\n".join(lines)
+
+
+def parse_simulated_response(text: str, tools: ToolRegistry) -> dict[str, Any]:
+    """Parse the model's response into a tool call or final answer dict.
+
+    Returns one of:
+    - ``{"type": "tool_call", "name": str, "arguments": dict}``
+    - ``{"type": "final_answer", "content": str}``
+    """
+    cleaned = clean_json_text(text).strip()
+
+    # Try JSON parse
+    try:
+        obj = json.loads(cleaned)
+    except (json.JSONDecodeError, ValueError):
+        # Non-JSON text → treat as final answer
+        logger.debug("Response is not valid JSON, treating as final answer")
+        return {"type": "final_answer", "content": text.strip()}
+
+    if not isinstance(obj, dict):
+        return {"type": "final_answer", "content": text.strip()}
+
+    # Explicit type discriminator
+    resp_type = obj.get("type")
+
+    if resp_type == "tool_call":
+        return {
+            "type": "tool_call",
+            "name": obj.get("name", ""),
+            "arguments": obj.get("arguments", {}),
+        }
+
+    if resp_type == "final_answer":
+        return {
+            "type": "final_answer",
+            "content": obj.get("content", ""),
+        }
+
+    # Infer type from keys when "type" is missing
+    if "name" in obj and "arguments" in obj:
+        logger.debug("Inferred tool_call from keys (no 'type' field)")
+        return {
+            "type": "tool_call",
+            "name": obj["name"],
+            "arguments": obj.get("arguments", {}),
+        }
+
+    if "content" in obj:
+        logger.debug("Inferred final_answer from keys (no 'type' field)")
+        return {
+            "type": "final_answer",
+            "content": obj["content"],
+        }
+
+    # Unrecognised JSON structure → final answer with the raw text
+    return {"type": "final_answer", "content": text.strip()}
+
+
+def format_tool_result(tool_name: str, result: Any) -> str:
+    """Format a tool execution result as a user message for the next round."""
+    if isinstance(result, str):
+        result_str = result
+    else:
+        try:
+            result_str = json.dumps(result)
+        except (TypeError, ValueError):
+            result_str = str(result)
+
+    return (
+        f"Tool '{tool_name}' returned:\n{result_str}\n\n"
+        "Continue using the JSON format. Either call another tool or provide your final answer."
+    )
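
Taken together, the three helpers support a prompt, parse, execute, repeat loop. A rough sketch under stated assumptions: `call_model` is any callable returning raw model text, and `registry.execute(...)` is a hypothetical stand-in for whatever execution API `ToolRegistry` exposes; the real loop lives in the conversation classes.

```python
from typing import Any, Callable

from prompture.simulated_tools import (
    build_tool_prompt,
    format_tool_result,
    parse_simulated_response,
)
from prompture.tools_schema import ToolRegistry

def run_simulated(
    call_model: Callable[[list[dict[str, Any]]], str],  # assumption: returns raw model text
    registry: ToolRegistry,
    user_prompt: str,
    max_rounds: int = 5,
) -> str:
    messages = [
        {"role": "system", "content": build_tool_prompt(registry)},
        {"role": "user", "content": user_prompt},
    ]
    for _ in range(max_rounds):
        reply = call_model(messages)
        parsed = parse_simulated_response(reply, registry)
        if parsed["type"] == "final_answer":
            return parsed["content"]
        # Hypothetical call; substitute ToolRegistry's real execution API.
        tool_result = registry.execute(parsed["name"], parsed["arguments"])
        messages.append({"role": "assistant", "content": reply})
        messages.append(
            {"role": "user", "content": format_tool_result(parsed["name"], tool_result)}
        )
    return ""
```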
prompture/tools_schema.py CHANGED
@@ -109,6 +109,24 @@ class ToolDefinition:
             "input_schema": self.parameters,
         }
 
+    def to_prompt_format(self) -> str:
+        """Plain-text description suitable for prompt-based tool calling."""
+        lines = [f"Tool: {self.name}", f" Description: {self.description}", " Parameters:"]
+        props = self.parameters.get("properties", {})
+        required = set(self.parameters.get("required", []))
+        if not props:
+            lines.append(" (none)")
+        else:
+            for pname, pschema in props.items():
+                ptype = pschema.get("type", "string")
+                req_label = "required" if pname in required else "optional"
+                desc = pschema.get("description", "")
+                line = f" - {pname} ({ptype}, {req_label})"
+                if desc:
+                    line += f": {desc}"
+                lines.append(line)
+        return "\n".join(lines)
+
 
 def tool_from_function(
     fn: Callable[..., Any], *, name: str | None = None, description: str | None = None
@@ -244,6 +262,10 @@ class ToolRegistry:
     def to_anthropic_format(self) -> list[dict[str, Any]]:
         return [td.to_anthropic_format() for td in self._tools.values()]
 
+    def to_prompt_format(self) -> str:
+        """Join all tool descriptions into a single plain-text block."""
+        return "\n\n".join(td.to_prompt_format() for td in self._tools.values())
+
     # ------------------------------------------------------------------
     # Execution
     # ------------------------------------------------------------------
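
For a tool like the README's `get_weather`, `ToolRegistry.to_prompt_format()` would render roughly the following block per tool, joined by blank lines (whitespace shown as extracted; descriptions appear only when present in the parameter schema):

```
Tool: get_weather
 Description: Get the current weather for a city.
 Parameters:
 - city (string, required)
 - units (string, optional)
```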
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: prompture
-Version: 0.0.47.dev1
+Version: 0.0.47.dev3
 Summary: Ask LLMs to return structured JSON and run cross-model tests. API-first.
 Author-email: Juan Denis <juan@vene.co>
 License-Expression: MIT
@@ -83,7 +83,7 @@ print(person.name) # Maria
 - **Stepwise extraction** — Per-field prompts with smart type coercion (shorthand numbers, multilingual booleans, dates)
 - **Field registry** — 50+ predefined extraction fields with template variables and Pydantic integration
 - **Conversations** — Stateful multi-turn sessions with sync and async support
-- **Tool use** — Function calling and streaming across supported providers
+- **Tool use** — Function calling and streaming across supported providers, with automatic prompt-based simulation for models without native tool support
 - **Caching** — Built-in response cache with memory, SQLite, and Redis backends
 - **Plugin system** — Register custom drivers via entry points
 - **Usage tracking** — Token counts and cost calculation on every call
@@ -296,6 +296,39 @@ response = conv.send("What is the capital of France?")
 follow_up = conv.send("What about Germany?")  # retains context
 ```
 
+### Tool Use
+
+Register Python functions as tools the LLM can call during a conversation:
+
+```python
+from prompture import Conversation, ToolRegistry
+
+registry = ToolRegistry()
+
+@registry.tool
+def get_weather(city: str, units: str = "celsius") -> str:
+    """Get the current weather for a city."""
+    return f"Weather in {city}: 22 {units}"
+
+conv = Conversation("openai/gpt-4", tools=registry)
+result = conv.ask("What's the weather in London?")
+```
+
+For models without native function calling (Ollama, LM Studio, etc.), Prompture automatically simulates tool use by describing tools in the prompt and parsing structured JSON responses:
+
+```python
+# Auto-detect: uses native tool calling if available, simulation otherwise
+conv = Conversation("ollama/llama3.1:8b", tools=registry, simulated_tools="auto")
+
+# Force simulation even on capable models
+conv = Conversation("openai/gpt-4", tools=registry, simulated_tools=True)
+
+# Disable tool use entirely
+conv = Conversation("openai/gpt-4", tools=registry, simulated_tools=False)
+```
+
+The simulation loop describes tools in the system prompt, asks the model to respond with JSON (`tool_call` or `final_answer`), executes tools, and feeds results back — all transparent to the caller.
+
 ### Model Discovery
 
 Auto-detect available models from configured providers: