prompture-0.0.46-py3-none-any.whl → prompture-0.0.47-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
prompture/drivers/lmstudio_driver.py CHANGED
@@ -123,7 +123,13 @@ class LMStudioDriver(Driver):
             raise RuntimeError(f"LM Studio request failed: {e}") from e
 
         # Extract text
-        text = response_data["choices"][0]["message"]["content"]
+        message = response_data["choices"][0]["message"]
+        text = message.get("content") or ""
+        reasoning_content = message.get("reasoning_content")
+
+        # Reasoning models (e.g. DeepSeek R1) may return content in reasoning_content
+        if not text and reasoning_content:
+            text = reasoning_content
 
         # Meta info
         usage = response_data.get("usage", {})
@@ -140,7 +146,10 @@ class LMStudioDriver(Driver):
             "model_name": merged_options.get("model", self.model),
         }
 
-        return {"text": text, "meta": meta}
+        result: dict[str, Any] = {"text": text, "meta": meta}
+        if reasoning_content is not None:
+            result["reasoning_content"] = reasoning_content
+        return result
 
     # -- Model management (LM Studio 0.4.0+) ----------------------------------
 
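The fallback logic added here recurs across the drivers in this release. As a standalone sketch (the helper name is ours, not a prompture API):

```python
from typing import Any, Optional


def extract_text(message: dict[str, Any]) -> tuple[str, Optional[str]]:
    """Mirror the drivers' new extraction logic for an OpenAI-style message dict."""
    text = message.get("content") or ""
    reasoning_content = message.get("reasoning_content")
    # Reasoning models (e.g. DeepSeek R1) may leave "content" empty and put
    # everything in "reasoning_content"; fall back so callers never get "".
    if not text and reasoning_content:
        text = reasoning_content
    return text, reasoning_content


# A DeepSeek-R1-style response with an empty content field:
msg = {"content": "", "reasoning_content": "17 * 24 = 408."}
assert extract_text(msg) == ("17 * 24 = 408.", "17 * 24 = 408.")
```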
prompture/drivers/moonshot_driver.py CHANGED
@@ -228,10 +228,11 @@ class MoonshotDriver(CostMixin, Driver):
 
         message = resp["choices"][0]["message"]
         text = message.get("content") or ""
+        reasoning_content = message.get("reasoning_content")
 
         # Reasoning models may return content in reasoning_content when content is empty
-        if not text and message.get("reasoning_content"):
-            text = message["reasoning_content"]
+        if not text and reasoning_content:
+            text = reasoning_content
 
         # Structured output fallback: if we used json_schema mode and got an
         # empty response, retry with json_object mode and schema in the prompt.
@@ -275,8 +276,9 @@ class MoonshotDriver(CostMixin, Driver):
             resp = fb_resp
             fb_message = fb_resp["choices"][0]["message"]
             text = fb_message.get("content") or ""
-            if not text and fb_message.get("reasoning_content"):
-                text = fb_message["reasoning_content"]
+            reasoning_content = fb_message.get("reasoning_content")
+            if not text and reasoning_content:
+                text = reasoning_content
 
         total_cost = self._calculate_cost("moonshot", model, prompt_tokens, completion_tokens)
 
@@ -289,7 +291,10 @@ class MoonshotDriver(CostMixin, Driver):
             "model_name": model,
         }
 
-        return {"text": text, "meta": meta}
+        result: dict[str, Any] = {"text": text, "meta": meta}
+        if reasoning_content is not None:
+            result["reasoning_content"] = reasoning_content
+        return result
 
     # ------------------------------------------------------------------
     # Tool use
@@ -364,11 +369,12 @@ class MoonshotDriver(CostMixin, Driver):
         }
 
         choice = resp["choices"][0]
-        text = choice["message"].get("content") or ""
+        message = choice["message"]
+        text = message.get("content") or ""
         stop_reason = choice.get("finish_reason")
 
         tool_calls_out: list[dict[str, Any]] = []
-        for tc in choice["message"].get("tool_calls", []):
+        for tc in message.get("tool_calls", []):
             try:
                 args = json.loads(tc["function"]["arguments"])
             except (json.JSONDecodeError, TypeError):
@@ -381,13 +387,21 @@ class MoonshotDriver(CostMixin, Driver):
                 }
             )
 
-        return {
+        result: dict[str, Any] = {
             "text": text,
             "meta": meta,
             "tool_calls": tool_calls_out,
             "stop_reason": stop_reason,
         }
 
+        # Preserve reasoning_content for reasoning models so the
+        # conversation loop can include it when sending the assistant
+        # message back (Moonshot requires it on subsequent requests).
+        if message.get("reasoning_content") is not None:
+            result["reasoning_content"] = message["reasoning_content"]
+
+        return result
+
     # ------------------------------------------------------------------
     # Streaming
     # ------------------------------------------------------------------
@@ -430,6 +444,7 @@ class MoonshotDriver(CostMixin, Driver):
         response.raise_for_status()
 
         full_text = ""
+        full_reasoning = ""
         prompt_tokens = 0
         completion_tokens = 0
 
@@ -453,9 +468,11 @@ class MoonshotDriver(CostMixin, Driver):
                 if choices:
                     delta = choices[0].get("delta", {})
                     content = delta.get("content") or ""
-                    # Reasoning models stream thinking via reasoning_content
-                    if not content:
-                        content = delta.get("reasoning_content") or ""
+                    reasoning_chunk = delta.get("reasoning_content") or ""
+                    if reasoning_chunk:
+                        full_reasoning += reasoning_chunk
+                    if not content and reasoning_chunk:
+                        content = reasoning_chunk
                     if content:
                         full_text += content
                         yield {"type": "delta", "text": content}
@@ -463,7 +480,7 @@ class MoonshotDriver(CostMixin, Driver):
         total_tokens = prompt_tokens + completion_tokens
         total_cost = self._calculate_cost("moonshot", model, prompt_tokens, completion_tokens)
 
-        yield {
+        done_chunk: dict[str, Any] = {
             "type": "done",
             "text": full_text,
             "meta": {
@@ -475,3 +492,6 @@ class MoonshotDriver(CostMixin, Driver):
                 "model_name": model,
             },
         }
+        if full_reasoning:
+            done_chunk["reasoning_content"] = full_reasoning
+        yield done_chunk
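Streaming callers see the aggregated reasoning only on the final chunk. A minimal consumer, assuming nothing beyond the chunk shapes visible in the hunks above:

```python
from collections.abc import Iterator
from typing import Any


def consume_stream(chunks: Iterator[dict[str, Any]]) -> dict[str, Any]:
    """Print deltas as they arrive and return the final "done" chunk."""
    done: dict[str, Any] = {}
    for chunk in chunks:
        if chunk["type"] == "delta":
            print(chunk["text"], end="", flush=True)
        elif chunk["type"] == "done":
            done = chunk  # carries text, meta, and optionally reasoning_content
    return done


# Simulated stream matching the driver's output shape:
fake = iter([
    {"type": "delta", "text": "408"},
    {"type": "done", "text": "408", "meta": {}, "reasoning_content": "17*24..."},
])
final = consume_stream(fake)
assert final.get("reasoning_content") == "17*24..."
```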
prompture/drivers/ollama_driver.py CHANGED
@@ -1,6 +1,7 @@
 import json
 import logging
 import os
+import uuid
 from collections.abc import Iterator
 from typing import Any, Optional
 
@@ -15,6 +16,7 @@ class OllamaDriver(Driver):
     supports_json_mode = True
     supports_json_schema = True
     supports_streaming = True
+    supports_tool_use = True
     supports_vision = True
 
     # Ollama is free – costs are always zero.
@@ -131,6 +133,95 @@ class OllamaDriver(Driver):
         # Ollama returns text in "response"
         return {"text": response_data.get("response", ""), "meta": meta}
 
+    # ------------------------------------------------------------------
+    # Tool use
+    # ------------------------------------------------------------------
+
+    def generate_messages_with_tools(
+        self,
+        messages: list[dict[str, Any]],
+        tools: list[dict[str, Any]],
+        options: dict[str, Any],
+    ) -> dict[str, Any]:
+        """Generate a response that may include tool calls via Ollama's /api/chat endpoint."""
+        merged_options = self.options.copy()
+        if options:
+            merged_options.update(options)
+
+        chat_endpoint = self.endpoint.replace("/api/generate", "/api/chat")
+
+        payload: dict[str, Any] = {
+            "model": merged_options.get("model", self.model),
+            "messages": messages,
+            "tools": tools,
+            "stream": False,
+        }
+
+        if "temperature" in merged_options:
+            payload["temperature"] = merged_options["temperature"]
+        if "top_p" in merged_options:
+            payload["top_p"] = merged_options["top_p"]
+        if "top_k" in merged_options:
+            payload["top_k"] = merged_options["top_k"]
+
+        try:
+            logger.debug(f"Sending tool use request to Ollama endpoint: {chat_endpoint}")
+            r = requests.post(chat_endpoint, json=payload, timeout=120)
+            r.raise_for_status()
+            response_data = r.json()
+
+            if not isinstance(response_data, dict):
+                raise ValueError(f"Expected dict response, got {type(response_data)}")
+        except requests.exceptions.ConnectionError:
+            raise
+        except requests.exceptions.HTTPError:
+            raise
+        except json.JSONDecodeError as e:
+            raise json.JSONDecodeError(f"Invalid JSON response from Ollama: {e.msg}", e.doc, e.pos) from e
+        except Exception as e:
+            raise RuntimeError(f"Ollama tool use request failed: {e}") from e
+
+        prompt_tokens = response_data.get("prompt_eval_count", 0)
+        completion_tokens = response_data.get("eval_count", 0)
+        total_tokens = prompt_tokens + completion_tokens
+
+        meta = {
+            "prompt_tokens": prompt_tokens,
+            "completion_tokens": completion_tokens,
+            "total_tokens": total_tokens,
+            "cost": 0.0,
+            "raw_response": response_data,
+            "model_name": merged_options.get("model", self.model),
+        }
+
+        message = response_data.get("message", {})
+        text = message.get("content") or ""
+        stop_reason = response_data.get("done_reason", "stop")
+
+        tool_calls_out: list[dict[str, Any]] = []
+        for tc in message.get("tool_calls", []):
+            func = tc.get("function", {})
+            # Ollama returns arguments as a dict already (no JSON string parsing needed)
+            args = func.get("arguments", {})
+            if isinstance(args, str):
+                try:
+                    args = json.loads(args)
+                except (json.JSONDecodeError, TypeError):
+                    args = {}
+            tool_calls_out.append({
+                # Ollama does not return tool_call IDs — generate one locally
+                "id": f"call_{uuid.uuid4().hex[:24]}",
+                "name": func.get("name", ""),
+                "arguments": args,
+            })
+
+        return {
+            "text": text,
+            "meta": meta,
+            "tool_calls": tool_calls_out,
+            "stop_reason": stop_reason,
+        }
+
     # ------------------------------------------------------------------
     # Streaming
     # ------------------------------------------------------------------
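For reference, the raw exchange this new method performs against a local Ollama server looks roughly like this (endpoint and model illustrative; the payload keys match the hunk above):

```python
import requests

payload = {
    "model": "llama3.1:8b",  # any tool-capable local model
    "messages": [{"role": "user", "content": "What's the weather in London?"}],
    "tools": [{
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }],
    "stream": False,
}

resp = requests.post("http://localhost:11434/api/chat", json=payload, timeout=120)
data = resp.json()
# Unlike OpenAI-compatible APIs, "arguments" arrives as a dict, and there is
# no tool-call ID; hence the locally generated call_ IDs in the driver.
for tc in data.get("message", {}).get("tool_calls", []):
    print(tc["function"]["name"], tc["function"]["arguments"])
```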
prompture/drivers/openrouter_driver.py CHANGED
@@ -181,8 +181,18 @@ class OpenRouterDriver(CostMixin, Driver):
             "model_name": model,
         }
 
-        text = resp["choices"][0]["message"]["content"]
-        return {"text": text, "meta": meta}
+        message = resp["choices"][0]["message"]
+        text = message.get("content") or ""
+        reasoning_content = message.get("reasoning_content")
+
+        # Reasoning models may return content in reasoning_content when content is empty
+        if not text and reasoning_content:
+            text = reasoning_content
+
+        result: dict[str, Any] = {"text": text, "meta": meta}
+        if reasoning_content is not None:
+            result["reasoning_content"] = reasoning_content
+        return result
 
     # ------------------------------------------------------------------
     # Tool use
@@ -257,18 +267,23 @@ class OpenRouterDriver(CostMixin, Driver):
                 args = json.loads(tc["function"]["arguments"])
             except (json.JSONDecodeError, TypeError):
                 args = {}
-            tool_calls_out.append({
-                "id": tc["id"],
-                "name": tc["function"]["name"],
-                "arguments": args,
-            })
+            tool_calls_out.append(
+                {
+                    "id": tc["id"],
+                    "name": tc["function"]["name"],
+                    "arguments": args,
+                }
+            )
 
-        return {
+        result: dict[str, Any] = {
             "text": text,
             "meta": meta,
             "tool_calls": tool_calls_out,
             "stop_reason": stop_reason,
         }
+        if choice["message"].get("reasoning_content") is not None:
+            result["reasoning_content"] = choice["message"]["reasoning_content"]
+        return result
 
     # ------------------------------------------------------------------
     # Streaming
@@ -311,13 +326,14 @@ class OpenRouterDriver(CostMixin, Driver):
         response.raise_for_status()
 
         full_text = ""
+        full_reasoning = ""
         prompt_tokens = 0
        completion_tokens = 0
 
         for line in response.iter_lines(decode_unicode=True):
             if not line or not line.startswith("data: "):
                 continue
-            payload = line[len("data: "):]
+            payload = line[len("data: ") :]
             if payload.strip() == "[DONE]":
                 break
             try:
@@ -335,6 +351,11 @@ class OpenRouterDriver(CostMixin, Driver):
                 if choices:
                     delta = choices[0].get("delta", {})
                     content = delta.get("content", "")
+                    reasoning_chunk = delta.get("reasoning_content") or ""
+                    if reasoning_chunk:
+                        full_reasoning += reasoning_chunk
+                    if not content and reasoning_chunk:
+                        content = reasoning_chunk
                     if content:
                         full_text += content
                         yield {"type": "delta", "text": content}
@@ -342,7 +363,7 @@ class OpenRouterDriver(CostMixin, Driver):
         total_tokens = prompt_tokens + completion_tokens
         total_cost = self._calculate_cost("openrouter", model, prompt_tokens, completion_tokens)
 
-        yield {
+        done_chunk: dict[str, Any] = {
             "type": "done",
             "text": full_text,
             "meta": {
@@ -354,3 +375,6 @@ class OpenRouterDriver(CostMixin, Driver):
                 "model_name": model,
             },
         }
+        if full_reasoning:
+            done_chunk["reasoning_content"] = full_reasoning
+        yield done_chunk
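Both streaming loops parse the same SSE framing: each event line is `data: <json>`, and `data: [DONE]` ends the stream. Condensed (function name ours):

```python
import json
from typing import Any, Optional


def parse_sse_line(line: str) -> Optional[dict[str, Any]]:
    """Decode one "data: <json>" event line; None for blanks, [DONE], or junk."""
    if not line or not line.startswith("data: "):
        return None
    payload = line[len("data: ") :]
    if payload.strip() == "[DONE]":
        return None
    try:
        return json.loads(payload)
    except json.JSONDecodeError:
        return None


event = parse_sse_line('data: {"choices": [{"delta": {"reasoning_content": "hmm"}}]}')
assert event["choices"][0]["delta"]["reasoning_content"] == "hmm"
```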
prompture/simulated_tools.py ADDED
@@ -0,0 +1,115 @@
+"""Prompt-based tool calling for drivers without native tool use support.
+
+When a driver lacks ``supports_tool_use`` the conversation classes can
+fall back to *simulated* tool calling: the available tools are described
+in the system prompt, the model is asked to respond with a structured
+JSON object (either a tool call or a final answer), and Prompture
+parses + dispatches accordingly.
+"""
+
+from __future__ import annotations
+
+import json
+import logging
+from typing import Any
+
+from .tools import clean_json_text
+from .tools_schema import ToolRegistry
+
+logger = logging.getLogger("prompture.simulated_tools")
+
+
+def build_tool_prompt(tools: ToolRegistry) -> str:
+    """Build a plain-text prompt section describing all registered tools.
+
+    The returned string should be appended to the system prompt so the
+    model knows which tools are available and how to call them.
+    """
+    lines = [
+        "You have access to the following tools:",
+        "",
+        tools.to_prompt_format(),
+        "",
+        "To use a tool, respond with ONLY a JSON object in this exact format:",
+        '{"type": "tool_call", "name": "<tool_name>", "arguments": {<args>}}',
+        "",
+        "When you have the final answer (after using tools or if no tool is needed), "
+        "respond with ONLY a JSON object in this format:",
+        '{"type": "final_answer", "content": "<your answer>"}',
+        "",
+        "IMPORTANT: Your entire response must be a single JSON object. "
+        "Do not include any other text, markdown, or explanation outside the JSON.",
+    ]
+    return "\n".join(lines)
+
+
+def parse_simulated_response(text: str, tools: ToolRegistry) -> dict[str, Any]:
+    """Parse the model's response into a tool call or final answer dict.
+
+    Returns one of:
+    - ``{"type": "tool_call", "name": str, "arguments": dict}``
+    - ``{"type": "final_answer", "content": str}``
+    """
+    cleaned = clean_json_text(text).strip()
+
+    # Try JSON parse
+    try:
+        obj = json.loads(cleaned)
+    except (json.JSONDecodeError, ValueError):
+        # Non-JSON text → treat as final answer
+        logger.debug("Response is not valid JSON, treating as final answer")
+        return {"type": "final_answer", "content": text.strip()}
+
+    if not isinstance(obj, dict):
+        return {"type": "final_answer", "content": text.strip()}
+
+    # Explicit type discriminator
+    resp_type = obj.get("type")
+
+    if resp_type == "tool_call":
+        return {
+            "type": "tool_call",
+            "name": obj.get("name", ""),
+            "arguments": obj.get("arguments", {}),
+        }
+
+    if resp_type == "final_answer":
+        return {
+            "type": "final_answer",
+            "content": obj.get("content", ""),
+        }
+
+    # Infer type from keys when "type" is missing
+    if "name" in obj and "arguments" in obj:
+        logger.debug("Inferred tool_call from keys (no 'type' field)")
+        return {
+            "type": "tool_call",
+            "name": obj["name"],
+            "arguments": obj.get("arguments", {}),
+        }
+
+    if "content" in obj:
+        logger.debug("Inferred final_answer from keys (no 'type' field)")
+        return {
+            "type": "final_answer",
+            "content": obj["content"],
+        }
+
+    # Unrecognised JSON structure → final answer with the raw text
+    return {"type": "final_answer", "content": text.strip()}
+
+
+def format_tool_result(tool_name: str, result: Any) -> str:
+    """Format a tool execution result as a user message for the next round."""
+    if isinstance(result, str):
+        result_str = result
+    else:
+        try:
+            result_str = json.dumps(result)
+        except (TypeError, ValueError):
+            result_str = str(result)
+
+    return (
+        f"Tool '{tool_name}' returned:\n{result_str}\n\n"
+        "Continue using the JSON format. Either call another tool or provide your final answer."
+    )
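Putting the three helpers together by hand (registry decorator usage mirrors the README example further down; we assume `@registry.tool` returns the wrapped function unchanged):

```python
from prompture.simulated_tools import (
    build_tool_prompt,
    format_tool_result,
    parse_simulated_response,
)
from prompture.tools_schema import ToolRegistry

registry = ToolRegistry()


@registry.tool
def get_weather(city: str) -> str:
    """Get the current weather for a city."""
    return f"Weather in {city}: 22C"


system_section = build_tool_prompt(registry)  # appended to the system prompt

# A model reply requesting a tool call round-trips cleanly:
reply = '{"type": "tool_call", "name": "get_weather", "arguments": {"city": "London"}}'
parsed = parse_simulated_response(reply, registry)
assert parsed["type"] == "tool_call"

# The tool result becomes the next user message:
follow_up = format_tool_result(parsed["name"], get_weather(**parsed["arguments"]))
```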
prompture/tools_schema.py CHANGED
@@ -109,6 +109,24 @@ class ToolDefinition:
             "input_schema": self.parameters,
         }
 
+    def to_prompt_format(self) -> str:
+        """Plain-text description suitable for prompt-based tool calling."""
+        lines = [f"Tool: {self.name}", f" Description: {self.description}", " Parameters:"]
+        props = self.parameters.get("properties", {})
+        required = set(self.parameters.get("required", []))
+        if not props:
+            lines.append(" (none)")
+        else:
+            for pname, pschema in props.items():
+                ptype = pschema.get("type", "string")
+                req_label = "required" if pname in required else "optional"
+                desc = pschema.get("description", "")
+                line = f" - {pname} ({ptype}, {req_label})"
+                if desc:
+                    line += f": {desc}"
+                lines.append(line)
+        return "\n".join(lines)
+
 
 def tool_from_function(
     fn: Callable[..., Any], *, name: str | None = None, description: str | None = None
@@ -244,6 +262,10 @@ class ToolRegistry:
     def to_anthropic_format(self) -> list[dict[str, Any]]:
         return [td.to_anthropic_format() for td in self._tools.values()]
 
+    def to_prompt_format(self) -> str:
+        """Join all tool descriptions into a single plain-text block."""
+        return "\n\n".join(td.to_prompt_format() for td in self._tools.values())
+
     # ------------------------------------------------------------------
     # Execution
    # ------------------------------------------------------------------
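For a typical definition the rendered block looks like this (indentation approximate, per the string literals above; tool name and descriptions illustrative):

```
Tool: get_weather
 Description: Get the current weather for a city.
 Parameters:
 - city (string, required): City name
 - units (string, optional): Temperature units
```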
prompture-0.0.47.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: prompture
-Version: 0.0.46
+Version: 0.0.47
 Summary: Ask LLMs to return structured JSON and run cross-model tests. API-first.
 Author-email: Juan Denis <juan@vene.co>
 License-Expression: MIT
@@ -83,7 +83,7 @@ print(person.name)  # Maria
 - **Stepwise extraction** — Per-field prompts with smart type coercion (shorthand numbers, multilingual booleans, dates)
 - **Field registry** — 50+ predefined extraction fields with template variables and Pydantic integration
 - **Conversations** — Stateful multi-turn sessions with sync and async support
-- **Tool use** — Function calling and streaming across supported providers
+- **Tool use** — Function calling and streaming across supported providers, with automatic prompt-based simulation for models without native tool support
 - **Caching** — Built-in response cache with memory, SQLite, and Redis backends
 - **Plugin system** — Register custom drivers via entry points
 - **Usage tracking** — Token counts and cost calculation on every call
@@ -296,6 +296,39 @@ response = conv.send("What is the capital of France?")
 follow_up = conv.send("What about Germany?")  # retains context
 ```
 
+### Tool Use
+
+Register Python functions as tools the LLM can call during a conversation:
+
+```python
+from prompture import Conversation, ToolRegistry
+
+registry = ToolRegistry()
+
+@registry.tool
+def get_weather(city: str, units: str = "celsius") -> str:
+    """Get the current weather for a city."""
+    return f"Weather in {city}: 22 {units}"
+
+conv = Conversation("openai/gpt-4", tools=registry)
+result = conv.ask("What's the weather in London?")
+```
+
+For models without native function calling (Ollama, LM Studio, etc.), Prompture automatically simulates tool use by describing tools in the prompt and parsing structured JSON responses:
+
+```python
+# Auto-detect: uses native tool calling if available, simulation otherwise
+conv = Conversation("ollama/llama3.1:8b", tools=registry, simulated_tools="auto")
+
+# Force simulation even on capable models
+conv = Conversation("openai/gpt-4", tools=registry, simulated_tools=True)
+
+# Disable tool use entirely
+conv = Conversation("openai/gpt-4", tools=registry, simulated_tools=False)
+```
+
+The simulation loop describes tools in the system prompt, asks the model to respond with JSON (`tool_call` or `final_answer`), executes tools, and feeds results back — all transparent to the caller.
+
 ### Model Discovery
 
 Auto-detect available models from configured providers:
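The conversation-side loop itself is not shown in this diff, but its shape follows from `prompture/simulated_tools.py` above. A condensed sketch, where `ask_model` stands in for whatever driver call returns the model's text and `tool_fns` maps tool names to plain callables (neither is a prompture API):

```python
from collections.abc import Callable
from typing import Any

from prompture.simulated_tools import (
    build_tool_prompt,
    format_tool_result,
    parse_simulated_response,
)


def run_simulated(
    ask_model: Callable[[str], str],
    registry: Any,  # a prompture ToolRegistry
    tool_fns: dict[str, Callable[..., Any]],
    question: str,
    max_rounds: int = 5,
) -> str:
    prompt = build_tool_prompt(registry) + f"\n\nUser question: {question}"
    for _ in range(max_rounds):
        parsed = parse_simulated_response(ask_model(prompt), registry)
        if parsed["type"] == "final_answer":
            return parsed["content"]
        # Execute the requested tool and feed the result back as the next turn.
        result = tool_fns[parsed["name"]](**parsed["arguments"])
        prompt += "\n\n" + format_tool_result(parsed["name"], result)
    return ""  # give up after max_rounds tool calls
```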
prompture-0.0.47.dist-info/RECORD CHANGED
@@ -1,16 +1,16 @@
 prompture/__init__.py,sha256=cJnkefDpiyFbU77juw4tXPdKJQWoJ-c6XBFt2v-e5Q4,7455
-prompture/_version.py,sha256=Zfksky0dIFN2RZprnAMflcd2YbMf3D0JpTlHasmdh24,706
+prompture/_version.py,sha256=ToAw384WWguTEmRR8YKQKk9ZB9M3thyPsfPaIkOPqog,706
 prompture/agent.py,sha256=-8qdo_Lz20GGssCe5B_QPxb5Kct71YtKHh5vZgrSYik,34748
 prompture/agent_types.py,sha256=Icl16PQI-ThGLMFCU43adtQA6cqETbsPn4KssKBI4xc,4664
 prompture/async_agent.py,sha256=_6_IRb-LGzZxGxfPVy43SIWByUoQfN-5XnUWahVP6r8,33110
-prompture/async_conversation.py,sha256=m9sdKBu1wxo5veGwO6g6Zvf1sBzpuxP-mSIEeNKlBjQ,31155
+prompture/async_conversation.py,sha256=tzOy2jiCOVkRHIJ9H6VeRwwcUtbjF33--igdxMC6F7s,34811
 prompture/async_core.py,sha256=hbRXLvsBJv3JAnUwGZbazsL6x022FrsJU6swmZolgxY,29745
 prompture/async_driver.py,sha256=4VQ9Q_tI6Ufw6W1CYJ5j8hVtgVdqFGuk6e2tLaSceWE,8581
 prompture/async_groups.py,sha256=pceKrt0UayQjMLFs1dFGoxOHpgD948aEjIY61r608C4,22459
 prompture/cache.py,sha256=4dfQDMsEZ9JMQDXLOkiugPmmMJQIfKVE8rTAKDH4oL8,14401
 prompture/callbacks.py,sha256=JPDqWGzPIzv44l54ocmezlYVBnbKPDEEXRrLdluWGAo,1731
 prompture/cli.py,sha256=tNiIddRmgC1BomjY5O1VVVAwvqHVzF8IHmQrM-cG2wQ,2902
-prompture/conversation.py,sha256=kBflwh7Qmw1I_jcUGyV36oskdVz4SYDSw_dCjemRRRc,32756
+prompture/conversation.py,sha256=uxstayJjgY6a39DtU0YxQl0Dt3JBo2UVCyMPJW95MNI,36428
 prompture/core.py,sha256=5FHwX7fNPwFHMbFCMvV-RH7LpPpTToLAmcyDnKbrN0E,57202
 prompture/cost_mixin.py,sha256=Qx7gPgPsWgTHiaFeI7q_p9cfe95ccjgN8Mi56d_AVX0,4563
 prompture/discovery.py,sha256=K-svbO-qJraHinCbFVS64vEo5McWX5pURv26ZMmuL6U,10295
@@ -29,41 +29,42 @@ prompture/serialization.py,sha256=m4cdAQJspitMcfwRgecElkY2SBt3BjEwubbhS3W-0s0,74
 prompture/server.py,sha256=W6Kn6Et8nG5twXjD2wKn_N9yplGjz5Z-2naeI_UPd1Y,6198
 prompture/session.py,sha256=FldK3cKq_jO0-beukVOhIiwsYWb6U_lLBlAERx95aaM,3821
 prompture/settings.py,sha256=2cTuko8PLhq0SbBMtqmjBgzl9jv6SgoXeaUEhmm4G4Y,2562
+prompture/simulated_tools.py,sha256=oL6W6hAEKXZHBfb8b-UDPfm3V4nSqXu7eG8IpvwtqKg,3901
 prompture/tools.py,sha256=PmFbGHTWYWahpJOG6BLlM0Y-EG6S37IFW57C-8GdsXo,36449
-prompture/tools_schema.py,sha256=c1ag6kyIGgZxWbZRsaHl72cAelb34J_JomyW1h5Atw0,7964
+prompture/tools_schema.py,sha256=wuVfPyCKVWlhUDRsXWArtGpxkQRqNWyKeLJuXn_6X8k,8986
 prompture/validator.py,sha256=FY_VjIVEbjG2nwzh-r6l23Kt3UzaLyCis8_pZMNGHBA,993
 prompture/aio/__init__.py,sha256=bKqTu4Jxld16aP_7SP9wU5au45UBIb041ORo4E4HzVo,1810
 prompture/drivers/__init__.py,sha256=r8wBYGKD7C7v4CqcyRNoaITzGVyxasoiAU6jBYsPZio,8178
 prompture/drivers/airllm_driver.py,sha256=SaTh7e7Plvuct_TfRqQvsJsKHvvM_3iVqhBtlciM-Kw,3858
 prompture/drivers/async_airllm_driver.py,sha256=1hIWLXfyyIg9tXaOE22tLJvFyNwHnOi1M5BIKnV8ysk,908
-prompture/drivers/async_azure_driver.py,sha256=uXPMStCn5jMnLFpiLYBvTheZm2dNlwKmSLWL3J2s8es,4544
+prompture/drivers/async_azure_driver.py,sha256=s__y_EGQkK7UZjxiyF08uql8F09cnbJ0q7aFuxzreIw,7328
 prompture/drivers/async_claude_driver.py,sha256=oawbFVVMtRlikQOmu3jRjbdpoeu95JqTF1YHLKO3ybE,10576
 prompture/drivers/async_google_driver.py,sha256=LTUgCXJjzuTDGzsCsmY2-xH2KdTLJD7htwO49ZNFOdE,13711
-prompture/drivers/async_grok_driver.py,sha256=s3bXEGhVrMyw10CowkBhs5522mhipWJyWWu-xVixzyg,3538
-prompture/drivers/async_groq_driver.py,sha256=pjAh_bgZWSWaNSm5XrU-u3gRV6YSGwNG5NfAbkYeJ84,3067
+prompture/drivers/async_grok_driver.py,sha256=lj160GHARe0fqTms4ovWhkpgt0idsGt55xnuc6JlH1w,7413
+prompture/drivers/async_groq_driver.py,sha256=5G0rXAEAmsLNftI9YfGAh4E8X3B4Hb6_0cXBhf9LZMk,6348
 prompture/drivers/async_hugging_driver.py,sha256=IblxqU6TpNUiigZ0BCgNkAgzpUr2FtPHJOZnOZMnHF0,2152
-prompture/drivers/async_lmstudio_driver.py,sha256=rPn2qVPm6UE2APzAn7ZHYTELUwr0dQMi8XHv6gAhyH8,5782
+prompture/drivers/async_lmstudio_driver.py,sha256=4bz8NFFiZiFFkzlYDcS7abnwmEbbvbKb-CQhHeTGlU8,6102
 prompture/drivers/async_local_http_driver.py,sha256=qoigIf-w3_c2dbVdM6m1e2RMAWP4Gk4VzVs5hM3lPvQ,1609
 prompture/drivers/async_modelscope_driver.py,sha256=wzHYGLf9qE9KXRFZYtN1hZS10Bw1m1Wy6HcmyUD67HM,10170
-prompture/drivers/async_moonshot_driver.py,sha256=Jl6rGlW3SsneFfmBiDo0RBZQN5c3-08kwax369me01E,14798
-prompture/drivers/async_ollama_driver.py,sha256=FaSXtFXrgeVHIe0b90Vg6rGeSTWLpPnjaThh9Ai7qQo,5042
+prompture/drivers/async_moonshot_driver.py,sha256=a9gr3T_4NiDFd7foM1mSHJRvXYb43iqqJnQ0FVRyI2E,15669
+prompture/drivers/async_ollama_driver.py,sha256=pFtCvh5bHe_qwGy-jIJbyG_zmnPbNbagJCGxCTJMdPU,8244
 prompture/drivers/async_openai_driver.py,sha256=COa_JE-AgKowKJpmRnfDJp4RSQKZel_7WswxOzvLksM,9044
-prompture/drivers/async_openrouter_driver.py,sha256=GnOMY67CCV3HV83lCC-CxcngwrUnuc7G-AX7fb1DYpg,10698
+prompture/drivers/async_openrouter_driver.py,sha256=N7s72HuXHLs_RWmJO9P3pCayWE98ommfqVeAfru8Bl0,11758
 prompture/drivers/async_registry.py,sha256=JFEnXNPm-8AAUCiNLoKuYBSCYEK-4BmAen5t55QrMvg,5223
 prompture/drivers/async_zai_driver.py,sha256=zXHxske1CtK8dDTGY-D_kiyZZ_NfceNTJlyTpKn0R4c,10727
-prompture/drivers/azure_driver.py,sha256=zwCRNJRm18XEfYeqpFCDLMEEyY0vIGdqrwKk9ng6s4s,5798
+prompture/drivers/azure_driver.py,sha256=gQFffA29gOr-GZ25fNXTokV8-mEmffeV9CT_UBZ3yXc,8565
 prompture/drivers/claude_driver.py,sha256=C8Av3DXP2x3f35jEv8BRwEM_4vh0cfmLsy3t5dsR6aM,11837
 prompture/drivers/google_driver.py,sha256=Zck5VUsW37kDgohXz3cUWRmZ88OfhmTpVD-qzAVMp-8,16318
-prompture/drivers/grok_driver.py,sha256=CzAXKAbbWmbE8qLFZxxoEhf4Qzbtc9YqDX7kkCsE4dk,5320
-prompture/drivers/groq_driver.py,sha256=61LKHhYyRiFkHKbLKFYX10fqjpL_INtPY_Zeb55AV0o,4221
+prompture/drivers/grok_driver.py,sha256=fxl5Gx9acFq7BlOh_N9U66oJvG3y8YX4QuSAgZWHJmU,8963
+prompture/drivers/groq_driver.py,sha256=7YEok1BQlsDZGkA-l9yrjTDapqIWX3yq_Ctgbhu8jSI,7490
 prompture/drivers/hugging_driver.py,sha256=gZir3XnM77VfYIdnu3S1pRftlZJM6G3L8bgGn5esg-Q,2346
-prompture/drivers/lmstudio_driver.py,sha256=9ZnJ1l5LuWAjkH2WKfFjZprNMVIXoSC7qXDNDTxm-tA,6748
+prompture/drivers/lmstudio_driver.py,sha256=nZ5SvBC0kTDNDzsupIW_H7YK92dcYta_xSPUNs52gyM,7154
 prompture/drivers/local_http_driver.py,sha256=QJgEf9kAmy8YZ5fb8FHnWuhoDoZYNd8at4jegzNVJH0,1658
 prompture/drivers/modelscope_driver.py,sha256=yTxTG7j5f7zz4CjbrV8J0VKeoBmxv69F40bfp8nq6AE,10651
-prompture/drivers/moonshot_driver.py,sha256=MtlvtUUwE4WtzCKo_pJJ5wATB-h2GU4zY9jbGo3a_-g,18264
-prompture/drivers/ollama_driver.py,sha256=k9xeUwFp91OrDbjkbYI-F8CDFy5ew-zQ0btXqwbXXWM,10220
+prompture/drivers/moonshot_driver.py,sha256=cm1XpU6EPFjcZaneXjfetRNSUxN9daP6hkJ1y99kqLI,19123
+prompture/drivers/ollama_driver.py,sha256=SJtMRtAr8geUB4y5GIZxPr-RJ0C3q7yqigYei2b4luM,13710
 prompture/drivers/openai_driver.py,sha256=DqdMhxF8M2HdOY5vfsFrz0h23lqBoQlbxV3xUdHvZho,10548
-prompture/drivers/openrouter_driver.py,sha256=DaG1H99s8GaOgJXZK4TP28HM7U4wiLu9wHXzWZleW_U,12589
+prompture/drivers/openrouter_driver.py,sha256=m2I5E9L5YYE_bV8PruKnAwjL63SIFEXevN_ThUzxQaA,13657
 prompture/drivers/registry.py,sha256=Dg_5w9alnIPKhOnsR9Xspuf5T7roBGu0r_L2Cf-UhXs,9926
 prompture/drivers/vision_helpers.py,sha256=l5iYXHJLR_vLFvqDPPPK1QqK7YPKh5GwocpbSyt0R04,5403
 prompture/drivers/zai_driver.py,sha256=Wkur0HfwKJt8ugYErpvz1Gy6e9an8vt4R7U3i6HWV_s,11038
@@ -76,9 +77,9 @@ prompture/scaffold/templates/env.example.j2,sha256=eESKr1KWgyrczO6d-nwAhQwSpf_G-
 prompture/scaffold/templates/main.py.j2,sha256=TEgc5OvsZOEX0JthkSW1NI_yLwgoeVN_x97Ibg-vyWY,2632
 prompture/scaffold/templates/models.py.j2,sha256=JrZ99GCVK6TKWapskVRSwCssGrTu5cGZ_r46fOhY2GE,858
 prompture/scaffold/templates/requirements.txt.j2,sha256=m3S5fi1hq9KG9l_9j317rjwWww0a43WMKd8VnUWv2A4,102
-prompture-0.0.46.dist-info/licenses/LICENSE,sha256=0HgDepH7aaHNFhHF-iXuW6_GqDfYPnVkjtiCAZ4yS8I,1060
-prompture-0.0.46.dist-info/METADATA,sha256=CpjLp4ff432DdZCNmJciCbk_-pv7CXRHx2LYLP1jybA,10837
-prompture-0.0.46.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-prompture-0.0.46.dist-info/entry_points.txt,sha256=AFPG3lJR86g4IJMoWQUW5Ph7G6MLNWG3A2u2Tp9zkp8,48
-prompture-0.0.46.dist-info/top_level.txt,sha256=to86zq_kjfdoLeAxQNr420UWqT0WzkKoZ509J7Qr2t4,10
-prompture-0.0.46.dist-info/RECORD,,
+prompture-0.0.47.dist-info/licenses/LICENSE,sha256=0HgDepH7aaHNFhHF-iXuW6_GqDfYPnVkjtiCAZ4yS8I,1060
+prompture-0.0.47.dist-info/METADATA,sha256=MY7C3DjGuhpY6ZlPEmg-58aSmJ4RdZ4WRuYtXiWFinM,12148
+prompture-0.0.47.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+prompture-0.0.47.dist-info/entry_points.txt,sha256=AFPG3lJR86g4IJMoWQUW5Ph7G6MLNWG3A2u2Tp9zkp8,48
+prompture-0.0.47.dist-info/top_level.txt,sha256=to86zq_kjfdoLeAxQNr420UWqT0WzkKoZ509J7Qr2t4,10
+prompture-0.0.47.dist-info/RECORD,,