prompture-0.0.47.dev3-py3-none-any.whl → prompture-0.0.48-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
prompture/_version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
  commit_id: COMMIT_ID
  __commit_id__: COMMIT_ID
 
- __version__ = version = '0.0.47.dev3'
- __version_tuple__ = version_tuple = (0, 0, 47, 'dev3')
+ __version__ = version = '0.0.48'
+ __version_tuple__ = version_tuple = (0, 0, 48)
 
  __commit_id__ = commit_id = None
prompture/drivers/async_claude_driver.py CHANGED
@@ -99,6 +99,13 @@ class AsyncClaudeDriver(CostMixin, AsyncDriver):
  resp = await client.messages.create(**common_kwargs)
  text = resp.content[0].text
 
+ # Extract reasoning/thinking content from content blocks
+ reasoning_content = ClaudeDriver._extract_thinking(resp.content)
+
+ # Fallback: use reasoning as text if content is empty
+ if not text and reasoning_content:
+     text = reasoning_content
+
  prompt_tokens = resp.usage.input_tokens
  completion_tokens = resp.usage.output_tokens
  total_tokens = prompt_tokens + completion_tokens
@@ -114,7 +121,10 @@ class AsyncClaudeDriver(CostMixin, AsyncDriver):
      "model_name": model,
  }
 
- return {"text": text, "meta": meta}
+ result: dict[str, Any] = {"text": text, "meta": meta}
+ if reasoning_content is not None:
+     result["reasoning_content"] = reasoning_content
+ return result
 
  # ------------------------------------------------------------------
  # Helpers
@@ -211,12 +221,17 @@ class AsyncClaudeDriver(CostMixin, AsyncDriver):
      "arguments": block.input,
  })
 
- return {
+ reasoning_content = ClaudeDriver._extract_thinking(resp.content)
+
+ result: dict[str, Any] = {
      "text": text,
      "meta": meta,
      "tool_calls": tool_calls_out,
      "stop_reason": resp.stop_reason,
  }
+ if reasoning_content is not None:
+     result["reasoning_content"] = reasoning_content
+ return result
 
  # ------------------------------------------------------------------
  # Streaming
@@ -247,6 +262,7 @@ class AsyncClaudeDriver(CostMixin, AsyncDriver):
  kwargs["system"] = system_content
 
  full_text = ""
+ full_reasoning = ""
  prompt_tokens = 0
  completion_tokens = 0
 
@@ -254,10 +270,16 @@ class AsyncClaudeDriver(CostMixin, AsyncDriver):
  async for event in stream:
      if hasattr(event, "type"):
          if event.type == "content_block_delta" and hasattr(event, "delta"):
-             delta_text = getattr(event.delta, "text", "")
-             if delta_text:
-                 full_text += delta_text
-                 yield {"type": "delta", "text": delta_text}
+             delta_type = getattr(event.delta, "type", "")
+             if delta_type == "thinking_delta":
+                 thinking_text = getattr(event.delta, "thinking", "")
+                 if thinking_text:
+                     full_reasoning += thinking_text
+             else:
+                 delta_text = getattr(event.delta, "text", "")
+                 if delta_text:
+                     full_text += delta_text
+                     yield {"type": "delta", "text": delta_text}
          elif event.type == "message_delta" and hasattr(event, "usage"):
              completion_tokens = getattr(event.usage, "output_tokens", 0)
          elif event.type == "message_start" and hasattr(event, "message"):
@@ -268,7 +290,7 @@ class AsyncClaudeDriver(CostMixin, AsyncDriver):
  total_tokens = prompt_tokens + completion_tokens
  total_cost = self._calculate_cost("claude", model, prompt_tokens, completion_tokens)
 
- yield {
+ done_chunk: dict[str, Any] = {
      "type": "done",
      "text": full_text,
      "meta": {
@@ -280,3 +302,6 @@ class AsyncClaudeDriver(CostMixin, AsyncDriver):
      "model_name": model,
  },
  }
+ if full_reasoning:
+     done_chunk["reasoning_content"] = full_reasoning
+ yield done_chunk
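
In the streaming hunks above, thinking deltas accumulate in full_reasoning rather than being yielded, so reasoning surfaces only on the final "done" chunk. A minimal consumer sketch, assuming the driver exposes this stream as an async generator (the method name generate_stream is illustrative, not taken from the diff):

    async def consume(driver, prompt: str) -> None:
        # Chunk shapes follow the diff: "delta" chunks carry text; the final
        # "done" chunk carries meta plus reasoning_content when thinking arrived.
        async for chunk in driver.generate_stream(prompt, options={}):  # hypothetical name
            if chunk["type"] == "delta":
                print(chunk["text"], end="", flush=True)
            elif chunk["type"] == "done":
                reasoning = chunk.get("reasoning_content")
                if reasoning is not None:
                    print(f"\n[reasoning: {len(reasoning)} chars]")
                print(f"\n[total tokens: {chunk['meta']['total_tokens']}]")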
prompture/drivers/async_ollama_driver.py CHANGED
@@ -81,7 +81,16 @@ class AsyncOllamaDriver(AsyncDriver):
      "model_name": merged_options.get("model", self.model),
  }
 
- return {"text": response_data.get("response", ""), "meta": meta}
+ text = response_data.get("response", "")
+ reasoning_content = response_data.get("thinking") or None
+
+ if not text and reasoning_content:
+     text = reasoning_content
+
+ result: dict[str, Any] = {"text": text, "meta": meta}
+ if reasoning_content is not None:
+     result["reasoning_content"] = reasoning_content
+ return result
 
  # ------------------------------------------------------------------
  # Tool use
@@ -139,8 +148,12 @@ class AsyncOllamaDriver(AsyncDriver):
 
  message = response_data.get("message", {})
  text = message.get("content") or ""
+ reasoning_content = message.get("thinking") or None
  stop_reason = response_data.get("done_reason", "stop")
 
+ if not text and reasoning_content:
+     text = reasoning_content
+
  tool_calls_out: list[dict[str, Any]] = []
  for tc in message.get("tool_calls", []):
      func = tc.get("function", {})
@@ -158,12 +171,15 @@ class AsyncOllamaDriver(AsyncDriver):
      "arguments": args,
  })
 
- return {
+ result: dict[str, Any] = {
      "text": text,
      "meta": meta,
      "tool_calls": tool_calls_out,
      "stop_reason": stop_reason,
  }
+ if reasoning_content is not None:
+     result["reasoning_content"] = reasoning_content
+ return result
 
  async def generate_messages(self, messages: list[dict[str, str]], options: dict[str, Any]) -> dict[str, Any]:
      """Use Ollama's /api/chat endpoint for multi-turn conversations."""
@@ -217,4 +233,12 @@ class AsyncOllamaDriver(AsyncDriver):
 
  message = response_data.get("message", {})
  text = message.get("content", "")
- return {"text": text, "meta": meta}
+ reasoning_content = message.get("thinking") or None
+
+ if not text and reasoning_content:
+     text = reasoning_content
+
+ result: dict[str, Any] = {"text": text, "meta": meta}
+ if reasoning_content is not None:
+     result["reasoning_content"] = reasoning_content
+ return result
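
All three return paths above share the same text/thinking mapping. A standalone sketch of that logic (the function name and sample payload are illustrative):

    from typing import Any

    def map_ollama_response(response_data: dict[str, Any], meta: dict[str, Any]) -> dict[str, Any]:
        text = response_data.get("response", "")
        # `or None` collapses an empty "thinking" string to None
        reasoning_content = response_data.get("thinking") or None
        if not text and reasoning_content:
            text = reasoning_content  # reasoning models may answer only in "thinking"
        result: dict[str, Any] = {"text": text, "meta": meta}
        if reasoning_content is not None:
            result["reasoning_content"] = reasoning_content
        return result

    # A model that answered only in "thinking":
    print(map_ollama_response({"response": "", "thinking": "Working it out..."}, meta={}))
    # {'text': 'Working it out...', 'meta': {}, 'reasoning_content': 'Working it out...'}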
prompture/drivers/claude_driver.py CHANGED
@@ -131,6 +131,13 @@ class ClaudeDriver(CostMixin, Driver):
  resp = client.messages.create(**common_kwargs)
  text = resp.content[0].text
 
+ # Extract reasoning/thinking content from content blocks
+ reasoning_content = self._extract_thinking(resp.content)
+
+ # Fallback: use reasoning as text if content is empty
+ if not text and reasoning_content:
+     text = reasoning_content
+
  # Extract token usage from Claude response
  prompt_tokens = resp.usage.input_tokens
  completion_tokens = resp.usage.output_tokens
@@ -149,12 +156,26 @@ class ClaudeDriver(CostMixin, Driver):
      "model_name": model,
  }
 
- return {"text": text, "meta": meta}
+ result: dict[str, Any] = {"text": text, "meta": meta}
+ if reasoning_content is not None:
+     result["reasoning_content"] = reasoning_content
+ return result
 
  # ------------------------------------------------------------------
  # Helpers
  # ------------------------------------------------------------------
 
+ @staticmethod
+ def _extract_thinking(content_blocks: list[Any]) -> str | None:
+     """Extract thinking/reasoning text from Claude content blocks."""
+     parts: list[str] = []
+     for block in content_blocks:
+         if getattr(block, "type", None) == "thinking":
+             thinking_text = getattr(block, "thinking", "")
+             if thinking_text:
+                 parts.append(thinking_text)
+     return "\n".join(parts) if parts else None
+
  def _extract_system_and_messages(
      self, messages: list[dict[str, Any]]
  ) -> tuple[str | None, list[dict[str, Any]]]:
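
A quick illustration of the new _extract_thinking helper against stand-in content blocks (the SimpleNamespace objects are test doubles, not Anthropic SDK types):

    from types import SimpleNamespace

    from prompture.drivers.claude_driver import ClaudeDriver

    blocks = [
        SimpleNamespace(type="thinking", thinking="Step 1: parse the request."),
        SimpleNamespace(type="text", text="Here is the answer."),
        SimpleNamespace(type="thinking", thinking="Step 2: check the schema."),
    ]
    # Thinking blocks are joined with newlines; other block types are skipped.
    print(ClaudeDriver._extract_thinking(blocks))
    # Step 1: parse the request.
    # Step 2: check the schema.

    # No thinking blocks -> None, which is why callers gate on
    # `if reasoning_content is not None` before adding the key.
    print(ClaudeDriver._extract_thinking([SimpleNamespace(type="text", text="hi")]))  # None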
@@ -246,12 +267,17 @@ class ClaudeDriver(CostMixin, Driver):
      "arguments": block.input,
  })
 
- return {
+ reasoning_content = self._extract_thinking(resp.content)
+
+ result: dict[str, Any] = {
      "text": text,
      "meta": meta,
      "tool_calls": tool_calls_out,
      "stop_reason": resp.stop_reason,
  }
+ if reasoning_content is not None:
+     result["reasoning_content"] = reasoning_content
+ return result
 
  # ------------------------------------------------------------------
  # Streaming
@@ -282,6 +308,7 @@ class ClaudeDriver(CostMixin, Driver):
  kwargs["system"] = system_content
 
  full_text = ""
+ full_reasoning = ""
  prompt_tokens = 0
  completion_tokens = 0
 
@@ -289,10 +316,16 @@ class ClaudeDriver(CostMixin, Driver):
  for event in stream:
      if hasattr(event, "type"):
          if event.type == "content_block_delta" and hasattr(event, "delta"):
-             delta_text = getattr(event.delta, "text", "")
-             if delta_text:
-                 full_text += delta_text
-                 yield {"type": "delta", "text": delta_text}
+             delta_type = getattr(event.delta, "type", "")
+             if delta_type == "thinking_delta":
+                 thinking_text = getattr(event.delta, "thinking", "")
+                 if thinking_text:
+                     full_reasoning += thinking_text
+             else:
+                 delta_text = getattr(event.delta, "text", "")
+                 if delta_text:
+                     full_text += delta_text
+                     yield {"type": "delta", "text": delta_text}
          elif event.type == "message_delta" and hasattr(event, "usage"):
              completion_tokens = getattr(event.usage, "output_tokens", 0)
          elif event.type == "message_start" and hasattr(event, "message"):
@@ -303,7 +336,7 @@ class ClaudeDriver(CostMixin, Driver):
  total_tokens = prompt_tokens + completion_tokens
  total_cost = self._calculate_cost("claude", model, prompt_tokens, completion_tokens)
 
- yield {
+ done_chunk: dict[str, Any] = {
      "type": "done",
      "text": full_text,
      "meta": {
@@ -315,3 +348,6 @@ class ClaudeDriver(CostMixin, Driver):
      "model_name": model,
  },
  }
+ if full_reasoning:
+     done_chunk["reasoning_content"] = full_reasoning
+ yield done_chunk
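
Both drivers only read thinking blocks; how thinking gets requested is outside this diff. For orientation, a sketch of a raw Anthropic SDK call that produces such blocks, assuming the SDK's extended-thinking parameter (the model name and token budget are illustrative):

    import anthropic

    client = anthropic.Anthropic()  # reads ANTHROPIC_API_KEY from the environment
    resp = client.messages.create(
        model="claude-sonnet-4-20250514",  # illustrative model
        max_tokens=2048,
        thinking={"type": "enabled", "budget_tokens": 1024},
        messages=[{"role": "user", "content": "What is 27 * 43?"}],
    )
    # resp.content interleaves blocks with type "thinking" and type "text",
    # the shape that _extract_thinking() walks over.
    for block in resp.content:
        print(block.type)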
prompture/drivers/moonshot_driver.py CHANGED
@@ -167,7 +167,7 @@ class MoonshotDriver(CostMixin, Driver):
      using_json_schema=bool(options.get("json_schema")),
  )
 
- opts = {"temperature": 1.0, "max_tokens": 512, **options}
+ opts = {"temperature": 1.0, "max_tokens": 512, "timeout": 300, **options}
  opts = self._clamp_temperature(opts)
 
  data: dict[str, Any] = {
@@ -210,7 +210,7 @@ class MoonshotDriver(CostMixin, Driver):
      f"{self.base_url}/chat/completions",
      headers=self.headers,
      json=data,
-     timeout=120,
+     timeout=opts.get("timeout", 300),
  )
  response.raise_for_status()
  resp = response.json()
@@ -261,7 +261,7 @@ class MoonshotDriver(CostMixin, Driver):
      f"{self.base_url}/chat/completions",
      headers=self.headers,
      json=fallback_data,
-     timeout=120,
+     timeout=opts.get("timeout", 300),
  )
  fb_response.raise_for_status()
  fb_resp = fb_response.json()
@@ -317,7 +317,7 @@ class MoonshotDriver(CostMixin, Driver):
 
  self._validate_model_capabilities("moonshot", model, using_tool_use=True)
 
- opts = {"temperature": 1.0, "max_tokens": 512, **options}
+ opts = {"temperature": 1.0, "max_tokens": 512, "timeout": 300, **options}
  opts = self._clamp_temperature(opts)
 
  sanitized_tools = self._sanitize_tools(tools)
@@ -342,7 +342,7 @@ class MoonshotDriver(CostMixin, Driver):
      f"{self.base_url}/chat/completions",
      headers=self.headers,
      json=data,
-     timeout=120,
+     timeout=opts.get("timeout", 300),
  )
  response.raise_for_status()
  resp = response.json()
@@ -420,7 +420,7 @@ class MoonshotDriver(CostMixin, Driver):
  tokens_param = model_config["tokens_param"]
  supports_temperature = model_config["supports_temperature"]
 
- opts = {"temperature": 1.0, "max_tokens": 512, **options}
+ opts = {"temperature": 1.0, "max_tokens": 512, "timeout": 300, **options}
  opts = self._clamp_temperature(opts)
 
  data: dict[str, Any] = {
@@ -439,7 +439,7 @@ class MoonshotDriver(CostMixin, Driver):
      headers=self.headers,
      json=data,
      stream=True,
-     timeout=120,
+     timeout=opts.get("timeout", 300),
  )
  response.raise_for_status()
 
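Every Moonshot request path now takes its timeout from opts, defaulting to 300 seconds instead of the previous hard-coded 120, so callers can override it per call. A small sketch (the wrapper and the generate method name are illustrative; only the option plumbing comes from the diff):

    from typing import Any

    def call_with_long_timeout(driver: Any, prompt: str) -> dict[str, Any]:
        # "timeout" flows from options into opts
        # ({"temperature": 1.0, "max_tokens": 512, "timeout": 300, **options})
        # and on to requests.post(..., timeout=opts.get("timeout", 300)).
        return driver.generate(prompt, options={"timeout": 600})  # hypothetical method name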
prompture/drivers/ollama_driver.py CHANGED
@@ -84,7 +84,7 @@ class OllamaDriver(Driver):
  logger.debug(f"Sending request to Ollama endpoint: {self.endpoint}")
  logger.debug(f"Request payload: {payload}")
 
- r = requests.post(self.endpoint, json=payload, timeout=120)
+ r = requests.post(self.endpoint, json=payload, timeout=merged_options.get("timeout", 300))
  logger.debug(f"Response status code: {r.status_code}")
 
  r.raise_for_status()
@@ -131,7 +131,17 @@ class OllamaDriver(Driver):
  }
 
  # Ollama returns text in "response"
- return {"text": response_data.get("response", ""), "meta": meta}
+ text = response_data.get("response", "")
+ reasoning_content = response_data.get("thinking") or None
+
+ # Reasoning models may return content only in thinking
+ if not text and reasoning_content:
+     text = reasoning_content
+
+ result: dict[str, Any] = {"text": text, "meta": meta}
+ if reasoning_content is not None:
+     result["reasoning_content"] = reasoning_content
+ return result
 
  # ------------------------------------------------------------------
  # Tool use
@@ -166,7 +176,7 @@ class OllamaDriver(Driver):
 
  try:
      logger.debug(f"Sending tool use request to Ollama endpoint: {chat_endpoint}")
-     r = requests.post(chat_endpoint, json=payload, timeout=120)
+     r = requests.post(chat_endpoint, json=payload, timeout=merged_options.get("timeout", 300))
      r.raise_for_status()
      response_data = r.json()
 
@@ -196,8 +206,12 @@ class OllamaDriver(Driver):
 
  message = response_data.get("message", {})
  text = message.get("content") or ""
+ reasoning_content = message.get("thinking") or None
  stop_reason = response_data.get("done_reason", "stop")
 
+ if not text and reasoning_content:
+     text = reasoning_content
+
  tool_calls_out: list[dict[str, Any]] = []
  for tc in message.get("tool_calls", []):
      func = tc.get("function", {})
@@ -215,12 +229,15 @@ class OllamaDriver(Driver):
      "arguments": args,
  })
 
- return {
+ result: dict[str, Any] = {
      "text": text,
      "meta": meta,
      "tool_calls": tool_calls_out,
      "stop_reason": stop_reason,
  }
+ if reasoning_content is not None:
+     result["reasoning_content"] = reasoning_content
+ return result
 
  # ------------------------------------------------------------------
  # Streaming
@@ -255,10 +272,11 @@ class OllamaDriver(Driver):
  payload["top_k"] = merged_options["top_k"]
 
  full_text = ""
+ full_reasoning = ""
  prompt_tokens = 0
  completion_tokens = 0
 
- r = requests.post(chat_endpoint, json=payload, timeout=120, stream=True)
+ r = requests.post(chat_endpoint, json=payload, timeout=merged_options.get("timeout", 300), stream=True)
  r.raise_for_status()
 
  for line in r.iter_lines():
@@ -269,13 +287,17 @@ class OllamaDriver(Driver):
      prompt_tokens = chunk.get("prompt_eval_count", 0)
      completion_tokens = chunk.get("eval_count", 0)
  else:
-     content = chunk.get("message", {}).get("content", "")
+     msg = chunk.get("message", {})
+     thinking = msg.get("thinking", "")
+     if thinking:
+         full_reasoning += thinking
+     content = msg.get("content", "")
      if content:
          full_text += content
          yield {"type": "delta", "text": content}
 
  total_tokens = prompt_tokens + completion_tokens
- yield {
+ done_chunk: dict[str, Any] = {
      "type": "done",
      "text": full_text,
      "meta": {
@@ -287,6 +309,9 @@ class OllamaDriver(Driver):
      "model_name": merged_options.get("model", self.model),
  },
  }
+ if full_reasoning:
+     done_chunk["reasoning_content"] = full_reasoning
+ yield done_chunk
 
  def generate_messages(self, messages: list[dict[str, Any]], options: dict[str, Any]) -> dict[str, Any]:
      """Use Ollama's /api/chat endpoint for multi-turn conversations."""
@@ -318,7 +343,7 @@ class OllamaDriver(Driver):
 
  try:
      logger.debug(f"Sending chat request to Ollama endpoint: {chat_endpoint}")
-     r = requests.post(chat_endpoint, json=payload, timeout=120)
+     r = requests.post(chat_endpoint, json=payload, timeout=merged_options.get("timeout", 300))
      r.raise_for_status()
      response_data = r.json()
 
@@ -349,4 +374,12 @@ class OllamaDriver(Driver):
  # Chat endpoint returns response in message.content
  message = response_data.get("message", {})
  text = message.get("content", "")
- return {"text": text, "meta": meta}
+ reasoning_content = message.get("thinking") or None
+
+ if not text and reasoning_content:
+     text = reasoning_content
+
+ result: dict[str, Any] = {"text": text, "meta": meta}
+ if reasoning_content is not None:
+     result["reasoning_content"] = reasoning_content
+ return result
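
OllamaDriver reads "thinking" straight off the Ollama response JSON; the request side is unchanged in this diff. A sketch of a raw call that yields the field, assuming Ollama's think request option and a reasoning-capable model (both are assumptions, not shown in the diff):

    import requests

    resp = requests.post(
        "http://localhost:11434/api/generate",
        json={
            "model": "deepseek-r1",  # illustrative reasoning model
            "prompt": "What is 27 * 43?",
            "think": True,           # assumed Ollama flag that produces "thinking"
            "stream": False,
        },
        timeout=300,
    )
    data = resp.json()
    # The driver maps these two fields to "text" and "reasoning_content".
    print(data.get("thinking"))
    print(data.get("response"))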
prompture-0.0.48.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: prompture
- Version: 0.0.47.dev3
+ Version: 0.0.48
  Summary: Ask LLMs to return structured JSON and run cross-model tests. API-first.
  Author-email: Juan Denis <juan@vene.co>
  License-Expression: MIT
prompture-0.0.48.dist-info/RECORD CHANGED
@@ -1,5 +1,5 @@
  prompture/__init__.py,sha256=cJnkefDpiyFbU77juw4tXPdKJQWoJ-c6XBFt2v-e5Q4,7455
- prompture/_version.py,sha256=u1zVumMYiBTlJQmr6vfEgpcsD6-vEHBNzFbXS_Hdi_Q,719
+ prompture/_version.py,sha256=3orT_GJGmzlbvqNNvwBEVfp9LYXjnkZRbNhNCEkyHao,706
  prompture/agent.py,sha256=-8qdo_Lz20GGssCe5B_QPxb5Kct71YtKHh5vZgrSYik,34748
  prompture/agent_types.py,sha256=Icl16PQI-ThGLMFCU43adtQA6cqETbsPn4KssKBI4xc,4664
  prompture/async_agent.py,sha256=_6_IRb-LGzZxGxfPVy43SIWByUoQfN-5XnUWahVP6r8,33110
@@ -38,7 +38,7 @@ prompture/drivers/__init__.py,sha256=r8wBYGKD7C7v4CqcyRNoaITzGVyxasoiAU6jBYsPZio
  prompture/drivers/airllm_driver.py,sha256=SaTh7e7Plvuct_TfRqQvsJsKHvvM_3iVqhBtlciM-Kw,3858
  prompture/drivers/async_airllm_driver.py,sha256=1hIWLXfyyIg9tXaOE22tLJvFyNwHnOi1M5BIKnV8ysk,908
  prompture/drivers/async_azure_driver.py,sha256=s__y_EGQkK7UZjxiyF08uql8F09cnbJ0q7aFuxzreIw,7328
- prompture/drivers/async_claude_driver.py,sha256=oawbFVVMtRlikQOmu3jRjbdpoeu95JqTF1YHLKO3ybE,10576
+ prompture/drivers/async_claude_driver.py,sha256=k6D6aEgcy8HYbuCsoqDknh7aTfw_cJrV7kDMqCA0OSg,11746
  prompture/drivers/async_google_driver.py,sha256=LTUgCXJjzuTDGzsCsmY2-xH2KdTLJD7htwO49ZNFOdE,13711
  prompture/drivers/async_grok_driver.py,sha256=lj160GHARe0fqTms4ovWhkpgt0idsGt55xnuc6JlH1w,7413
  prompture/drivers/async_groq_driver.py,sha256=5G0rXAEAmsLNftI9YfGAh4E8X3B4Hb6_0cXBhf9LZMk,6348
@@ -47,13 +47,13 @@ prompture/drivers/async_lmstudio_driver.py,sha256=4bz8NFFiZiFFkzlYDcS7abnwmEbbvb
  prompture/drivers/async_local_http_driver.py,sha256=qoigIf-w3_c2dbVdM6m1e2RMAWP4Gk4VzVs5hM3lPvQ,1609
  prompture/drivers/async_modelscope_driver.py,sha256=wzHYGLf9qE9KXRFZYtN1hZS10Bw1m1Wy6HcmyUD67HM,10170
  prompture/drivers/async_moonshot_driver.py,sha256=a9gr3T_4NiDFd7foM1mSHJRvXYb43iqqJnQ0FVRyI2E,15669
- prompture/drivers/async_ollama_driver.py,sha256=pFtCvh5bHe_qwGy-jIJbyG_zmnPbNbagJCGxCTJMdPU,8244
+ prompture/drivers/async_ollama_driver.py,sha256=Li2ZKZrItxKLkbIuugF8LChlnN3xtXtIoc92Ek8_wMc,9121
  prompture/drivers/async_openai_driver.py,sha256=COa_JE-AgKowKJpmRnfDJp4RSQKZel_7WswxOzvLksM,9044
  prompture/drivers/async_openrouter_driver.py,sha256=N7s72HuXHLs_RWmJO9P3pCayWE98ommfqVeAfru8Bl0,11758
  prompture/drivers/async_registry.py,sha256=JFEnXNPm-8AAUCiNLoKuYBSCYEK-4BmAen5t55QrMvg,5223
  prompture/drivers/async_zai_driver.py,sha256=zXHxske1CtK8dDTGY-D_kiyZZ_NfceNTJlyTpKn0R4c,10727
  prompture/drivers/azure_driver.py,sha256=gQFffA29gOr-GZ25fNXTokV8-mEmffeV9CT_UBZ3yXc,8565
- prompture/drivers/claude_driver.py,sha256=C8Av3DXP2x3f35jEv8BRwEM_4vh0cfmLsy3t5dsR6aM,11837
+ prompture/drivers/claude_driver.py,sha256=TOJMhCSAyF8yRmKyVl0pACJUBrxMZHHnQE12iBijCCQ,13474
  prompture/drivers/google_driver.py,sha256=Zck5VUsW37kDgohXz3cUWRmZ88OfhmTpVD-qzAVMp-8,16318
  prompture/drivers/grok_driver.py,sha256=fxl5Gx9acFq7BlOh_N9U66oJvG3y8YX4QuSAgZWHJmU,8963
  prompture/drivers/groq_driver.py,sha256=7YEok1BQlsDZGkA-l9yrjTDapqIWX3yq_Ctgbhu8jSI,7490
@@ -61,8 +61,8 @@ prompture/drivers/hugging_driver.py,sha256=gZir3XnM77VfYIdnu3S1pRftlZJM6G3L8bgGn
  prompture/drivers/lmstudio_driver.py,sha256=nZ5SvBC0kTDNDzsupIW_H7YK92dcYta_xSPUNs52gyM,7154
  prompture/drivers/local_http_driver.py,sha256=QJgEf9kAmy8YZ5fb8FHnWuhoDoZYNd8at4jegzNVJH0,1658
  prompture/drivers/modelscope_driver.py,sha256=yTxTG7j5f7zz4CjbrV8J0VKeoBmxv69F40bfp8nq6AE,10651
- prompture/drivers/moonshot_driver.py,sha256=cm1XpU6EPFjcZaneXjfetRNSUxN9daP6hkJ1y99kqLI,19123
- prompture/drivers/ollama_driver.py,sha256=SJtMRtAr8geUB4y5GIZxPr-RJ0C3q7yqigYei2b4luM,13710
+ prompture/drivers/moonshot_driver.py,sha256=DXbm4P9vwdUqP9X7zxL_lvFWohVSD74nDbC9Gev-ofQ,19255
+ prompture/drivers/ollama_driver.py,sha256=vwxRHq7n1bWGPjTHMAUZY5wnOWmbERoWKsjXBkpTEdM,15091
  prompture/drivers/openai_driver.py,sha256=DqdMhxF8M2HdOY5vfsFrz0h23lqBoQlbxV3xUdHvZho,10548
  prompture/drivers/openrouter_driver.py,sha256=m2I5E9L5YYE_bV8PruKnAwjL63SIFEXevN_ThUzxQaA,13657
  prompture/drivers/registry.py,sha256=Dg_5w9alnIPKhOnsR9Xspuf5T7roBGu0r_L2Cf-UhXs,9926
@@ -77,9 +77,9 @@ prompture/scaffold/templates/env.example.j2,sha256=eESKr1KWgyrczO6d-nwAhQwSpf_G-
  prompture/scaffold/templates/main.py.j2,sha256=TEgc5OvsZOEX0JthkSW1NI_yLwgoeVN_x97Ibg-vyWY,2632
  prompture/scaffold/templates/models.py.j2,sha256=JrZ99GCVK6TKWapskVRSwCssGrTu5cGZ_r46fOhY2GE,858
  prompture/scaffold/templates/requirements.txt.j2,sha256=m3S5fi1hq9KG9l_9j317rjwWww0a43WMKd8VnUWv2A4,102
- prompture-0.0.47.dev3.dist-info/licenses/LICENSE,sha256=0HgDepH7aaHNFhHF-iXuW6_GqDfYPnVkjtiCAZ4yS8I,1060
- prompture-0.0.47.dev3.dist-info/METADATA,sha256=GCknitr30odleYVAoFwVD9zZvMaHqed8z_3omEhs3Vc,12153
- prompture-0.0.47.dev3.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
- prompture-0.0.47.dev3.dist-info/entry_points.txt,sha256=AFPG3lJR86g4IJMoWQUW5Ph7G6MLNWG3A2u2Tp9zkp8,48
- prompture-0.0.47.dev3.dist-info/top_level.txt,sha256=to86zq_kjfdoLeAxQNr420UWqT0WzkKoZ509J7Qr2t4,10
- prompture-0.0.47.dev3.dist-info/RECORD,,
+ prompture-0.0.48.dist-info/licenses/LICENSE,sha256=0HgDepH7aaHNFhHF-iXuW6_GqDfYPnVkjtiCAZ4yS8I,1060
+ prompture-0.0.48.dist-info/METADATA,sha256=ABz_es-nmGOPKgxOtEwQVhMytRkGJG0qN6tKnfvDQfQ,12148
+ prompture-0.0.48.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ prompture-0.0.48.dist-info/entry_points.txt,sha256=AFPG3lJR86g4IJMoWQUW5Ph7G6MLNWG3A2u2Tp9zkp8,48
+ prompture-0.0.48.dist-info/top_level.txt,sha256=to86zq_kjfdoLeAxQNr420UWqT0WzkKoZ509J7Qr2t4,10
+ prompture-0.0.48.dist-info/RECORD,,