prompture 0.0.35__py3-none-any.whl → 0.0.38.dev2__py3-none-any.whl

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as published.
Files changed (48)
  1. prompture/__init__.py +120 -2
  2. prompture/_version.py +2 -2
  3. prompture/agent.py +924 -0
  4. prompture/agent_types.py +156 -0
  5. prompture/async_agent.py +880 -0
  6. prompture/async_conversation.py +199 -17
  7. prompture/async_driver.py +24 -0
  8. prompture/async_groups.py +551 -0
  9. prompture/conversation.py +213 -18
  10. prompture/core.py +30 -12
  11. prompture/discovery.py +24 -1
  12. prompture/driver.py +38 -0
  13. prompture/drivers/__init__.py +5 -1
  14. prompture/drivers/async_azure_driver.py +7 -1
  15. prompture/drivers/async_claude_driver.py +7 -1
  16. prompture/drivers/async_google_driver.py +212 -28
  17. prompture/drivers/async_grok_driver.py +7 -1
  18. prompture/drivers/async_groq_driver.py +7 -1
  19. prompture/drivers/async_lmstudio_driver.py +74 -5
  20. prompture/drivers/async_ollama_driver.py +13 -3
  21. prompture/drivers/async_openai_driver.py +7 -1
  22. prompture/drivers/async_openrouter_driver.py +7 -1
  23. prompture/drivers/async_registry.py +5 -1
  24. prompture/drivers/azure_driver.py +7 -1
  25. prompture/drivers/claude_driver.py +7 -1
  26. prompture/drivers/google_driver.py +217 -33
  27. prompture/drivers/grok_driver.py +7 -1
  28. prompture/drivers/groq_driver.py +7 -1
  29. prompture/drivers/lmstudio_driver.py +73 -8
  30. prompture/drivers/ollama_driver.py +16 -5
  31. prompture/drivers/openai_driver.py +7 -1
  32. prompture/drivers/openrouter_driver.py +7 -1
  33. prompture/drivers/vision_helpers.py +153 -0
  34. prompture/group_types.py +147 -0
  35. prompture/groups.py +530 -0
  36. prompture/image.py +180 -0
  37. prompture/persistence.py +254 -0
  38. prompture/persona.py +482 -0
  39. prompture/serialization.py +218 -0
  40. prompture/settings.py +1 -0
  41. prompture-0.0.38.dev2.dist-info/METADATA +369 -0
  42. prompture-0.0.38.dev2.dist-info/RECORD +77 -0
  43. prompture-0.0.35.dist-info/METADATA +0 -464
  44. prompture-0.0.35.dist-info/RECORD +0 -66
  45. {prompture-0.0.35.dist-info → prompture-0.0.38.dev2.dist-info}/WHEEL +0 -0
  46. {prompture-0.0.35.dist-info → prompture-0.0.38.dev2.dist-info}/entry_points.txt +0 -0
  47. {prompture-0.0.35.dist-info → prompture-0.0.38.dev2.dist-info}/licenses/LICENSE +0 -0
  48. {prompture-0.0.35.dist-info → prompture-0.0.38.dev2.dist-info}/top_level.txt +0 -0
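The headline additions in this release are personas, conversation persistence (export/import, save/load, auto-save), and image inputs, introduced in the prompture/async_conversation.py diff below (a matching sync version lands in prompture/conversation.py). A minimal usage sketch, not taken from the package docs: the model id, persona name, and file paths are placeholders, and the persona would need to be registered beforehand so get_persona() can resolve it.

import asyncio

from prompture.async_conversation import AsyncConversation

async def main() -> None:
    conv = AsyncConversation(
        model_name="openai/gpt-4o-mini",        # placeholder model id
        persona="support-analyst",              # hypothetical name; must exist in the persona registry
        auto_save="conversations/demo.json",    # _maybe_auto_save() rewrites this file after every turn
        tags=["demo", "release-notes"],
    )
    reply = await conv.ask("Summarize what changed in this release.")
    print(conv.conversation_id, reply)
    conv.save("conversations/demo-final.json")  # explicit save, independent of auto_save

asyncio.run(main())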
prompture/async_conversation.py CHANGED
@@ -4,9 +4,11 @@ from __future__ import annotations

 import json
 import logging
+import uuid
 from collections.abc import AsyncIterator
-from datetime import date, datetime
+from datetime import date, datetime, timezone
 from decimal import Decimal
+from pathlib import Path
 from typing import Any, Callable, Literal, Union

 from pydantic import BaseModel
@@ -15,6 +17,11 @@ from .async_driver import AsyncDriver
 from .callbacks import DriverCallbacks
 from .drivers.async_registry import get_async_driver_for_model
 from .field_definitions import get_registry_snapshot
+from .image import ImageInput, make_image
+from .persistence import load_from_file, save_to_file
+from .persona import Persona, get_persona
+from .serialization import export_conversation, import_conversation
+from .session import UsageSession
 from .tools import (
     clean_json_text,
     convert_value,
@@ -43,13 +50,33 @@ class AsyncConversation:
         *,
         driver: AsyncDriver | None = None,
         system_prompt: str | None = None,
+        persona: str | Persona | None = None,
         options: dict[str, Any] | None = None,
         callbacks: DriverCallbacks | None = None,
         tools: ToolRegistry | None = None,
         max_tool_rounds: int = 10,
+        conversation_id: str | None = None,
+        auto_save: str | Path | None = None,
+        tags: list[str] | None = None,
     ) -> None:
+        if system_prompt is not None and persona is not None:
+            raise ValueError("Cannot provide both 'system_prompt' and 'persona'. Use one or the other.")
+
+        # Resolve persona
+        resolved_persona: Persona | None = None
+        if persona is not None:
+            if isinstance(persona, str):
+                resolved_persona = get_persona(persona)
+                if resolved_persona is None:
+                    raise ValueError(f"Persona '{persona}' not found in registry.")
+            else:
+                resolved_persona = persona
+
         if model_name is None and driver is None:
-            raise ValueError("Either model_name or driver must be provided")
+            if resolved_persona is not None and resolved_persona.model_hint:
+                model_name = resolved_persona.model_hint
+            else:
+                raise ValueError("Either model_name or driver must be provided")

         if driver is not None:
             self._driver = driver
@@ -60,8 +87,15 @@ class AsyncConversation:
             self._driver.callbacks = callbacks

         self._model_name = model_name or ""
-        self._system_prompt = system_prompt
-        self._options = dict(options) if options else {}
+
+        # Apply persona: render system_prompt and merge settings
+        if resolved_persona is not None:
+            self._system_prompt = resolved_persona.render()
+            self._options = {**resolved_persona.settings, **(dict(options) if options else {})}
+        else:
+            self._system_prompt = system_prompt
+            self._options = dict(options) if options else {}
+
         self._messages: list[dict[str, Any]] = []
         self._usage = {
             "prompt_tokens": 0,
@@ -73,6 +107,14 @@
         self._tools = tools or ToolRegistry()
         self._max_tool_rounds = max_tool_rounds

+        # Persistence
+        self._conversation_id = conversation_id or str(uuid.uuid4())
+        self._auto_save = Path(auto_save) if auto_save else None
+        self._metadata: dict[str, Any] = {
+            "created_at": datetime.now(timezone.utc).isoformat(),
+            "tags": list(tags) if tags else [],
+        }
+
     # ------------------------------------------------------------------
     # Public helpers
     # ------------------------------------------------------------------
@@ -91,11 +133,12 @@ class AsyncConversation:
         """Reset message history (keeps system_prompt and driver)."""
         self._messages.clear()

-    def add_context(self, role: str, content: str) -> None:
+    def add_context(self, role: str, content: str, images: list[ImageInput] | None = None) -> None:
         """Seed the history with a user or assistant message."""
         if role not in ("user", "assistant"):
             raise ValueError("role must be 'user' or 'assistant'")
-        self._messages.append({"role": role, "content": content})
+        msg_content = self._build_content_with_images(content, images)
+        self._messages.append({"role": role, "content": msg_content})

     def register_tool(
         self,
@@ -112,17 +155,145 @@ class AsyncConversation:
         u = self._usage
         return f"Conversation: {u['total_tokens']:,} tokens across {u['turns']} turn(s) costing ${u['cost']:.4f}"

+    # ------------------------------------------------------------------
+    # Persistence properties
+    # ------------------------------------------------------------------
+
+    @property
+    def conversation_id(self) -> str:
+        """Unique identifier for this conversation."""
+        return self._conversation_id
+
+    @property
+    def tags(self) -> list[str]:
+        """Tags attached to this conversation."""
+        return self._metadata.get("tags", [])
+
+    @tags.setter
+    def tags(self, value: list[str]) -> None:
+        self._metadata["tags"] = list(value)
+
+    # ------------------------------------------------------------------
+    # Export / Import
+    # ------------------------------------------------------------------
+
+    def export(self, *, usage_session: UsageSession | None = None, strip_images: bool = False) -> dict[str, Any]:
+        """Export conversation state to a JSON-serializable dict."""
+        tools_metadata = (
+            [
+                {"name": td.name, "description": td.description, "parameters": td.parameters}
+                for td in self._tools.definitions
+            ]
+            if self._tools and self._tools.definitions
+            else None
+        )
+        return export_conversation(
+            model_name=self._model_name,
+            system_prompt=self._system_prompt,
+            options=self._options,
+            messages=self._messages,
+            usage=self._usage,
+            max_tool_rounds=self._max_tool_rounds,
+            tools_metadata=tools_metadata,
+            usage_session=usage_session,
+            metadata=self._metadata,
+            conversation_id=self._conversation_id,
+            strip_images=strip_images,
+        )
+
+    @classmethod
+    def from_export(
+        cls,
+        data: dict[str, Any],
+        *,
+        callbacks: DriverCallbacks | None = None,
+        tools: ToolRegistry | None = None,
+    ) -> AsyncConversation:
+        """Reconstruct an :class:`AsyncConversation` from an export dict.
+
+        The driver is reconstructed from the stored ``model_name`` using
+        :func:`get_async_driver_for_model`. Callbacks and tool functions
+        must be re-attached by the caller.
+        """
+        imported = import_conversation(data)
+
+        model_name = imported.get("model_name") or ""
+        if not model_name:
+            raise ValueError("Cannot restore conversation: export has no model_name")
+        conv = cls(
+            model_name=model_name,
+            system_prompt=imported.get("system_prompt"),
+            options=imported.get("options", {}),
+            callbacks=callbacks,
+            tools=tools,
+            max_tool_rounds=imported.get("max_tool_rounds", 10),
+            conversation_id=imported.get("conversation_id"),
+            tags=imported.get("metadata", {}).get("tags", []),
+        )
+        conv._messages = imported.get("messages", [])
+        conv._usage = imported.get(
+            "usage",
+            {
+                "prompt_tokens": 0,
+                "completion_tokens": 0,
+                "total_tokens": 0,
+                "cost": 0.0,
+                "turns": 0,
+            },
+        )
+        meta = imported.get("metadata", {})
+        if "created_at" in meta:
+            conv._metadata["created_at"] = meta["created_at"]
+        return conv
+
+    def save(self, path: str | Path, **kwargs: Any) -> None:
+        """Export and write to a JSON file."""
+        save_to_file(self.export(**kwargs), path)
+
+    @classmethod
+    def load(
+        cls,
+        path: str | Path,
+        *,
+        callbacks: DriverCallbacks | None = None,
+        tools: ToolRegistry | None = None,
+    ) -> AsyncConversation:
+        """Load a conversation from a JSON file."""
+        data = load_from_file(path)
+        return cls.from_export(data, callbacks=callbacks, tools=tools)
+
+    def _maybe_auto_save(self) -> None:
+        """Auto-save after each turn if configured."""
+        if self._auto_save is None:
+            return
+        try:
+            self.save(self._auto_save)
+        except Exception:
+            logger.debug("Auto-save failed for conversation %s", self._conversation_id, exc_info=True)
+
     # ------------------------------------------------------------------
     # Core methods
     # ------------------------------------------------------------------

-    def _build_messages(self, user_content: str) -> list[dict[str, Any]]:
+    @staticmethod
+    def _build_content_with_images(text: str, images: list[ImageInput] | None = None) -> str | list[dict[str, Any]]:
+        """Return plain string when no images, or a list of content blocks."""
+        if not images:
+            return text
+        blocks: list[dict[str, Any]] = [{"type": "text", "text": text}]
+        for img in images:
+            ic = make_image(img)
+            blocks.append({"type": "image", "source": ic})
+        return blocks
+
+    def _build_messages(self, user_content: str, images: list[ImageInput] | None = None) -> list[dict[str, Any]]:
         """Build the full messages array for an API call."""
         msgs: list[dict[str, Any]] = []
         if self._system_prompt:
             msgs.append({"role": "system", "content": self._system_prompt})
         msgs.extend(self._messages)
-        msgs.append({"role": "user", "content": user_content})
+        content = self._build_content_with_images(user_content, images)
+        msgs.append({"role": "user", "content": content})
         return msgs

     def _accumulate_usage(self, meta: dict[str, Any]) -> None:
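As the hunk above shows, _build_content_with_images() leaves text-only messages as plain strings and only switches to a list of typed blocks when images are attached, so text-only drivers keep receiving the format they already handle. Roughly, the two shapes stored in the history look like this (the exact value under "source" comes from make_image() in the new prompture/image.py, which is not shown in this diff):

# Text-only turn: the history entry keeps a plain string content.
text_only = {"role": "user", "content": "What does this chart show?"}

# Turn with one image: content becomes a list of typed blocks.
with_image = {
    "role": "user",
    "content": [
        {"type": "text", "text": "What does this chart show?"},
        {"type": "image", "source": "<result of make_image(...)>"},
    ],
}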
@@ -131,11 +302,13 @@
         self._usage["total_tokens"] += meta.get("total_tokens", 0)
         self._usage["cost"] += meta.get("cost", 0.0)
         self._usage["turns"] += 1
+        self._maybe_auto_save()

     async def ask(
         self,
         content: str,
         options: dict[str, Any] | None = None,
+        images: list[ImageInput] | None = None,
     ) -> str:
         """Send a message and get a raw text response (async).

@@ -143,16 +316,17 @@
         dispatches to the async tool execution loop.
         """
         if self._tools and getattr(self._driver, "supports_tool_use", False):
-            return await self._ask_with_tools(content, options)
+            return await self._ask_with_tools(content, options, images=images)

         merged = {**self._options, **(options or {})}
-        messages = self._build_messages(content)
+        messages = self._build_messages(content, images=images)
         resp = await self._driver.generate_messages_with_hooks(messages, merged)

         text = resp.get("text", "")
         meta = resp.get("meta", {})

-        self._messages.append({"role": "user", "content": content})
+        user_content = self._build_content_with_images(content, images)
+        self._messages.append({"role": "user", "content": user_content})
         self._messages.append({"role": "assistant", "content": text})
         self._accumulate_usage(meta)

@@ -162,12 +336,14 @@
         self,
         content: str,
         options: dict[str, Any] | None = None,
+        images: list[ImageInput] | None = None,
     ) -> str:
         """Async tool-use loop: send -> check tool_calls -> execute -> re-send."""
         merged = {**self._options, **(options or {})}
         tool_defs = self._tools.to_openai_format()

-        self._messages.append({"role": "user", "content": content})
+        user_content = self._build_content_with_images(content, images)
+        self._messages.append({"role": "user", "content": user_content})
         msgs = self._build_messages_raw()

         for _round in range(self._max_tool_rounds):
@@ -228,6 +404,7 @@
         self,
         content: str,
         options: dict[str, Any] | None = None,
+        images: list[ImageInput] | None = None,
     ) -> AsyncIterator[str]:
         """Send a message and yield text chunks as they arrive (async).

@@ -235,13 +412,14 @@
         support streaming.
         """
         if not getattr(self._driver, "supports_streaming", False):
-            yield await self.ask(content, options)
+            yield await self.ask(content, options, images=images)
             return

         merged = {**self._options, **(options or {})}
-        messages = self._build_messages(content)
+        messages = self._build_messages(content, images=images)

-        self._messages.append({"role": "user", "content": content})
+        user_content = self._build_content_with_images(content, images)
+        self._messages.append({"role": "user", "content": user_content})

         full_text = ""
         async for chunk in self._driver.generate_messages_stream(messages, merged):
267
445
  options: dict[str, Any] | None = None,
268
446
  output_format: Literal["json", "toon"] = "json",
269
447
  json_mode: Literal["auto", "on", "off"] = "auto",
448
+ images: list[ImageInput] | None = None,
270
449
  ) -> dict[str, Any]:
271
450
  """Send a message with schema enforcement and get structured JSON back (async)."""
272
451
  merged = {**self._options, **(options or {})}
@@ -301,13 +480,14 @@

         full_user_content = f"{content}\n\n{instruct}"

-        messages = self._build_messages(full_user_content)
+        messages = self._build_messages(full_user_content, images=images)
         resp = await self._driver.generate_messages_with_hooks(messages, merged)

         text = resp.get("text", "")
         meta = resp.get("meta", {})

-        self._messages.append({"role": "user", "content": content})
+        user_content = self._build_content_with_images(content, images)
+        self._messages.append({"role": "user", "content": user_content})

         cleaned = clean_json_text(text)
         try:
@@ -361,6 +541,7 @@
         output_format: Literal["json", "toon"] = "json",
         options: dict[str, Any] | None = None,
         json_mode: Literal["auto", "on", "off"] = "auto",
+        images: list[ImageInput] | None = None,
     ) -> dict[str, Any]:
         """Extract structured information into a Pydantic model with conversation context (async)."""
         from .core import normalize_field_value
@@ -375,6 +556,7 @@
             options=options,
             output_format=output_format,
             json_mode=json_mode,
+            images=images,
         )

         json_object = result["json_object"]
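Taken together, the hunks above thread an optional images parameter through ask(), stream(), ask_for_json(), and extract_with_model(), and add export()/from_export() for round-tripping conversation state. A rough sketch of how the pieces could combine, assuming ImageInput accepts a local file path (prompture/image.py, which defines ImageInput and make_image(), is not part of this diff) and that the placeholder model id maps to a vision-capable driver:

from prompture.async_conversation import AsyncConversation

async def describe_chart() -> dict:
    conv = AsyncConversation(model_name="openai/gpt-4o-mini")  # placeholder; needs a vision-capable driver
    answer = await conv.ask("What does this chart show?", images=["charts/q3.png"])  # placeholder path

    # Round-trip the whole conversation through a plain dict; strip_images=True
    # drops image payloads from the exported messages.
    snapshot = conv.export(strip_images=True)
    restored = AsyncConversation.from_export(snapshot)
    return {"answer": answer, "restored_id": restored.conversation_id}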
prompture/async_driver.py CHANGED
@@ -35,6 +35,7 @@ class AsyncDriver:
     supports_messages: bool = False
     supports_tool_use: bool = False
     supports_streaming: bool = False
+    supports_vision: bool = False

     callbacks: DriverCallbacks | None = None

@@ -165,5 +166,28 @@
         except Exception:
             logger.exception("Callback %s raised an exception", event)

+    def _check_vision_support(self, messages: list[dict[str, Any]]) -> None:
+        """Raise if messages contain image blocks and the driver lacks vision support."""
+        if self.supports_vision:
+            return
+        for msg in messages:
+            content = msg.get("content")
+            if isinstance(content, list):
+                for block in content:
+                    if isinstance(block, dict) and block.get("type") == "image":
+                        raise NotImplementedError(
+                            f"{self.__class__.__name__} does not support vision/image inputs. "
+                            "Use a vision-capable model."
+                        )
+
+    def _prepare_messages(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
+        """Transform universal message format into provider-specific wire format.
+
+        Vision-capable async drivers override this to convert the universal
+        image blocks into their provider-specific format.
+        """
+        self._check_vision_support(messages)
+        return messages
+
     # Re-export the static helper for convenience
     _flatten_messages = staticmethod(Driver._flatten_messages)
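The two base-class hooks above give vision-capable drivers a single override point: declare supports_vision = True and translate the universal image blocks in _prepare_messages(). A sketch of what such a subclass might look like; it is not part of prompture, the "image_url" wire shape is a made-up provider format, and the request-sending methods a real driver must implement are omitted:

from typing import Any

from prompture.async_driver import AsyncDriver

class SketchVisionDriver(AsyncDriver):
    """Illustrative subclass only; real drivers also implement the generate/stream methods."""

    supports_messages = True
    supports_vision = True  # opting in means _check_vision_support() no longer raises

    def _prepare_messages(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
        # Map the universal {"type": "image", "source": ...} blocks to a
        # hypothetical provider-specific shape; other blocks pass through unchanged.
        prepared: list[dict[str, Any]] = []
        for msg in messages:
            content = msg.get("content")
            if isinstance(content, list):
                content = [
                    {"type": "image_url", "image_url": block["source"]}
                    if isinstance(block, dict) and block.get("type") == "image"
                    else block
                    for block in content
                ]
            prepared.append({**msg, "content": content})
        return prepared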