prompture-0.0.29.dev8-py3-none-any.whl → prompture-0.0.38.dev2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. prompture/__init__.py +264 -23
  2. prompture/_version.py +34 -0
  3. prompture/agent.py +924 -0
  4. prompture/agent_types.py +156 -0
  5. prompture/aio/__init__.py +74 -0
  6. prompture/async_agent.py +880 -0
  7. prompture/async_conversation.py +789 -0
  8. prompture/async_core.py +803 -0
  9. prompture/async_driver.py +193 -0
  10. prompture/async_groups.py +551 -0
  11. prompture/cache.py +469 -0
  12. prompture/callbacks.py +55 -0
  13. prompture/cli.py +63 -4
  14. prompture/conversation.py +826 -0
  15. prompture/core.py +894 -263
  16. prompture/cost_mixin.py +51 -0
  17. prompture/discovery.py +187 -0
  18. prompture/driver.py +206 -5
  19. prompture/drivers/__init__.py +175 -67
  20. prompture/drivers/airllm_driver.py +109 -0
  21. prompture/drivers/async_airllm_driver.py +26 -0
  22. prompture/drivers/async_azure_driver.py +123 -0
  23. prompture/drivers/async_claude_driver.py +113 -0
  24. prompture/drivers/async_google_driver.py +316 -0
  25. prompture/drivers/async_grok_driver.py +97 -0
  26. prompture/drivers/async_groq_driver.py +90 -0
  27. prompture/drivers/async_hugging_driver.py +61 -0
  28. prompture/drivers/async_lmstudio_driver.py +148 -0
  29. prompture/drivers/async_local_http_driver.py +44 -0
  30. prompture/drivers/async_ollama_driver.py +135 -0
  31. prompture/drivers/async_openai_driver.py +102 -0
  32. prompture/drivers/async_openrouter_driver.py +102 -0
  33. prompture/drivers/async_registry.py +133 -0
  34. prompture/drivers/azure_driver.py +42 -9
  35. prompture/drivers/claude_driver.py +257 -34
  36. prompture/drivers/google_driver.py +295 -42
  37. prompture/drivers/grok_driver.py +35 -32
  38. prompture/drivers/groq_driver.py +33 -26
  39. prompture/drivers/hugging_driver.py +6 -6
  40. prompture/drivers/lmstudio_driver.py +97 -19
  41. prompture/drivers/local_http_driver.py +6 -6
  42. prompture/drivers/ollama_driver.py +168 -23
  43. prompture/drivers/openai_driver.py +184 -9
  44. prompture/drivers/openrouter_driver.py +37 -25
  45. prompture/drivers/registry.py +306 -0
  46. prompture/drivers/vision_helpers.py +153 -0
  47. prompture/field_definitions.py +106 -96
  48. prompture/group_types.py +147 -0
  49. prompture/groups.py +530 -0
  50. prompture/image.py +180 -0
  51. prompture/logging.py +80 -0
  52. prompture/model_rates.py +217 -0
  53. prompture/persistence.py +254 -0
  54. prompture/persona.py +482 -0
  55. prompture/runner.py +49 -47
  56. prompture/scaffold/__init__.py +1 -0
  57. prompture/scaffold/generator.py +84 -0
  58. prompture/scaffold/templates/Dockerfile.j2 +12 -0
  59. prompture/scaffold/templates/README.md.j2 +41 -0
  60. prompture/scaffold/templates/config.py.j2 +21 -0
  61. prompture/scaffold/templates/env.example.j2 +8 -0
  62. prompture/scaffold/templates/main.py.j2 +86 -0
  63. prompture/scaffold/templates/models.py.j2 +40 -0
  64. prompture/scaffold/templates/requirements.txt.j2 +5 -0
  65. prompture/serialization.py +218 -0
  66. prompture/server.py +183 -0
  67. prompture/session.py +117 -0
  68. prompture/settings.py +19 -1
  69. prompture/tools.py +219 -267
  70. prompture/tools_schema.py +254 -0
  71. prompture/validator.py +3 -3
  72. prompture-0.0.38.dev2.dist-info/METADATA +369 -0
  73. prompture-0.0.38.dev2.dist-info/RECORD +77 -0
  74. {prompture-0.0.29.dev8.dist-info → prompture-0.0.38.dev2.dist-info}/WHEEL +1 -1
  75. prompture-0.0.29.dev8.dist-info/METADATA +0 -368
  76. prompture-0.0.29.dev8.dist-info/RECORD +0 -27
  77. {prompture-0.0.29.dev8.dist-info → prompture-0.0.38.dev2.dist-info}/entry_points.txt +0 -0
  78. {prompture-0.0.29.dev8.dist-info → prompture-0.0.38.dev2.dist-info}/licenses/LICENSE +0 -0
  79. {prompture-0.0.29.dev8.dist-info → prompture-0.0.38.dev2.dist-info}/top_level.txt +0 -0
prompture/async_driver.py (new file)
@@ -0,0 +1,193 @@
+ """Async driver base class for LLM adapters."""
+
+ from __future__ import annotations
+
+ import logging
+ import time
+ from collections.abc import AsyncIterator
+ from typing import Any
+
+ from .callbacks import DriverCallbacks
+ from .driver import Driver
+
+ logger = logging.getLogger("prompture.async_driver")
+
+
+ class AsyncDriver:
+     """Async adapter base. Implement ``async generate(prompt, options)``
+     returning ``{"text": ..., "meta": {...}}``.
+
+     The ``meta`` dict follows the same contract as :class:`Driver`:
+
+     .. code-block:: python
+
+         {
+             "prompt_tokens": int,
+             "completion_tokens": int,
+             "total_tokens": int,
+             "cost": float,
+             "raw_response": dict,
+         }
+     """
+
+     supports_json_mode: bool = False
+     supports_json_schema: bool = False
+     supports_messages: bool = False
+     supports_tool_use: bool = False
+     supports_streaming: bool = False
+     supports_vision: bool = False
+
+     callbacks: DriverCallbacks | None = None
+
+     async def generate(self, prompt: str, options: dict[str, Any]) -> dict[str, Any]:
+         raise NotImplementedError
+
+     async def generate_messages(self, messages: list[dict[str, Any]], options: dict[str, Any]) -> dict[str, Any]:
+         """Generate a response from a list of conversation messages (async).
+
+         Default implementation flattens the messages into a single prompt
+         and delegates to :meth:`generate`. Drivers that natively support
+         message arrays should override this and set
+         ``supports_messages = True``.
+         """
+         prompt = Driver._flatten_messages(messages)
+         return await self.generate(prompt, options)
+
+     # ------------------------------------------------------------------
+     # Tool use
+     # ------------------------------------------------------------------
+
+     async def generate_messages_with_tools(
+         self,
+         messages: list[dict[str, Any]],
+         tools: list[dict[str, Any]],
+         options: dict[str, Any],
+     ) -> dict[str, Any]:
+         """Generate a response that may include tool calls (async).
+
+         Returns a dict with keys: ``text``, ``meta``, ``tool_calls``, ``stop_reason``.
+         """
+         raise NotImplementedError(f"{self.__class__.__name__} does not support tool use")
+
+     # ------------------------------------------------------------------
+     # Streaming
+     # ------------------------------------------------------------------
+
+     async def generate_messages_stream(
+         self,
+         messages: list[dict[str, Any]],
+         options: dict[str, Any],
+     ) -> AsyncIterator[dict[str, Any]]:
+         """Yield response chunks incrementally (async).
+
+         Each chunk is a dict:
+         - ``{"type": "delta", "text": str}``
+         - ``{"type": "done", "text": str, "meta": dict}``
+         """
+         raise NotImplementedError(f"{self.__class__.__name__} does not support streaming")
+         # yield is needed to make this an async generator
+         yield  # pragma: no cover
+
+     # ------------------------------------------------------------------
+     # Hook-aware wrappers
+     # ------------------------------------------------------------------
+
+     async def generate_with_hooks(self, prompt: str, options: dict[str, Any]) -> dict[str, Any]:
+         """Wrap :meth:`generate` with on_request / on_response / on_error callbacks."""
+         driver_name = getattr(self, "model", self.__class__.__name__)
+         self._fire_callback(
+             "on_request",
+             {"prompt": prompt, "messages": None, "options": options, "driver": driver_name},
+         )
+         t0 = time.perf_counter()
+         try:
+             resp = await self.generate(prompt, options)
+         except Exception as exc:
+             self._fire_callback(
+                 "on_error",
+                 {"error": exc, "prompt": prompt, "messages": None, "options": options, "driver": driver_name},
+             )
+             raise
+         elapsed_ms = (time.perf_counter() - t0) * 1000
+         self._fire_callback(
+             "on_response",
+             {
+                 "text": resp.get("text", ""),
+                 "meta": resp.get("meta", {}),
+                 "driver": driver_name,
+                 "elapsed_ms": elapsed_ms,
+             },
+         )
+         return resp
+
+     async def generate_messages_with_hooks(
+         self, messages: list[dict[str, Any]], options: dict[str, Any]
+     ) -> dict[str, Any]:
+         """Wrap :meth:`generate_messages` with callbacks."""
+         driver_name = getattr(self, "model", self.__class__.__name__)
+         self._fire_callback(
+             "on_request",
+             {"prompt": None, "messages": messages, "options": options, "driver": driver_name},
+         )
+         t0 = time.perf_counter()
+         try:
+             resp = await self.generate_messages(messages, options)
+         except Exception as exc:
+             self._fire_callback(
+                 "on_error",
+                 {"error": exc, "prompt": None, "messages": messages, "options": options, "driver": driver_name},
+             )
+             raise
+         elapsed_ms = (time.perf_counter() - t0) * 1000
+         self._fire_callback(
+             "on_response",
+             {
+                 "text": resp.get("text", ""),
+                 "meta": resp.get("meta", {}),
+                 "driver": driver_name,
+                 "elapsed_ms": elapsed_ms,
+             },
+         )
+         return resp
+
+     # ------------------------------------------------------------------
+     # Internal helpers
+     # ------------------------------------------------------------------
+
+     def _fire_callback(self, event: str, payload: dict[str, Any]) -> None:
+         """Invoke a single callback, swallowing and logging any exception."""
+         if self.callbacks is None:
+             return
+         cb = getattr(self.callbacks, event, None)
+         if cb is None:
+             return
+         try:
+             cb(payload)
+         except Exception:
+             logger.exception("Callback %s raised an exception", event)
+
+     def _check_vision_support(self, messages: list[dict[str, Any]]) -> None:
+         """Raise if messages contain image blocks and the driver lacks vision support."""
+         if self.supports_vision:
+             return
+         for msg in messages:
+             content = msg.get("content")
+             if isinstance(content, list):
+                 for block in content:
+                     if isinstance(block, dict) and block.get("type") == "image":
+                         raise NotImplementedError(
+                             f"{self.__class__.__name__} does not support vision/image inputs. "
+                             "Use a vision-capable model."
+                         )
+
+     def _prepare_messages(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
+         """Transform universal message format into provider-specific wire format.
+
+         Vision-capable async drivers override this to convert the universal
+         image blocks into their provider-specific format.
+         """
+         self._check_vision_support(messages)
+         return messages
+
+     # Re-export the static helper for convenience
+     _flatten_messages = staticmethod(Driver._flatten_messages)
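
To illustrate the contract documented above, here is a minimal sketch (not part of the package) of a concrete AsyncDriver subclass and the hook-aware wrapper in use. EchoDriver is hypothetical, and constructing DriverCallbacks with keyword arguments is an assumption about prompture/callbacks.py; the payload keys, the meta dict fields, and the generate_with_hooks behavior follow the docstrings in the diff.

    # sketch.py -- illustrative only; EchoDriver and the DriverCallbacks
    # keyword-argument constructor are assumptions, not part of the package.
    import asyncio
    from typing import Any

    from prompture.async_driver import AsyncDriver
    from prompture.callbacks import DriverCallbacks  # kwargs below are assumed


    class EchoDriver(AsyncDriver):
        """Toy driver that echoes the prompt back; no real provider call."""

        model = "echo-1"  # reported as "driver" in callback payloads

        async def generate(self, prompt: str, options: dict[str, Any]) -> dict[str, Any]:
            # A real driver would await its provider SDK here and map the
            # provider response into the {"text": ..., "meta": {...}} contract.
            n = len(prompt.split())
            return {
                "text": f"echo: {prompt}",
                "meta": {
                    "prompt_tokens": n,
                    "completion_tokens": n,
                    "total_tokens": 2 * n,
                    "cost": 0.0,
                    "raw_response": {},
                },
            }


    async def main() -> None:
        driver = EchoDriver()
        # generate_with_hooks fires on_request before and on_response after
        # the underlying generate() call, including elapsed_ms in the payload.
        driver.callbacks = DriverCallbacks(
            on_request=lambda p: print("request to", p["driver"]),
            on_response=lambda p: print("done in", round(p["elapsed_ms"], 1), "ms"),
        )
        resp = await driver.generate_with_hooks("hello world", {})
        print(resp["text"])


    asyncio.run(main())

Because generate_messages falls back to flattening messages into a single prompt, this toy subclass also works with generate_messages_with_hooks unchanged; a driver with native chat support would override generate_messages and set supports_messages = True.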