prompture-0.0.38.dev2-py3-none-any.whl → prompture-0.0.38.dev3-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
prompture/_version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '0.0.38.dev2'
-__version_tuple__ = version_tuple = (0, 0, 38, 'dev2')
+__version__ = version = '0.0.38.dev3'
+__version_tuple__ = version_tuple = (0, 0, 38, 'dev3')
 
 __commit_id__ = commit_id = None
prompture/drivers/async_azure_driver.py CHANGED
@@ -113,7 +113,7 @@ class AsyncAzureDriver(CostMixin, AsyncDriver):
             "prompt_tokens": prompt_tokens,
             "completion_tokens": completion_tokens,
             "total_tokens": total_tokens,
-            "cost": total_cost,
+            "cost": round(total_cost, 6),
             "raw_response": resp.model_dump(),
             "model_name": model,
             "deployment_id": self.deployment_id,
prompture/drivers/async_claude_driver.py CHANGED
@@ -4,6 +4,7 @@ from __future__ import annotations
 
 import json
 import os
+from collections.abc import AsyncIterator
 from typing import Any
 
 try:
@@ -19,6 +20,8 @@ from .claude_driver import ClaudeDriver
 class AsyncClaudeDriver(CostMixin, AsyncDriver):
     supports_json_mode = True
     supports_json_schema = True
+    supports_tool_use = True
+    supports_streaming = True
     supports_vision = True
 
     MODEL_PRICING = ClaudeDriver.MODEL_PRICING
@@ -51,13 +54,7 @@ class AsyncClaudeDriver(CostMixin, AsyncDriver):
         client = anthropic.AsyncAnthropic(api_key=self.api_key)
 
         # Anthropic requires system messages as a top-level parameter
-        system_content = None
-        api_messages = []
-        for msg in messages:
-            if msg.get("role") == "system":
-                system_content = msg.get("content", "")
-            else:
-                api_messages.append(msg)
+        system_content, api_messages = self._extract_system_and_messages(messages)
 
         # Build common kwargs
         common_kwargs: dict[str, Any] = {
@@ -105,9 +102,171 @@ class AsyncClaudeDriver(CostMixin, AsyncDriver):
             "prompt_tokens": prompt_tokens,
             "completion_tokens": completion_tokens,
             "total_tokens": total_tokens,
-            "cost": total_cost,
+            "cost": round(total_cost, 6),
             "raw_response": dict(resp),
             "model_name": model,
         }
 
         return {"text": text, "meta": meta}
+
+    # ------------------------------------------------------------------
+    # Helpers
+    # ------------------------------------------------------------------
+
+    def _extract_system_and_messages(
+        self, messages: list[dict[str, Any]]
+    ) -> tuple[str | None, list[dict[str, Any]]]:
+        """Separate system message from conversation messages for Anthropic API."""
+        system_content = None
+        api_messages: list[dict[str, Any]] = []
+        for msg in messages:
+            if msg.get("role") == "system":
+                system_content = msg.get("content", "")
+            else:
+                api_messages.append(msg)
+        return system_content, api_messages
+
+    # ------------------------------------------------------------------
+    # Tool use
+    # ------------------------------------------------------------------
+
+    async def generate_messages_with_tools(
+        self,
+        messages: list[dict[str, Any]],
+        tools: list[dict[str, Any]],
+        options: dict[str, Any],
+    ) -> dict[str, Any]:
+        """Generate a response that may include tool calls (Anthropic)."""
+        if anthropic is None:
+            raise RuntimeError("anthropic package not installed")
+
+        opts = {**{"temperature": 0.0, "max_tokens": 512}, **options}
+        model = options.get("model", self.model)
+        client = anthropic.AsyncAnthropic(api_key=self.api_key)
+
+        system_content, api_messages = self._extract_system_and_messages(messages)
+
+        # Convert tools from OpenAI format to Anthropic format if needed
+        anthropic_tools = []
+        for t in tools:
+            if "type" in t and t["type"] == "function":
+                # OpenAI format -> Anthropic format
+                fn = t["function"]
+                anthropic_tools.append({
+                    "name": fn["name"],
+                    "description": fn.get("description", ""),
+                    "input_schema": fn.get("parameters", {"type": "object", "properties": {}}),
+                })
+            elif "input_schema" in t:
+                # Already Anthropic format
+                anthropic_tools.append(t)
+            else:
+                anthropic_tools.append(t)
+
+        kwargs: dict[str, Any] = {
+            "model": model,
+            "messages": api_messages,
+            "temperature": opts["temperature"],
+            "max_tokens": opts["max_tokens"],
+            "tools": anthropic_tools,
+        }
+        if system_content:
+            kwargs["system"] = system_content
+
+        resp = await client.messages.create(**kwargs)
+
+        prompt_tokens = resp.usage.input_tokens
+        completion_tokens = resp.usage.output_tokens
+        total_tokens = prompt_tokens + completion_tokens
+        total_cost = self._calculate_cost("claude", model, prompt_tokens, completion_tokens)
+
+        meta = {
+            "prompt_tokens": prompt_tokens,
+            "completion_tokens": completion_tokens,
+            "total_tokens": total_tokens,
+            "cost": round(total_cost, 6),
+            "raw_response": dict(resp),
+            "model_name": model,
+        }
+
+        text = ""
+        tool_calls_out: list[dict[str, Any]] = []
+        for block in resp.content:
+            if block.type == "text":
+                text += block.text
+            elif block.type == "tool_use":
+                tool_calls_out.append({
+                    "id": block.id,
+                    "name": block.name,
+                    "arguments": block.input,
+                })
+
+        return {
+            "text": text,
+            "meta": meta,
+            "tool_calls": tool_calls_out,
+            "stop_reason": resp.stop_reason,
+        }
+
+    # ------------------------------------------------------------------
+    # Streaming
+    # ------------------------------------------------------------------
+
+    async def generate_messages_stream(
+        self,
+        messages: list[dict[str, Any]],
+        options: dict[str, Any],
+    ) -> AsyncIterator[dict[str, Any]]:
+        """Yield response chunks via Anthropic streaming API."""
+        if anthropic is None:
+            raise RuntimeError("anthropic package not installed")
+
+        opts = {**{"temperature": 0.0, "max_tokens": 512}, **options}
+        model = options.get("model", self.model)
+        client = anthropic.AsyncAnthropic(api_key=self.api_key)
+
+        system_content, api_messages = self._extract_system_and_messages(messages)
+
+        kwargs: dict[str, Any] = {
+            "model": model,
+            "messages": api_messages,
+            "temperature": opts["temperature"],
+            "max_tokens": opts["max_tokens"],
+        }
+        if system_content:
+            kwargs["system"] = system_content
+
+        full_text = ""
+        prompt_tokens = 0
+        completion_tokens = 0
+
+        async with client.messages.stream(**kwargs) as stream:
+            async for event in stream:
+                if hasattr(event, "type"):
+                    if event.type == "content_block_delta" and hasattr(event, "delta"):
+                        delta_text = getattr(event.delta, "text", "")
+                        if delta_text:
+                            full_text += delta_text
+                            yield {"type": "delta", "text": delta_text}
+                    elif event.type == "message_delta" and hasattr(event, "usage"):
+                        completion_tokens = getattr(event.usage, "output_tokens", 0)
+                    elif event.type == "message_start" and hasattr(event, "message"):
+                        usage = getattr(event.message, "usage", None)
+                        if usage:
+                            prompt_tokens = getattr(usage, "input_tokens", 0)
+
+        total_tokens = prompt_tokens + completion_tokens
+        total_cost = self._calculate_cost("claude", model, prompt_tokens, completion_tokens)
+
+        yield {
+            "type": "done",
+            "text": full_text,
+            "meta": {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "cost": round(total_cost, 6),
+                "raw_response": {},
+                "model_name": model,
+            },
+        }
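The new `generate_messages_with_tools` accepts tool definitions in either OpenAI or Anthropic shape and returns a normalized dict. A minimal consumption sketch; the constructor arguments and model name here are assumptions (the method itself only relies on `self.api_key` and `self.model` being set):

```python
import asyncio
import os

from prompture.drivers.async_claude_driver import AsyncClaudeDriver

# OpenAI-style definition; the driver rewrites it into Anthropic's
# {"name", "description", "input_schema"} shape before calling the API.
weather_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}

async def main() -> None:
    # Construction details assumed for illustration.
    driver = AsyncClaudeDriver(api_key=os.environ["ANTHROPIC_API_KEY"],
                               model="claude-3-5-sonnet-latest")
    result = await driver.generate_messages_with_tools(
        messages=[
            {"role": "system", "content": "Use tools when they help."},
            {"role": "user", "content": "What's the weather in Lisbon?"},
        ],
        tools=[weather_tool],
        options={},
    )
    for call in result["tool_calls"]:
        # "arguments" is already a dict (Anthropic's block.input), not a JSON string
        print(call["id"], call["name"], call["arguments"])
    print(result["stop_reason"], result["meta"]["cost"])

asyncio.run(main())
```

Note the normalization: the Anthropic driver passes `block.input` through as a dict, while the OpenAI driver below has to `json.loads` the arguments string, so callers see the same `{"id", "name", "arguments"}` shape either way.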
prompture/drivers/async_grok_driver.py CHANGED
@@ -88,7 +88,7 @@ class AsyncGrokDriver(CostMixin, AsyncDriver):
             "prompt_tokens": prompt_tokens,
             "completion_tokens": completion_tokens,
             "total_tokens": total_tokens,
-            "cost": total_cost,
+            "cost": round(total_cost, 6),
             "raw_response": resp,
             "model_name": model,
         }
prompture/drivers/async_groq_driver.py CHANGED
@@ -81,7 +81,7 @@ class AsyncGroqDriver(CostMixin, AsyncDriver):
             "prompt_tokens": prompt_tokens,
             "completion_tokens": completion_tokens,
             "total_tokens": total_tokens,
-            "cost": total_cost,
+            "cost": round(total_cost, 6),
             "raw_response": resp.model_dump(),
             "model_name": model,
         }
prompture/drivers/async_openai_driver.py CHANGED
@@ -2,7 +2,9 @@
 
 from __future__ import annotations
 
+import json
 import os
+from collections.abc import AsyncIterator
 from typing import Any
 
 try:
@@ -18,6 +20,8 @@ from .openai_driver import OpenAIDriver
 class AsyncOpenAIDriver(CostMixin, AsyncDriver):
     supports_json_mode = True
     supports_json_schema = True
+    supports_tool_use = True
+    supports_streaming = True
    supports_vision = True
 
     MODEL_PRICING = OpenAIDriver.MODEL_PRICING
@@ -93,10 +97,148 @@ class AsyncOpenAIDriver(CostMixin, AsyncDriver):
             "prompt_tokens": prompt_tokens,
             "completion_tokens": completion_tokens,
             "total_tokens": total_tokens,
-            "cost": total_cost,
+            "cost": round(total_cost, 6),
             "raw_response": resp.model_dump(),
             "model_name": model,
         }
 
         text = resp.choices[0].message.content
         return {"text": text, "meta": meta}
+
+    # ------------------------------------------------------------------
+    # Tool use
+    # ------------------------------------------------------------------
+
+    async def generate_messages_with_tools(
+        self,
+        messages: list[dict[str, Any]],
+        tools: list[dict[str, Any]],
+        options: dict[str, Any],
+    ) -> dict[str, Any]:
+        """Generate a response that may include tool calls."""
+        if self.client is None:
+            raise RuntimeError("openai package (>=1.0.0) is not installed")
+
+        model = options.get("model", self.model)
+        model_info = self.MODEL_PRICING.get(model, {})
+        tokens_param = model_info.get("tokens_param", "max_tokens")
+        supports_temperature = model_info.get("supports_temperature", True)
+
+        opts = {"temperature": 1.0, "max_tokens": 512, **options}
+
+        kwargs: dict[str, Any] = {
+            "model": model,
+            "messages": messages,
+            "tools": tools,
+        }
+        kwargs[tokens_param] = opts.get("max_tokens", 512)
+
+        if supports_temperature and "temperature" in opts:
+            kwargs["temperature"] = opts["temperature"]
+
+        resp = await self.client.chat.completions.create(**kwargs)
+
+        usage = getattr(resp, "usage", None)
+        prompt_tokens = getattr(usage, "prompt_tokens", 0)
+        completion_tokens = getattr(usage, "completion_tokens", 0)
+        total_tokens = getattr(usage, "total_tokens", 0)
+        total_cost = self._calculate_cost("openai", model, prompt_tokens, completion_tokens)
+
+        meta = {
+            "prompt_tokens": prompt_tokens,
+            "completion_tokens": completion_tokens,
+            "total_tokens": total_tokens,
+            "cost": round(total_cost, 6),
+            "raw_response": resp.model_dump(),
+            "model_name": model,
+        }
+
+        choice = resp.choices[0]
+        text = choice.message.content or ""
+        stop_reason = choice.finish_reason
+
+        tool_calls_out: list[dict[str, Any]] = []
+        if choice.message.tool_calls:
+            for tc in choice.message.tool_calls:
+                try:
+                    args = json.loads(tc.function.arguments)
+                except (json.JSONDecodeError, TypeError):
+                    args = {}
+                tool_calls_out.append({
+                    "id": tc.id,
+                    "name": tc.function.name,
+                    "arguments": args,
+                })
+
+        return {
+            "text": text,
+            "meta": meta,
+            "tool_calls": tool_calls_out,
+            "stop_reason": stop_reason,
+        }
+
+    # ------------------------------------------------------------------
+    # Streaming
+    # ------------------------------------------------------------------
+
+    async def generate_messages_stream(
+        self,
+        messages: list[dict[str, Any]],
+        options: dict[str, Any],
+    ) -> AsyncIterator[dict[str, Any]]:
+        """Yield response chunks via OpenAI streaming API."""
+        if self.client is None:
+            raise RuntimeError("openai package (>=1.0.0) is not installed")
+
+        model = options.get("model", self.model)
+        model_info = self.MODEL_PRICING.get(model, {})
+        tokens_param = model_info.get("tokens_param", "max_tokens")
+        supports_temperature = model_info.get("supports_temperature", True)
+
+        opts = {"temperature": 1.0, "max_tokens": 512, **options}
+
+        kwargs: dict[str, Any] = {
+            "model": model,
+            "messages": messages,
+            "stream": True,
+            "stream_options": {"include_usage": True},
+        }
+        kwargs[tokens_param] = opts.get("max_tokens", 512)
+
+        if supports_temperature and "temperature" in opts:
+            kwargs["temperature"] = opts["temperature"]
+
+        stream = await self.client.chat.completions.create(**kwargs)
+
+        full_text = ""
+        prompt_tokens = 0
+        completion_tokens = 0
+
+        async for chunk in stream:
+            # Usage comes in the final chunk
+            if getattr(chunk, "usage", None):
+                prompt_tokens = chunk.usage.prompt_tokens or 0
+                completion_tokens = chunk.usage.completion_tokens or 0
+
+            if chunk.choices:
+                delta = chunk.choices[0].delta
+                content = getattr(delta, "content", None) or ""
+                if content:
+                    full_text += content
+                    yield {"type": "delta", "text": content}
+
+        total_tokens = prompt_tokens + completion_tokens
+        total_cost = self._calculate_cost("openai", model, prompt_tokens, completion_tokens)
+
+        yield {
+            "type": "done",
+            "text": full_text,
+            "meta": {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "cost": round(total_cost, 6),
+                "raw_response": {},
+                "model_name": model,
+            },
+        }
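Both the Claude and OpenAI drivers expose the same streaming protocol: zero or more `{"type": "delta"}` chunks with incremental text, then a single `{"type": "done"}` chunk carrying the aggregated text and the usage/cost meta. A minimal consumer sketch; again the construction details are assumptions:

```python
import asyncio
import os

from prompture.drivers.async_openai_driver import AsyncOpenAIDriver

async def main() -> None:
    # Constructor arguments and model name assumed for illustration;
    # streaming only requires self.client and self.model to be set.
    driver = AsyncOpenAIDriver(api_key=os.environ["OPENAI_API_KEY"],
                               model="gpt-4o-mini")

    messages = [{"role": "user", "content": "Write a haiku about diffs."}]
    async for chunk in driver.generate_messages_stream(messages, options={}):
        if chunk["type"] == "delta":
            print(chunk["text"], end="", flush=True)  # incremental tokens
        elif chunk["type"] == "done":
            meta = chunk["meta"]
            print(f"\n{meta['total_tokens']} tokens, ${meta['cost']}")

asyncio.run(main())
```

The `stream_options={"include_usage": True}` flag in the driver is what makes OpenAI append a final usage-bearing chunk, so the `done` chunk can report real token counts rather than estimates.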
prompture/drivers/async_openrouter_driver.py CHANGED
@@ -93,7 +93,7 @@ class AsyncOpenRouterDriver(CostMixin, AsyncDriver):
             "prompt_tokens": prompt_tokens,
             "completion_tokens": completion_tokens,
             "total_tokens": total_tokens,
-            "cost": total_cost,
+            "cost": round(total_cost, 6),
             "raw_response": resp,
             "model_name": model,
         }
prompture-0.0.38.dev3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: prompture
-Version: 0.0.38.dev2
+Version: 0.0.38.dev3
 Summary: Ask LLMs to return structured JSON and run cross-model tests. API-first.
 Author-email: Juan Denis <juan@vene.co>
 License-Expression: MIT
prompture-0.0.38.dev3.dist-info/RECORD CHANGED
@@ -1,5 +1,5 @@
 prompture/__init__.py,sha256=RrpHZlLPpzntUOp2tL2II2DdVxQRoCxY6JBF_b4k3s0,7213
-prompture/_version.py,sha256=bWLaHigPR3CFjLRNxPpiOJ7bfcPJjU-AII8xRG2CiZQ,719
+prompture/_version.py,sha256=e1uep7-PEqCFbKHaF3uTPcu4UaXdHJjkYrnGcuFmFZM,719
 prompture/agent.py,sha256=xe_yFHGDzTxaU4tmaLt5AQnzrN0I72hBGwGVrCxg2D0,34704
 prompture/agent_types.py,sha256=Icl16PQI-ThGLMFCU43adtQA6cqETbsPn4KssKBI4xc,4664
 prompture/async_agent.py,sha256=nOLOQCNkg0sKKTpryIiidmIcAAlA3FR2NfnZwrNBuCg,33066
@@ -35,17 +35,17 @@ prompture/aio/__init__.py,sha256=bKqTu4Jxld16aP_7SP9wU5au45UBIb041ORo4E4HzVo,181
 prompture/drivers/__init__.py,sha256=VuEBZPqaQzXLl_Lvn_c5mRlJJrrlObZCLeHaR8n2eJ4,7050
 prompture/drivers/airllm_driver.py,sha256=SaTh7e7Plvuct_TfRqQvsJsKHvvM_3iVqhBtlciM-Kw,3858
 prompture/drivers/async_airllm_driver.py,sha256=1hIWLXfyyIg9tXaOE22tLJvFyNwHnOi1M5BIKnV8ysk,908
-prompture/drivers/async_azure_driver.py,sha256=Rqq_5Utgr-lvxMHwlU0B5lwCTtqDhuUW212G9k8P0fQ,4463
-prompture/drivers/async_claude_driver.py,sha256=yB5QLbXD7Uqs4j45yulj73QSJJx1-IyIo84YGA1xjkw,4092
+prompture/drivers/async_azure_driver.py,sha256=lGZICROspP2_o2XlwIZZvrCDenSJZPNYTu7clCgRD68,4473
+prompture/drivers/async_claude_driver.py,sha256=dbUHH2EEotxUWz8cTXVCWtf4ExtiLv3FzzNenvHSVVI,10275
 prompture/drivers/async_google_driver.py,sha256=MIemYcE0ppSWfvVaxv4V-Tqjmy6BKO7sRG6UfZqtdV8,13349
-prompture/drivers/async_grok_driver.py,sha256=bblcUY5c5NJ_IeuFQ-jHRapGi_WywVgH6SSWWWbUMzo,3546
-prompture/drivers/async_groq_driver.py,sha256=gHvVe4M5VaRcyvonK9FQMLmCuL7i7HV9hwWcRgASUSg,3075
+prompture/drivers/async_grok_driver.py,sha256=fvqEK-mrAx4U4_0C1RePGdZ-TUmQI9Qvj-x1f_uGI5c,3556
+prompture/drivers/async_groq_driver.py,sha256=PEAAj7QHjVqT9UtLfnFY4i__Mk-QpngmHGvbaBNEUrE,3085
 prompture/drivers/async_hugging_driver.py,sha256=IblxqU6TpNUiigZ0BCgNkAgzpUr2FtPHJOZnOZMnHF0,2152
 prompture/drivers/async_lmstudio_driver.py,sha256=rPn2qVPm6UE2APzAn7ZHYTELUwr0dQMi8XHv6gAhyH8,5782
 prompture/drivers/async_local_http_driver.py,sha256=qoigIf-w3_c2dbVdM6m1e2RMAWP4Gk4VzVs5hM3lPvQ,1609
 prompture/drivers/async_ollama_driver.py,sha256=FaSXtFXrgeVHIe0b90Vg6rGeSTWLpPnjaThh9Ai7qQo,5042
-prompture/drivers/async_openai_driver.py,sha256=eLdVYQ8BUErQzVr4Ek1BZ75riMbHMz3ZPm6VQSTNFxk,3572
-prompture/drivers/async_openrouter_driver.py,sha256=VcSYOeBhbzRbzorYh_7K58yWCXB4UO0d6MmpBLf-7lQ,3783
+prompture/drivers/async_openai_driver.py,sha256=6p538rPlfAWhsTZ5HKAg8KEW1xM4WEFzXVPZsigz_P4,8704
+prompture/drivers/async_openrouter_driver.py,sha256=qvvwJADjnEj6J9f8m0eGlfWTBEm6oXTjwrgt_Im4K7w,3793
 prompture/drivers/async_registry.py,sha256=syervbb7THneJ-NUVSuxy4cnxGW6VuNzKv-Aqqn2ysU,4329
 prompture/drivers/azure_driver.py,sha256=QZr7HEvgSKT9LOTCtCjuBdHl57yvrnWmeTHtmewuJQY,5727
 prompture/drivers/claude_driver.py,sha256=8XnCBHtk6N_PzHStwxIUlcvekdPN896BqOLShmgxU9k,11536
@@ -69,9 +69,9 @@ prompture/scaffold/templates/env.example.j2,sha256=eESKr1KWgyrczO6d-nwAhQwSpf_G-
 prompture/scaffold/templates/main.py.j2,sha256=TEgc5OvsZOEX0JthkSW1NI_yLwgoeVN_x97Ibg-vyWY,2632
 prompture/scaffold/templates/models.py.j2,sha256=JrZ99GCVK6TKWapskVRSwCssGrTu5cGZ_r46fOhY2GE,858
 prompture/scaffold/templates/requirements.txt.j2,sha256=m3S5fi1hq9KG9l_9j317rjwWww0a43WMKd8VnUWv2A4,102
-prompture-0.0.38.dev2.dist-info/licenses/LICENSE,sha256=0HgDepH7aaHNFhHF-iXuW6_GqDfYPnVkjtiCAZ4yS8I,1060
-prompture-0.0.38.dev2.dist-info/METADATA,sha256=i1eCc1nlA9if0421sJ044TcnFKcmOi3GAcBb2HuKZMc,10842
-prompture-0.0.38.dev2.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-prompture-0.0.38.dev2.dist-info/entry_points.txt,sha256=AFPG3lJR86g4IJMoWQUW5Ph7G6MLNWG3A2u2Tp9zkp8,48
-prompture-0.0.38.dev2.dist-info/top_level.txt,sha256=to86zq_kjfdoLeAxQNr420UWqT0WzkKoZ509J7Qr2t4,10
-prompture-0.0.38.dev2.dist-info/RECORD,,
+prompture-0.0.38.dev3.dist-info/licenses/LICENSE,sha256=0HgDepH7aaHNFhHF-iXuW6_GqDfYPnVkjtiCAZ4yS8I,1060
+prompture-0.0.38.dev3.dist-info/METADATA,sha256=ejIH91dOyVKrmJ4nKEbsutiI5Gb2xMRiqKuhzgz04Kw,10842
+prompture-0.0.38.dev3.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+prompture-0.0.38.dev3.dist-info/entry_points.txt,sha256=AFPG3lJR86g4IJMoWQUW5Ph7G6MLNWG3A2u2Tp9zkp8,48
+prompture-0.0.38.dev3.dist-info/top_level.txt,sha256=to86zq_kjfdoLeAxQNr420UWqT0WzkKoZ509J7Qr2t4,10
+prompture-0.0.38.dev3.dist-info/RECORD,,
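Each RECORD row is `path,hash,size`. The hash is a sha256 digest encoded as URL-safe base64 with the `=` padding stripped (per PEP 376/PEP 427), which is why `_version.py` gets a new hash while its size stays 719 bytes: `dev2` and `dev3` are the same length. A small sketch that reproduces the format:

```python
import base64
import hashlib

def record_hash(data: bytes) -> str:
    """Hash file contents the way wheel RECORD files do (PEP 376/427)."""
    digest = hashlib.sha256(data).digest()
    # URL-safe base64, trailing '=' padding removed
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# record_hash(open("prompture/_version.py", "rb").read()) should reproduce
# the "sha256=e1uep7-..." value listed above for the dev3 wheel.
```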