prompture 0.0.38.tar.gz → 0.0.38.dev2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (132)
  1. {prompture-0.0.38/prompture.egg-info → prompture-0.0.38.dev2}/PKG-INFO +1 -1
  2. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/_version.py +2 -2
  3. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/async_azure_driver.py +1 -1
  4. prompture-0.0.38.dev2/prompture/drivers/async_claude_driver.py +113 -0
  5. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/async_grok_driver.py +1 -1
  6. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/async_groq_driver.py +1 -1
  7. prompture-0.0.38.dev2/prompture/drivers/async_openai_driver.py +102 -0
  8. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/async_openrouter_driver.py +1 -1
  9. {prompture-0.0.38 → prompture-0.0.38.dev2/prompture.egg-info}/PKG-INFO +1 -1
  10. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture.egg-info/SOURCES.txt +0 -1
  11. prompture-0.0.38/VERSION +0 -1
  12. prompture-0.0.38/prompture/drivers/async_claude_driver.py +0 -272
  13. prompture-0.0.38/prompture/drivers/async_openai_driver.py +0 -244
  14. {prompture-0.0.38 → prompture-0.0.38.dev2}/.claude/skills/add-driver/SKILL.md +0 -0
  15. {prompture-0.0.38 → prompture-0.0.38.dev2}/.claude/skills/add-driver/references/driver-template.md +0 -0
  16. {prompture-0.0.38 → prompture-0.0.38.dev2}/.claude/skills/add-example/SKILL.md +0 -0
  17. {prompture-0.0.38 → prompture-0.0.38.dev2}/.claude/skills/add-field/SKILL.md +0 -0
  18. {prompture-0.0.38 → prompture-0.0.38.dev2}/.claude/skills/add-test/SKILL.md +0 -0
  19. {prompture-0.0.38 → prompture-0.0.38.dev2}/.claude/skills/run-tests/SKILL.md +0 -0
  20. {prompture-0.0.38 → prompture-0.0.38.dev2}/.claude/skills/scaffold-extraction/SKILL.md +0 -0
  21. {prompture-0.0.38 → prompture-0.0.38.dev2}/.claude/skills/update-pricing/SKILL.md +0 -0
  22. {prompture-0.0.38 → prompture-0.0.38.dev2}/.env.copy +0 -0
  23. {prompture-0.0.38 → prompture-0.0.38.dev2}/.github/FUNDING.yml +0 -0
  24. {prompture-0.0.38 → prompture-0.0.38.dev2}/.github/scripts/update_docs_version.py +0 -0
  25. {prompture-0.0.38 → prompture-0.0.38.dev2}/.github/scripts/update_wrapper_version.py +0 -0
  26. {prompture-0.0.38 → prompture-0.0.38.dev2}/.github/workflows/dev.yml +0 -0
  27. {prompture-0.0.38 → prompture-0.0.38.dev2}/.github/workflows/documentation.yml +0 -0
  28. {prompture-0.0.38 → prompture-0.0.38.dev2}/.github/workflows/publish.yml +0 -0
  29. {prompture-0.0.38 → prompture-0.0.38.dev2}/CLAUDE.md +0 -0
  30. {prompture-0.0.38 → prompture-0.0.38.dev2}/LICENSE +0 -0
  31. {prompture-0.0.38 → prompture-0.0.38.dev2}/MANIFEST.in +0 -0
  32. {prompture-0.0.38 → prompture-0.0.38.dev2}/README.md +0 -0
  33. {prompture-0.0.38 → prompture-0.0.38.dev2}/ROADMAP.md +0 -0
  34. {prompture-0.0.38 → prompture-0.0.38.dev2}/docs/source/_static/custom.css +0 -0
  35. {prompture-0.0.38 → prompture-0.0.38.dev2}/docs/source/_templates/footer.html +0 -0
  36. {prompture-0.0.38 → prompture-0.0.38.dev2}/docs/source/api/core.rst +0 -0
  37. {prompture-0.0.38 → prompture-0.0.38.dev2}/docs/source/api/drivers.rst +0 -0
  38. {prompture-0.0.38 → prompture-0.0.38.dev2}/docs/source/api/field_definitions.rst +0 -0
  39. {prompture-0.0.38 → prompture-0.0.38.dev2}/docs/source/api/index.rst +0 -0
  40. {prompture-0.0.38 → prompture-0.0.38.dev2}/docs/source/api/runner.rst +0 -0
  41. {prompture-0.0.38 → prompture-0.0.38.dev2}/docs/source/api/tools.rst +0 -0
  42. {prompture-0.0.38 → prompture-0.0.38.dev2}/docs/source/api/validator.rst +0 -0
  43. {prompture-0.0.38 → prompture-0.0.38.dev2}/docs/source/conf.py +0 -0
  44. {prompture-0.0.38 → prompture-0.0.38.dev2}/docs/source/contributing.rst +0 -0
  45. {prompture-0.0.38 → prompture-0.0.38.dev2}/docs/source/examples.rst +0 -0
  46. {prompture-0.0.38 → prompture-0.0.38.dev2}/docs/source/field_definitions_reference.rst +0 -0
  47. {prompture-0.0.38 → prompture-0.0.38.dev2}/docs/source/index.rst +0 -0
  48. {prompture-0.0.38 → prompture-0.0.38.dev2}/docs/source/installation.rst +0 -0
  49. {prompture-0.0.38 → prompture-0.0.38.dev2}/docs/source/quickstart.rst +0 -0
  50. {prompture-0.0.38 → prompture-0.0.38.dev2}/docs/source/toon_input_guide.rst +0 -0
  51. {prompture-0.0.38 → prompture-0.0.38.dev2}/packages/README.md +0 -0
  52. {prompture-0.0.38 → prompture-0.0.38.dev2}/packages/llm_to_json/README.md +0 -0
  53. {prompture-0.0.38 → prompture-0.0.38.dev2}/packages/llm_to_json/llm_to_json/__init__.py +0 -0
  54. {prompture-0.0.38 → prompture-0.0.38.dev2}/packages/llm_to_json/pyproject.toml +0 -0
  55. {prompture-0.0.38 → prompture-0.0.38.dev2}/packages/llm_to_json/test.py +0 -0
  56. {prompture-0.0.38 → prompture-0.0.38.dev2}/packages/llm_to_toon/README.md +0 -0
  57. {prompture-0.0.38 → prompture-0.0.38.dev2}/packages/llm_to_toon/llm_to_toon/__init__.py +0 -0
  58. {prompture-0.0.38 → prompture-0.0.38.dev2}/packages/llm_to_toon/pyproject.toml +0 -0
  59. {prompture-0.0.38 → prompture-0.0.38.dev2}/packages/llm_to_toon/test.py +0 -0
  60. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/__init__.py +0 -0
  61. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/agent.py +0 -0
  62. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/agent_types.py +0 -0
  63. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/aio/__init__.py +0 -0
  64. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/async_agent.py +0 -0
  65. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/async_conversation.py +0 -0
  66. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/async_core.py +0 -0
  67. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/async_driver.py +0 -0
  68. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/async_groups.py +0 -0
  69. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/cache.py +0 -0
  70. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/callbacks.py +0 -0
  71. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/cli.py +0 -0
  72. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/conversation.py +0 -0
  73. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/core.py +0 -0
  74. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/cost_mixin.py +0 -0
  75. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/discovery.py +0 -0
  76. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/driver.py +0 -0
  77. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/__init__.py +0 -0
  78. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/airllm_driver.py +0 -0
  79. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/async_airllm_driver.py +0 -0
  80. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/async_google_driver.py +0 -0
  81. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/async_hugging_driver.py +0 -0
  82. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/async_lmstudio_driver.py +0 -0
  83. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/async_local_http_driver.py +0 -0
  84. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/async_ollama_driver.py +0 -0
  85. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/async_registry.py +0 -0
  86. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/azure_driver.py +0 -0
  87. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/claude_driver.py +0 -0
  88. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/google_driver.py +0 -0
  89. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/grok_driver.py +0 -0
  90. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/groq_driver.py +0 -0
  91. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/hugging_driver.py +0 -0
  92. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/lmstudio_driver.py +0 -0
  93. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/local_http_driver.py +0 -0
  94. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/ollama_driver.py +0 -0
  95. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/openai_driver.py +0 -0
  96. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/openrouter_driver.py +0 -0
  97. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/registry.py +0 -0
  98. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/vision_helpers.py +0 -0
  99. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/field_definitions.py +0 -0
  100. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/group_types.py +0 -0
  101. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/groups.py +0 -0
  102. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/image.py +0 -0
  103. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/logging.py +0 -0
  104. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/model_rates.py +0 -0
  105. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/persistence.py +0 -0
  106. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/persona.py +0 -0
  107. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/runner.py +0 -0
  108. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/scaffold/__init__.py +0 -0
  109. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/scaffold/generator.py +0 -0
  110. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/scaffold/templates/Dockerfile.j2 +0 -0
  111. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/scaffold/templates/README.md.j2 +0 -0
  112. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/scaffold/templates/config.py.j2 +0 -0
  113. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/scaffold/templates/env.example.j2 +0 -0
  114. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/scaffold/templates/main.py.j2 +0 -0
  115. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/scaffold/templates/models.py.j2 +0 -0
  116. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/scaffold/templates/requirements.txt.j2 +0 -0
  117. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/serialization.py +0 -0
  118. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/server.py +0 -0
  119. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/session.py +0 -0
  120. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/settings.py +0 -0
  121. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/tools.py +0 -0
  122. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/tools_schema.py +0 -0
  123. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/validator.py +0 -0
  124. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture.egg-info/dependency_links.txt +0 -0
  125. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture.egg-info/entry_points.txt +0 -0
  126. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture.egg-info/requires.txt +0 -0
  127. {prompture-0.0.38 → prompture-0.0.38.dev2}/prompture.egg-info/top_level.txt +0 -0
  128. {prompture-0.0.38 → prompture-0.0.38.dev2}/pyproject.toml +0 -0
  129. {prompture-0.0.38 → prompture-0.0.38.dev2}/requirements.txt +0 -0
  130. {prompture-0.0.38 → prompture-0.0.38.dev2}/setup.cfg +0 -0
  131. {prompture-0.0.38 → prompture-0.0.38.dev2}/test.py +0 -0
  132. {prompture-0.0.38 → prompture-0.0.38.dev2}/test_version_diagnosis.py +0 -0
{prompture-0.0.38/prompture.egg-info → prompture-0.0.38.dev2}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: prompture
- Version: 0.0.38
+ Version: 0.0.38.dev2
  Summary: Ask LLMs to return structured JSON and run cross-model tests. API-first.
  Author-email: Juan Denis <juan@vene.co>
  License-Expression: MIT
{prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/_version.py
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
  commit_id: COMMIT_ID
  __commit_id__: COMMIT_ID

- __version__ = version = '0.0.38'
- __version_tuple__ = version_tuple = (0, 0, 38)
+ __version__ = version = '0.0.38.dev2'
+ __version_tuple__ = version_tuple = (0, 0, 38, 'dev2')

  __commit_id__ = commit_id = None
{prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/async_azure_driver.py
@@ -113,7 +113,7 @@ class AsyncAzureDriver(CostMixin, AsyncDriver):
  "prompt_tokens": prompt_tokens,
  "completion_tokens": completion_tokens,
  "total_tokens": total_tokens,
- "cost": round(total_cost, 6),
+ "cost": total_cost,
  "raw_response": resp.model_dump(),
  "model_name": model,
  "deployment_id": self.deployment_id,
prompture-0.0.38.dev2/prompture/drivers/async_claude_driver.py NEW
@@ -0,0 +1,113 @@
+ """Async Anthropic Claude driver. Requires the ``anthropic`` package."""
+
+ from __future__ import annotations
+
+ import json
+ import os
+ from typing import Any
+
+ try:
+     import anthropic
+ except Exception:
+     anthropic = None
+
+ from ..async_driver import AsyncDriver
+ from ..cost_mixin import CostMixin
+ from .claude_driver import ClaudeDriver
+
+
+ class AsyncClaudeDriver(CostMixin, AsyncDriver):
+     supports_json_mode = True
+     supports_json_schema = True
+     supports_vision = True
+
+     MODEL_PRICING = ClaudeDriver.MODEL_PRICING
+
+     def __init__(self, api_key: str | None = None, model: str = "claude-3-5-haiku-20241022"):
+         self.api_key = api_key or os.getenv("CLAUDE_API_KEY")
+         self.model = model or os.getenv("CLAUDE_MODEL_NAME", "claude-3-5-haiku-20241022")
+
+     supports_messages = True
+
+     def _prepare_messages(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
+         from .vision_helpers import _prepare_claude_vision_messages
+
+         return _prepare_claude_vision_messages(messages)
+
+     async def generate(self, prompt: str, options: dict[str, Any]) -> dict[str, Any]:
+         messages = [{"role": "user", "content": prompt}]
+         return await self._do_generate(messages, options)
+
+     async def generate_messages(self, messages: list[dict[str, str]], options: dict[str, Any]) -> dict[str, Any]:
+         return await self._do_generate(self._prepare_messages(messages), options)
+
+     async def _do_generate(self, messages: list[dict[str, str]], options: dict[str, Any]) -> dict[str, Any]:
+         if anthropic is None:
+             raise RuntimeError("anthropic package not installed")
+
+         opts = {**{"temperature": 0.0, "max_tokens": 512}, **options}
+         model = options.get("model", self.model)
+
+         client = anthropic.AsyncAnthropic(api_key=self.api_key)
+
+         # Anthropic requires system messages as a top-level parameter
+         system_content = None
+         api_messages = []
+         for msg in messages:
+             if msg.get("role") == "system":
+                 system_content = msg.get("content", "")
+             else:
+                 api_messages.append(msg)
+
+         # Build common kwargs
+         common_kwargs: dict[str, Any] = {
+             "model": model,
+             "messages": api_messages,
+             "temperature": opts["temperature"],
+             "max_tokens": opts["max_tokens"],
+         }
+         if system_content:
+             common_kwargs["system"] = system_content
+
+         # Native JSON mode: use tool-use for schema enforcement
+         if options.get("json_mode"):
+             json_schema = options.get("json_schema")
+             if json_schema:
+                 tool_def = {
+                     "name": "extract_json",
+                     "description": "Extract structured data matching the schema",
+                     "input_schema": json_schema,
+                 }
+                 resp = await client.messages.create(
+                     **common_kwargs,
+                     tools=[tool_def],
+                     tool_choice={"type": "tool", "name": "extract_json"},
+                 )
+                 text = ""
+                 for block in resp.content:
+                     if block.type == "tool_use":
+                         text = json.dumps(block.input)
+                         break
+             else:
+                 resp = await client.messages.create(**common_kwargs)
+                 text = resp.content[0].text
+         else:
+             resp = await client.messages.create(**common_kwargs)
+             text = resp.content[0].text
+
+         prompt_tokens = resp.usage.input_tokens
+         completion_tokens = resp.usage.output_tokens
+         total_tokens = prompt_tokens + completion_tokens
+
+         total_cost = self._calculate_cost("claude", model, prompt_tokens, completion_tokens)
+
+         meta = {
+             "prompt_tokens": prompt_tokens,
+             "completion_tokens": completion_tokens,
+             "total_tokens": total_tokens,
+             "cost": total_cost,
+             "raw_response": dict(resp),
+             "model_name": model,
+         }
+
+         return {"text": text, "meta": meta}
{prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/async_grok_driver.py
@@ -88,7 +88,7 @@ class AsyncGrokDriver(CostMixin, AsyncDriver):
  "prompt_tokens": prompt_tokens,
  "completion_tokens": completion_tokens,
  "total_tokens": total_tokens,
- "cost": round(total_cost, 6),
+ "cost": total_cost,
  "raw_response": resp,
  "model_name": model,
  }
{prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/async_groq_driver.py
@@ -81,7 +81,7 @@ class AsyncGroqDriver(CostMixin, AsyncDriver):
  "prompt_tokens": prompt_tokens,
  "completion_tokens": completion_tokens,
  "total_tokens": total_tokens,
- "cost": round(total_cost, 6),
+ "cost": total_cost,
  "raw_response": resp.model_dump(),
  "model_name": model,
  }
prompture-0.0.38.dev2/prompture/drivers/async_openai_driver.py NEW
@@ -0,0 +1,102 @@
+ """Async OpenAI driver. Requires the ``openai`` package (>=1.0.0)."""
+
+ from __future__ import annotations
+
+ import os
+ from typing import Any
+
+ try:
+     from openai import AsyncOpenAI
+ except Exception:
+     AsyncOpenAI = None
+
+ from ..async_driver import AsyncDriver
+ from ..cost_mixin import CostMixin
+ from .openai_driver import OpenAIDriver
+
+
+ class AsyncOpenAIDriver(CostMixin, AsyncDriver):
+     supports_json_mode = True
+     supports_json_schema = True
+     supports_vision = True
+
+     MODEL_PRICING = OpenAIDriver.MODEL_PRICING
+
+     def __init__(self, api_key: str | None = None, model: str = "gpt-4o-mini"):
+         self.api_key = api_key or os.getenv("OPENAI_API_KEY")
+         self.model = model
+         if AsyncOpenAI:
+             self.client = AsyncOpenAI(api_key=self.api_key)
+         else:
+             self.client = None
+
+     supports_messages = True
+
+     def _prepare_messages(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
+         from .vision_helpers import _prepare_openai_vision_messages
+
+         return _prepare_openai_vision_messages(messages)
+
+     async def generate(self, prompt: str, options: dict[str, Any]) -> dict[str, Any]:
+         messages = [{"role": "user", "content": prompt}]
+         return await self._do_generate(messages, options)
+
+     async def generate_messages(self, messages: list[dict[str, str]], options: dict[str, Any]) -> dict[str, Any]:
+         return await self._do_generate(self._prepare_messages(messages), options)
+
+     async def _do_generate(self, messages: list[dict[str, str]], options: dict[str, Any]) -> dict[str, Any]:
+         if self.client is None:
+             raise RuntimeError("openai package (>=1.0.0) is not installed")
+
+         model = options.get("model", self.model)
+
+         model_info = self.MODEL_PRICING.get(model, {})
+         tokens_param = model_info.get("tokens_param", "max_tokens")
+         supports_temperature = model_info.get("supports_temperature", True)
+
+         opts = {"temperature": 1.0, "max_tokens": 512, **options}
+
+         kwargs = {
+             "model": model,
+             "messages": messages,
+         }
+         kwargs[tokens_param] = opts.get("max_tokens", 512)
+
+         if supports_temperature and "temperature" in opts:
+             kwargs["temperature"] = opts["temperature"]
+
+         # Native JSON mode support
+         if options.get("json_mode"):
+             json_schema = options.get("json_schema")
+             if json_schema:
+                 kwargs["response_format"] = {
+                     "type": "json_schema",
+                     "json_schema": {
+                         "name": "extraction",
+                         "strict": True,
+                         "schema": json_schema,
+                     },
+                 }
+             else:
+                 kwargs["response_format"] = {"type": "json_object"}
+
+         resp = await self.client.chat.completions.create(**kwargs)
+
+         usage = getattr(resp, "usage", None)
+         prompt_tokens = getattr(usage, "prompt_tokens", 0)
+         completion_tokens = getattr(usage, "completion_tokens", 0)
+         total_tokens = getattr(usage, "total_tokens", 0)
+
+         total_cost = self._calculate_cost("openai", model, prompt_tokens, completion_tokens)
+
+         meta = {
+             "prompt_tokens": prompt_tokens,
+             "completion_tokens": completion_tokens,
+             "total_tokens": total_tokens,
+             "cost": total_cost,
+             "raw_response": resp.model_dump(),
+             "model_name": model,
+         }
+
+         text = resp.choices[0].message.content
+         return {"text": text, "meta": meta}
{prompture-0.0.38 → prompture-0.0.38.dev2}/prompture/drivers/async_openrouter_driver.py
@@ -93,7 +93,7 @@ class AsyncOpenRouterDriver(CostMixin, AsyncDriver):
  "prompt_tokens": prompt_tokens,
  "completion_tokens": completion_tokens,
  "total_tokens": total_tokens,
- "cost": round(total_cost, 6),
+ "cost": total_cost,
  "raw_response": resp,
  "model_name": model,
  }
{prompture-0.0.38 → prompture-0.0.38.dev2/prompture.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: prompture
- Version: 0.0.38
+ Version: 0.0.38.dev2
  Summary: Ask LLMs to return structured JSON and run cross-model tests. API-first.
  Author-email: Juan Denis <juan@vene.co>
  License-Expression: MIT
{prompture-0.0.38 → prompture-0.0.38.dev2}/prompture.egg-info/SOURCES.txt
@@ -4,7 +4,6 @@ LICENSE
  MANIFEST.in
  README.md
  ROADMAP.md
- VERSION
  pyproject.toml
  requirements.txt
  test.py
prompture-0.0.38/VERSION DELETED
@@ -1 +0,0 @@
- 0.0.38
prompture-0.0.38/prompture/drivers/async_claude_driver.py DELETED
@@ -1,272 +0,0 @@
- """Async Anthropic Claude driver. Requires the ``anthropic`` package."""
-
- from __future__ import annotations
-
- import json
- import os
- from collections.abc import AsyncIterator
- from typing import Any
-
- try:
-     import anthropic
- except Exception:
-     anthropic = None
-
- from ..async_driver import AsyncDriver
- from ..cost_mixin import CostMixin
- from .claude_driver import ClaudeDriver
-
-
- class AsyncClaudeDriver(CostMixin, AsyncDriver):
-     supports_json_mode = True
-     supports_json_schema = True
-     supports_tool_use = True
-     supports_streaming = True
-     supports_vision = True
-
-     MODEL_PRICING = ClaudeDriver.MODEL_PRICING
-
-     def __init__(self, api_key: str | None = None, model: str = "claude-3-5-haiku-20241022"):
-         self.api_key = api_key or os.getenv("CLAUDE_API_KEY")
-         self.model = model or os.getenv("CLAUDE_MODEL_NAME", "claude-3-5-haiku-20241022")
-
-     supports_messages = True
-
-     def _prepare_messages(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
-         from .vision_helpers import _prepare_claude_vision_messages
-
-         return _prepare_claude_vision_messages(messages)
-
-     async def generate(self, prompt: str, options: dict[str, Any]) -> dict[str, Any]:
-         messages = [{"role": "user", "content": prompt}]
-         return await self._do_generate(messages, options)
-
-     async def generate_messages(self, messages: list[dict[str, str]], options: dict[str, Any]) -> dict[str, Any]:
-         return await self._do_generate(self._prepare_messages(messages), options)
-
-     async def _do_generate(self, messages: list[dict[str, str]], options: dict[str, Any]) -> dict[str, Any]:
-         if anthropic is None:
-             raise RuntimeError("anthropic package not installed")
-
-         opts = {**{"temperature": 0.0, "max_tokens": 512}, **options}
-         model = options.get("model", self.model)
-
-         client = anthropic.AsyncAnthropic(api_key=self.api_key)
-
-         # Anthropic requires system messages as a top-level parameter
-         system_content, api_messages = self._extract_system_and_messages(messages)
-
-         # Build common kwargs
-         common_kwargs: dict[str, Any] = {
-             "model": model,
-             "messages": api_messages,
-             "temperature": opts["temperature"],
-             "max_tokens": opts["max_tokens"],
-         }
-         if system_content:
-             common_kwargs["system"] = system_content
-
-         # Native JSON mode: use tool-use for schema enforcement
-         if options.get("json_mode"):
-             json_schema = options.get("json_schema")
-             if json_schema:
-                 tool_def = {
-                     "name": "extract_json",
-                     "description": "Extract structured data matching the schema",
-                     "input_schema": json_schema,
-                 }
-                 resp = await client.messages.create(
-                     **common_kwargs,
-                     tools=[tool_def],
-                     tool_choice={"type": "tool", "name": "extract_json"},
-                 )
-                 text = ""
-                 for block in resp.content:
-                     if block.type == "tool_use":
-                         text = json.dumps(block.input)
-                         break
-             else:
-                 resp = await client.messages.create(**common_kwargs)
-                 text = resp.content[0].text
-         else:
-             resp = await client.messages.create(**common_kwargs)
-             text = resp.content[0].text
-
-         prompt_tokens = resp.usage.input_tokens
-         completion_tokens = resp.usage.output_tokens
-         total_tokens = prompt_tokens + completion_tokens
-
-         total_cost = self._calculate_cost("claude", model, prompt_tokens, completion_tokens)
-
-         meta = {
-             "prompt_tokens": prompt_tokens,
-             "completion_tokens": completion_tokens,
-             "total_tokens": total_tokens,
-             "cost": round(total_cost, 6),
-             "raw_response": dict(resp),
-             "model_name": model,
-         }
-
-         return {"text": text, "meta": meta}
-
-     # ------------------------------------------------------------------
-     # Helpers
-     # ------------------------------------------------------------------
-
-     def _extract_system_and_messages(
-         self, messages: list[dict[str, Any]]
-     ) -> tuple[str | None, list[dict[str, Any]]]:
-         """Separate system message from conversation messages for Anthropic API."""
-         system_content = None
-         api_messages: list[dict[str, Any]] = []
-         for msg in messages:
-             if msg.get("role") == "system":
-                 system_content = msg.get("content", "")
-             else:
-                 api_messages.append(msg)
-         return system_content, api_messages
-
-     # ------------------------------------------------------------------
-     # Tool use
-     # ------------------------------------------------------------------
-
-     async def generate_messages_with_tools(
-         self,
-         messages: list[dict[str, Any]],
-         tools: list[dict[str, Any]],
-         options: dict[str, Any],
-     ) -> dict[str, Any]:
-         """Generate a response that may include tool calls (Anthropic)."""
-         if anthropic is None:
-             raise RuntimeError("anthropic package not installed")
-
-         opts = {**{"temperature": 0.0, "max_tokens": 512}, **options}
-         model = options.get("model", self.model)
-         client = anthropic.AsyncAnthropic(api_key=self.api_key)
-
-         system_content, api_messages = self._extract_system_and_messages(messages)
-
-         # Convert tools from OpenAI format to Anthropic format if needed
-         anthropic_tools = []
-         for t in tools:
-             if "type" in t and t["type"] == "function":
-                 # OpenAI format -> Anthropic format
-                 fn = t["function"]
-                 anthropic_tools.append({
-                     "name": fn["name"],
-                     "description": fn.get("description", ""),
-                     "input_schema": fn.get("parameters", {"type": "object", "properties": {}}),
-                 })
-             elif "input_schema" in t:
-                 # Already Anthropic format
-                 anthropic_tools.append(t)
-             else:
-                 anthropic_tools.append(t)
-
-         kwargs: dict[str, Any] = {
-             "model": model,
-             "messages": api_messages,
-             "temperature": opts["temperature"],
-             "max_tokens": opts["max_tokens"],
-             "tools": anthropic_tools,
-         }
-         if system_content:
-             kwargs["system"] = system_content
-
-         resp = await client.messages.create(**kwargs)
-
-         prompt_tokens = resp.usage.input_tokens
-         completion_tokens = resp.usage.output_tokens
-         total_tokens = prompt_tokens + completion_tokens
-         total_cost = self._calculate_cost("claude", model, prompt_tokens, completion_tokens)
-
-         meta = {
-             "prompt_tokens": prompt_tokens,
-             "completion_tokens": completion_tokens,
-             "total_tokens": total_tokens,
-             "cost": round(total_cost, 6),
-             "raw_response": dict(resp),
-             "model_name": model,
-         }
-
-         text = ""
-         tool_calls_out: list[dict[str, Any]] = []
-         for block in resp.content:
-             if block.type == "text":
-                 text += block.text
-             elif block.type == "tool_use":
-                 tool_calls_out.append({
-                     "id": block.id,
-                     "name": block.name,
-                     "arguments": block.input,
-                 })
-
-         return {
-             "text": text,
-             "meta": meta,
-             "tool_calls": tool_calls_out,
-             "stop_reason": resp.stop_reason,
-         }
-
-     # ------------------------------------------------------------------
-     # Streaming
-     # ------------------------------------------------------------------
-
-     async def generate_messages_stream(
-         self,
-         messages: list[dict[str, Any]],
-         options: dict[str, Any],
-     ) -> AsyncIterator[dict[str, Any]]:
-         """Yield response chunks via Anthropic streaming API."""
-         if anthropic is None:
-             raise RuntimeError("anthropic package not installed")
-
-         opts = {**{"temperature": 0.0, "max_tokens": 512}, **options}
-         model = options.get("model", self.model)
-         client = anthropic.AsyncAnthropic(api_key=self.api_key)
-
-         system_content, api_messages = self._extract_system_and_messages(messages)
-
-         kwargs: dict[str, Any] = {
-             "model": model,
-             "messages": api_messages,
-             "temperature": opts["temperature"],
-             "max_tokens": opts["max_tokens"],
-         }
-         if system_content:
-             kwargs["system"] = system_content
-
-         full_text = ""
-         prompt_tokens = 0
-         completion_tokens = 0
-
-         async with client.messages.stream(**kwargs) as stream:
-             async for event in stream:
-                 if hasattr(event, "type"):
-                     if event.type == "content_block_delta" and hasattr(event, "delta"):
-                         delta_text = getattr(event.delta, "text", "")
-                         if delta_text:
-                             full_text += delta_text
-                             yield {"type": "delta", "text": delta_text}
-                     elif event.type == "message_delta" and hasattr(event, "usage"):
-                         completion_tokens = getattr(event.usage, "output_tokens", 0)
-                     elif event.type == "message_start" and hasattr(event, "message"):
-                         usage = getattr(event.message, "usage", None)
-                         if usage:
-                             prompt_tokens = getattr(usage, "input_tokens", 0)
-
-         total_tokens = prompt_tokens + completion_tokens
-         total_cost = self._calculate_cost("claude", model, prompt_tokens, completion_tokens)
-
-         yield {
-             "type": "done",
-             "text": full_text,
-             "meta": {
-                 "prompt_tokens": prompt_tokens,
-                 "completion_tokens": completion_tokens,
-                 "total_tokens": total_tokens,
-                 "cost": round(total_cost, 6),
-                 "raw_response": {},
-                 "model_name": model,
-             },
-         }
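
Note: relative to this deleted version, the 113-line replacement above keeps JSON mode and vision but drops supports_tool_use, supports_streaming, generate_messages_with_tools, and generate_messages_stream. A hypothetical caller-side guard (stream_or_generate is not part of the package) for code that previously depended on streaming:

    from collections.abc import AsyncIterator
    from typing import Any

    async def stream_or_generate(
        driver: Any, messages: list[dict[str, Any]], options: dict[str, Any]
    ) -> AsyncIterator[dict[str, Any]]:
        # Prefer the streaming API when the driver still advertises it;
        # otherwise fall back to a single done-style chunk.
        if getattr(driver, "supports_streaming", False):
            async for chunk in driver.generate_messages_stream(messages, options):
                yield chunk
        else:
            result = await driver.generate_messages(messages, options)
            yield {"type": "done", **result}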