prompture 0.0.37.dev2.tar.gz → 0.0.38.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {prompture-0.0.37.dev2 → prompture-0.0.38}/.github/workflows/dev.yml +0 -7
- {prompture-0.0.37.dev2 → prompture-0.0.38}/.github/workflows/publish.yml +1 -1
- {prompture-0.0.37.dev2/prompture.egg-info → prompture-0.0.38}/PKG-INFO +1 -1
- prompture-0.0.38/VERSION +1 -0
- prompture-0.0.38/docs/source/_templates/footer.html +16 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/docs/source/conf.py +1 -1
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/_version.py +2 -2
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/agent.py +1 -2
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/async_agent.py +1 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/async_azure_driver.py +1 -1
- prompture-0.0.38/prompture/drivers/async_claude_driver.py +272 -0
- prompture-0.0.38/prompture/drivers/async_google_driver.py +316 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/async_grok_driver.py +1 -1
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/async_groq_driver.py +1 -1
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/async_lmstudio_driver.py +16 -3
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/async_ollama_driver.py +6 -3
- prompture-0.0.38/prompture/drivers/async_openai_driver.py +244 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/async_openrouter_driver.py +1 -1
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/google_driver.py +207 -43
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/lmstudio_driver.py +16 -3
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/ollama_driver.py +9 -5
- {prompture-0.0.37.dev2 → prompture-0.0.38/prompture.egg-info}/PKG-INFO +1 -1
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture.egg-info/SOURCES.txt +1 -0
- prompture-0.0.37.dev2/VERSION +0 -1
- prompture-0.0.37.dev2/prompture/drivers/async_claude_driver.py +0 -113
- prompture-0.0.37.dev2/prompture/drivers/async_google_driver.py +0 -152
- prompture-0.0.37.dev2/prompture/drivers/async_openai_driver.py +0 -102
- {prompture-0.0.37.dev2 → prompture-0.0.38}/.claude/skills/add-driver/SKILL.md +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/.claude/skills/add-driver/references/driver-template.md +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/.claude/skills/add-example/SKILL.md +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/.claude/skills/add-field/SKILL.md +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/.claude/skills/add-test/SKILL.md +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/.claude/skills/run-tests/SKILL.md +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/.claude/skills/scaffold-extraction/SKILL.md +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/.claude/skills/update-pricing/SKILL.md +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/.env.copy +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/.github/FUNDING.yml +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/.github/scripts/update_docs_version.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/.github/scripts/update_wrapper_version.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/.github/workflows/documentation.yml +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/CLAUDE.md +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/LICENSE +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/MANIFEST.in +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/README.md +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/ROADMAP.md +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/docs/source/_static/custom.css +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/docs/source/api/core.rst +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/docs/source/api/drivers.rst +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/docs/source/api/field_definitions.rst +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/docs/source/api/index.rst +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/docs/source/api/runner.rst +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/docs/source/api/tools.rst +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/docs/source/api/validator.rst +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/docs/source/contributing.rst +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/docs/source/examples.rst +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/docs/source/field_definitions_reference.rst +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/docs/source/index.rst +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/docs/source/installation.rst +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/docs/source/quickstart.rst +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/docs/source/toon_input_guide.rst +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/packages/README.md +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/packages/llm_to_json/README.md +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/packages/llm_to_json/llm_to_json/__init__.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/packages/llm_to_json/pyproject.toml +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/packages/llm_to_json/test.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/packages/llm_to_toon/README.md +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/packages/llm_to_toon/llm_to_toon/__init__.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/packages/llm_to_toon/pyproject.toml +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/packages/llm_to_toon/test.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/__init__.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/agent_types.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/aio/__init__.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/async_conversation.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/async_core.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/async_driver.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/async_groups.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/cache.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/callbacks.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/cli.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/conversation.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/core.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/cost_mixin.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/discovery.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/driver.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/__init__.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/airllm_driver.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/async_airllm_driver.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/async_hugging_driver.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/async_local_http_driver.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/async_registry.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/azure_driver.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/claude_driver.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/grok_driver.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/groq_driver.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/hugging_driver.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/local_http_driver.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/openai_driver.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/openrouter_driver.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/registry.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/vision_helpers.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/field_definitions.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/group_types.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/groups.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/image.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/logging.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/model_rates.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/persistence.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/persona.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/runner.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/scaffold/__init__.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/scaffold/generator.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/scaffold/templates/Dockerfile.j2 +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/scaffold/templates/README.md.j2 +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/scaffold/templates/config.py.j2 +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/scaffold/templates/env.example.j2 +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/scaffold/templates/main.py.j2 +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/scaffold/templates/models.py.j2 +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/scaffold/templates/requirements.txt.j2 +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/serialization.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/server.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/session.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/settings.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/tools.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/tools_schema.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/validator.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture.egg-info/dependency_links.txt +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture.egg-info/entry_points.txt +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture.egg-info/requires.txt +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/prompture.egg-info/top_level.txt +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/pyproject.toml +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/requirements.txt +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/setup.cfg +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/test.py +0 -0
- {prompture-0.0.37.dev2 → prompture-0.0.38}/test_version_diagnosis.py +0 -0
{prompture-0.0.37.dev2 → prompture-0.0.38}/.github/workflows/dev.yml
@@ -62,13 +62,6 @@ jobs:
           git config --global user.name "github-actions[bot]"
           git config --global user.email "github-actions[bot]@users.noreply.github.com"
 
-      - name: Update VERSION file
-        run: |
-          echo "${{ steps.ver.outputs.new }}" > VERSION
-          git add VERSION
-          git commit -m "chore: bump dev version to ${{ steps.ver.outputs.new }} [skip ci]" || true
-          git push origin dev || true
-
       - name: Create and push dev tag
         run: |
           git tag -a "${{ steps.ver.outputs.tag }}" -m "pre: ${{ steps.ver.outputs.new }}"
{prompture-0.0.37.dev2 → prompture-0.0.38}/.github/workflows/publish.yml
@@ -38,7 +38,7 @@ jobs:
           echo "${{ steps.bump_version.outputs.new_version }}" > VERSION
           git config user.name "github-actions[bot]"
           git config user.email "github-actions[bot]@users.noreply.github.com"
-          git add VERSION
+          git add -f VERSION
           git commit -m "🔖 Version v${{ steps.bump_version.outputs.new_version }} [skip ci]" || true
           git push origin main || true
 
prompture-0.0.38/VERSION ADDED
@@ -0,0 +1 @@
+0.0.38
prompture-0.0.38/docs/source/_templates/footer.html ADDED
@@ -0,0 +1,16 @@
+{%- extends "!footer.html" %}
+
+{% block extrafooter %}
+<script>
+  document.addEventListener("DOMContentLoaded", function() {
+    var footerCopy = document.querySelector("footer .copyright");
+    if (footerCopy) {
+      footerCopy.innerHTML = footerCopy.innerHTML.replace(
+        "Juan Denis",
+        '<a href="https://juandenis.com">Juan Denis</a>'
+      );
+    }
+  });
+</script>
+{{ super() }}
+{% endblock %}
{prompture-0.0.37.dev2 → prompture-0.0.38}/docs/source/conf.py
@@ -14,7 +14,7 @@ sys.path.insert(0, os.path.abspath("../../"))
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
 
 project = "Prompture"
-copyright = '2026,
+copyright = '2026, Juan Denis'
 author = "Juan Denis"
 
 # Read version dynamically: VERSION file > setuptools_scm > fallback
{prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/_version.py
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '0.0.
-__version_tuple__ = version_tuple = (0, 0,
+__version__ = version = '0.0.38'
+__version_tuple__ = version_tuple = (0, 0, 38)
 
 __commit_id__ = commit_id = None
{prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/agent.py
@@ -80,6 +80,7 @@ def _tool_wants_context(fn: Callable[..., Any]) -> bool:
         hints = typing.get_type_hints(fn, include_extras=True)
         annotation = hints.get(first_param)
     except Exception:
+        # get_type_hints can fail with local/forward references; fall back to raw annotation
         pass
 
     # Fallback: inspect raw annotation (may be a string)
@@ -794,9 +795,7 @@ class Agent(Generic[DepsType]):
 
         if has_tools:
             # Tools registered: fall back to non-streaming conv.ask()
-            t0 = time.perf_counter()
             response_text = conv.ask(effective_prompt)
-            _elapsed_ms = (time.perf_counter() - t0) * 1000
 
             # Yield the full text as a single delta
             yield StreamEvent(
{prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/async_agent.py
@@ -81,6 +81,7 @@ def _tool_wants_context(fn: Callable[..., Any]) -> bool:
         hints = typing.get_type_hints(fn, include_extras=True)
         annotation = hints.get(first_param)
     except Exception:
+        # get_type_hints can fail with local/forward references; fall back to raw annotation
         pass
 
     if annotation is None:
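The comment added to both agent modules above flags a real pitfall: typing.get_type_hints raises (rather than returning partial results) when an annotation contains a forward reference it cannot resolve, which is common for tool functions defined inside other functions. A minimal sketch of the failure mode, using a deliberately unresolvable, hypothetical name "Ctx":

import typing

def tool(ctx: "Ctx") -> str:
    return "ok"

try:
    typing.get_type_hints(tool)  # raises NameError: name 'Ctx' is not defined
except Exception as exc:
    annotation = tool.__annotations__.get("ctx")  # raw annotation survives as a string
    print(type(exc).__name__, repr(annotation))   # NameError 'Ctx'

This is why both hunks fall back to inspecting the raw annotation after the except.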
{prompture-0.0.37.dev2 → prompture-0.0.38}/prompture/drivers/async_azure_driver.py
@@ -113,7 +113,7 @@ class AsyncAzureDriver(CostMixin, AsyncDriver):
             "prompt_tokens": prompt_tokens,
             "completion_tokens": completion_tokens,
             "total_tokens": total_tokens,
-            "cost": total_cost,
+            "cost": round(total_cost, 6),
             "raw_response": resp.model_dump(),
             "model_name": model,
             "deployment_id": self.deployment_id,
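The same rounding also appears throughout the new async drivers below. A small sketch of the float noise that round(..., 6) trims; the per-token rates here are made up for illustration, not prompture's real pricing table:

# Hypothetical rates: $0.15 per 1M prompt tokens, $0.60 per 1M completion tokens.
prompt_tokens, completion_tokens = 1234, 567
cost = prompt_tokens * 0.15 / 1e6 + completion_tokens * 0.60 / 1e6
print(cost)            # binary float artifacts, e.g. 0.0005253000000000001
print(round(cost, 6))  # 0.000525, the stable value stored as meta["cost"]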
prompture-0.0.38/prompture/drivers/async_claude_driver.py ADDED
@@ -0,0 +1,272 @@
+"""Async Anthropic Claude driver. Requires the ``anthropic`` package."""
+
+from __future__ import annotations
+
+import json
+import os
+from collections.abc import AsyncIterator
+from typing import Any
+
+try:
+    import anthropic
+except Exception:
+    anthropic = None
+
+from ..async_driver import AsyncDriver
+from ..cost_mixin import CostMixin
+from .claude_driver import ClaudeDriver
+
+
+class AsyncClaudeDriver(CostMixin, AsyncDriver):
+    supports_json_mode = True
+    supports_json_schema = True
+    supports_tool_use = True
+    supports_streaming = True
+    supports_vision = True
+
+    MODEL_PRICING = ClaudeDriver.MODEL_PRICING
+
+    def __init__(self, api_key: str | None = None, model: str = "claude-3-5-haiku-20241022"):
+        self.api_key = api_key or os.getenv("CLAUDE_API_KEY")
+        self.model = model or os.getenv("CLAUDE_MODEL_NAME", "claude-3-5-haiku-20241022")
+
+    supports_messages = True
+
+    def _prepare_messages(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
+        from .vision_helpers import _prepare_claude_vision_messages
+
+        return _prepare_claude_vision_messages(messages)
+
+    async def generate(self, prompt: str, options: dict[str, Any]) -> dict[str, Any]:
+        messages = [{"role": "user", "content": prompt}]
+        return await self._do_generate(messages, options)
+
+    async def generate_messages(self, messages: list[dict[str, str]], options: dict[str, Any]) -> dict[str, Any]:
+        return await self._do_generate(self._prepare_messages(messages), options)
+
+    async def _do_generate(self, messages: list[dict[str, str]], options: dict[str, Any]) -> dict[str, Any]:
+        if anthropic is None:
+            raise RuntimeError("anthropic package not installed")
+
+        opts = {**{"temperature": 0.0, "max_tokens": 512}, **options}
+        model = options.get("model", self.model)
+
+        client = anthropic.AsyncAnthropic(api_key=self.api_key)
+
+        # Anthropic requires system messages as a top-level parameter
+        system_content, api_messages = self._extract_system_and_messages(messages)
+
+        # Build common kwargs
+        common_kwargs: dict[str, Any] = {
+            "model": model,
+            "messages": api_messages,
+            "temperature": opts["temperature"],
+            "max_tokens": opts["max_tokens"],
+        }
+        if system_content:
+            common_kwargs["system"] = system_content
+
+        # Native JSON mode: use tool-use for schema enforcement
+        if options.get("json_mode"):
+            json_schema = options.get("json_schema")
+            if json_schema:
+                tool_def = {
+                    "name": "extract_json",
+                    "description": "Extract structured data matching the schema",
+                    "input_schema": json_schema,
+                }
+                resp = await client.messages.create(
+                    **common_kwargs,
+                    tools=[tool_def],
+                    tool_choice={"type": "tool", "name": "extract_json"},
+                )
+                text = ""
+                for block in resp.content:
+                    if block.type == "tool_use":
+                        text = json.dumps(block.input)
+                        break
+            else:
+                resp = await client.messages.create(**common_kwargs)
+                text = resp.content[0].text
+        else:
+            resp = await client.messages.create(**common_kwargs)
+            text = resp.content[0].text
+
+        prompt_tokens = resp.usage.input_tokens
+        completion_tokens = resp.usage.output_tokens
+        total_tokens = prompt_tokens + completion_tokens
+
+        total_cost = self._calculate_cost("claude", model, prompt_tokens, completion_tokens)
+
+        meta = {
+            "prompt_tokens": prompt_tokens,
+            "completion_tokens": completion_tokens,
+            "total_tokens": total_tokens,
+            "cost": round(total_cost, 6),
+            "raw_response": dict(resp),
+            "model_name": model,
+        }
+
+        return {"text": text, "meta": meta}
+
+    # ------------------------------------------------------------------
+    # Helpers
+    # ------------------------------------------------------------------
+
+    def _extract_system_and_messages(
+        self, messages: list[dict[str, Any]]
+    ) -> tuple[str | None, list[dict[str, Any]]]:
+        """Separate system message from conversation messages for Anthropic API."""
+        system_content = None
+        api_messages: list[dict[str, Any]] = []
+        for msg in messages:
+            if msg.get("role") == "system":
+                system_content = msg.get("content", "")
+            else:
+                api_messages.append(msg)
+        return system_content, api_messages
+
+    # ------------------------------------------------------------------
+    # Tool use
+    # ------------------------------------------------------------------
+
+    async def generate_messages_with_tools(
+        self,
+        messages: list[dict[str, Any]],
+        tools: list[dict[str, Any]],
+        options: dict[str, Any],
+    ) -> dict[str, Any]:
+        """Generate a response that may include tool calls (Anthropic)."""
+        if anthropic is None:
+            raise RuntimeError("anthropic package not installed")
+
+        opts = {**{"temperature": 0.0, "max_tokens": 512}, **options}
+        model = options.get("model", self.model)
+        client = anthropic.AsyncAnthropic(api_key=self.api_key)
+
+        system_content, api_messages = self._extract_system_and_messages(messages)
+
+        # Convert tools from OpenAI format to Anthropic format if needed
+        anthropic_tools = []
+        for t in tools:
+            if "type" in t and t["type"] == "function":
+                # OpenAI format -> Anthropic format
+                fn = t["function"]
+                anthropic_tools.append({
+                    "name": fn["name"],
+                    "description": fn.get("description", ""),
+                    "input_schema": fn.get("parameters", {"type": "object", "properties": {}}),
+                })
+            elif "input_schema" in t:
+                # Already Anthropic format
+                anthropic_tools.append(t)
+            else:
+                anthropic_tools.append(t)
+
+        kwargs: dict[str, Any] = {
+            "model": model,
+            "messages": api_messages,
+            "temperature": opts["temperature"],
+            "max_tokens": opts["max_tokens"],
+            "tools": anthropic_tools,
+        }
+        if system_content:
+            kwargs["system"] = system_content
+
+        resp = await client.messages.create(**kwargs)
+
+        prompt_tokens = resp.usage.input_tokens
+        completion_tokens = resp.usage.output_tokens
+        total_tokens = prompt_tokens + completion_tokens
+        total_cost = self._calculate_cost("claude", model, prompt_tokens, completion_tokens)
+
+        meta = {
+            "prompt_tokens": prompt_tokens,
+            "completion_tokens": completion_tokens,
+            "total_tokens": total_tokens,
+            "cost": round(total_cost, 6),
+            "raw_response": dict(resp),
+            "model_name": model,
+        }
+
+        text = ""
+        tool_calls_out: list[dict[str, Any]] = []
+        for block in resp.content:
+            if block.type == "text":
+                text += block.text
+            elif block.type == "tool_use":
+                tool_calls_out.append({
+                    "id": block.id,
+                    "name": block.name,
+                    "arguments": block.input,
+                })
+
+        return {
+            "text": text,
+            "meta": meta,
+            "tool_calls": tool_calls_out,
+            "stop_reason": resp.stop_reason,
+        }
+
+    # ------------------------------------------------------------------
+    # Streaming
+    # ------------------------------------------------------------------
+
+    async def generate_messages_stream(
+        self,
+        messages: list[dict[str, Any]],
+        options: dict[str, Any],
+    ) -> AsyncIterator[dict[str, Any]]:
+        """Yield response chunks via Anthropic streaming API."""
+        if anthropic is None:
+            raise RuntimeError("anthropic package not installed")
+
+        opts = {**{"temperature": 0.0, "max_tokens": 512}, **options}
+        model = options.get("model", self.model)
+        client = anthropic.AsyncAnthropic(api_key=self.api_key)
+
+        system_content, api_messages = self._extract_system_and_messages(messages)
+
+        kwargs: dict[str, Any] = {
+            "model": model,
+            "messages": api_messages,
+            "temperature": opts["temperature"],
+            "max_tokens": opts["max_tokens"],
+        }
+        if system_content:
+            kwargs["system"] = system_content
+
+        full_text = ""
+        prompt_tokens = 0
+        completion_tokens = 0
+
+        async with client.messages.stream(**kwargs) as stream:
+            async for event in stream:
+                if hasattr(event, "type"):
+                    if event.type == "content_block_delta" and hasattr(event, "delta"):
+                        delta_text = getattr(event.delta, "text", "")
+                        if delta_text:
+                            full_text += delta_text
+                            yield {"type": "delta", "text": delta_text}
+                    elif event.type == "message_delta" and hasattr(event, "usage"):
+                        completion_tokens = getattr(event.usage, "output_tokens", 0)
+                    elif event.type == "message_start" and hasattr(event, "message"):
+                        usage = getattr(event.message, "usage", None)
+                        if usage:
+                            prompt_tokens = getattr(usage, "input_tokens", 0)
+
+        total_tokens = prompt_tokens + completion_tokens
+        total_cost = self._calculate_cost("claude", model, prompt_tokens, completion_tokens)
+
+        yield {
+            "type": "done",
+            "text": full_text,
+            "meta": {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "cost": round(total_cost, 6),
+                "raw_response": {},
+                "model_name": model,
+            },
+        }
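The new file gives the async side a Claude driver with the same surface as the sync one. A minimal usage sketch based only on the code above; the API key placeholder and the prompt are illustrative, while the class name, generate signature, json_mode/json_schema options, and result shape all come from the diff:

import asyncio

from prompture.drivers.async_claude_driver import AsyncClaudeDriver

async def main() -> None:
    # Falls back to the CLAUDE_API_KEY env var when api_key is omitted.
    driver = AsyncClaudeDriver(api_key="sk-ant-...", model="claude-3-5-haiku-20241022")
    schema = {"type": "object", "properties": {"greeting": {"type": "string"}}}
    # json_mode with a schema routes through the forced "extract_json" tool call.
    result = await driver.generate(
        "Reply with a greeting.",
        {"json_mode": True, "json_schema": schema},
    )
    print(result["text"])          # JSON string produced via tool use
    print(result["meta"]["cost"])  # rounded to 6 decimals, as in the sync drivers

asyncio.run(main())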