prompture 0.0.29.dev8__py3-none-any.whl → 0.0.38.dev2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. prompture/__init__.py +264 -23
  2. prompture/_version.py +34 -0
  3. prompture/agent.py +924 -0
  4. prompture/agent_types.py +156 -0
  5. prompture/aio/__init__.py +74 -0
  6. prompture/async_agent.py +880 -0
  7. prompture/async_conversation.py +789 -0
  8. prompture/async_core.py +803 -0
  9. prompture/async_driver.py +193 -0
  10. prompture/async_groups.py +551 -0
  11. prompture/cache.py +469 -0
  12. prompture/callbacks.py +55 -0
  13. prompture/cli.py +63 -4
  14. prompture/conversation.py +826 -0
  15. prompture/core.py +894 -263
  16. prompture/cost_mixin.py +51 -0
  17. prompture/discovery.py +187 -0
  18. prompture/driver.py +206 -5
  19. prompture/drivers/__init__.py +175 -67
  20. prompture/drivers/airllm_driver.py +109 -0
  21. prompture/drivers/async_airllm_driver.py +26 -0
  22. prompture/drivers/async_azure_driver.py +123 -0
  23. prompture/drivers/async_claude_driver.py +113 -0
  24. prompture/drivers/async_google_driver.py +316 -0
  25. prompture/drivers/async_grok_driver.py +97 -0
  26. prompture/drivers/async_groq_driver.py +90 -0
  27. prompture/drivers/async_hugging_driver.py +61 -0
  28. prompture/drivers/async_lmstudio_driver.py +148 -0
  29. prompture/drivers/async_local_http_driver.py +44 -0
  30. prompture/drivers/async_ollama_driver.py +135 -0
  31. prompture/drivers/async_openai_driver.py +102 -0
  32. prompture/drivers/async_openrouter_driver.py +102 -0
  33. prompture/drivers/async_registry.py +133 -0
  34. prompture/drivers/azure_driver.py +42 -9
  35. prompture/drivers/claude_driver.py +257 -34
  36. prompture/drivers/google_driver.py +295 -42
  37. prompture/drivers/grok_driver.py +35 -32
  38. prompture/drivers/groq_driver.py +33 -26
  39. prompture/drivers/hugging_driver.py +6 -6
  40. prompture/drivers/lmstudio_driver.py +97 -19
  41. prompture/drivers/local_http_driver.py +6 -6
  42. prompture/drivers/ollama_driver.py +168 -23
  43. prompture/drivers/openai_driver.py +184 -9
  44. prompture/drivers/openrouter_driver.py +37 -25
  45. prompture/drivers/registry.py +306 -0
  46. prompture/drivers/vision_helpers.py +153 -0
  47. prompture/field_definitions.py +106 -96
  48. prompture/group_types.py +147 -0
  49. prompture/groups.py +530 -0
  50. prompture/image.py +180 -0
  51. prompture/logging.py +80 -0
  52. prompture/model_rates.py +217 -0
  53. prompture/persistence.py +254 -0
  54. prompture/persona.py +482 -0
  55. prompture/runner.py +49 -47
  56. prompture/scaffold/__init__.py +1 -0
  57. prompture/scaffold/generator.py +84 -0
  58. prompture/scaffold/templates/Dockerfile.j2 +12 -0
  59. prompture/scaffold/templates/README.md.j2 +41 -0
  60. prompture/scaffold/templates/config.py.j2 +21 -0
  61. prompture/scaffold/templates/env.example.j2 +8 -0
  62. prompture/scaffold/templates/main.py.j2 +86 -0
  63. prompture/scaffold/templates/models.py.j2 +40 -0
  64. prompture/scaffold/templates/requirements.txt.j2 +5 -0
  65. prompture/serialization.py +218 -0
  66. prompture/server.py +183 -0
  67. prompture/session.py +117 -0
  68. prompture/settings.py +19 -1
  69. prompture/tools.py +219 -267
  70. prompture/tools_schema.py +254 -0
  71. prompture/validator.py +3 -3
  72. prompture-0.0.38.dev2.dist-info/METADATA +369 -0
  73. prompture-0.0.38.dev2.dist-info/RECORD +77 -0
  74. {prompture-0.0.29.dev8.dist-info → prompture-0.0.38.dev2.dist-info}/WHEEL +1 -1
  75. prompture-0.0.29.dev8.dist-info/METADATA +0 -368
  76. prompture-0.0.29.dev8.dist-info/RECORD +0 -27
  77. {prompture-0.0.29.dev8.dist-info → prompture-0.0.38.dev2.dist-info}/entry_points.txt +0 -0
  78. {prompture-0.0.29.dev8.dist-info → prompture-0.0.38.dev2.dist-info}/licenses/LICENSE +0 -0
  79. {prompture-0.0.29.dev8.dist-info → prompture-0.0.38.dev2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,84 @@
1
+ """Project scaffolding generator.
2
+
3
+ Renders Jinja2 templates into a standalone FastAPI project directory
4
+ that users can customize and deploy.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from pathlib import Path
10
+
11
+ try:
12
+ from jinja2 import Environment, FileSystemLoader
13
+ except ImportError:
14
+ Environment = None # type: ignore[assignment,misc]
15
+ FileSystemLoader = None # type: ignore[assignment,misc]
16
+
17
# Directory holding the bundled Jinja2 templates (the sibling "templates" folder).
_TEMPLATES_DIR = Path(__file__).parent / "templates"

# Map from template file -> output path (relative to project root).
_FILE_MAP = {
    "main.py.j2": "app/main.py",
    "models.py.j2": "app/models.py",
    "config.py.j2": "app/config.py",
    "requirements.txt.j2": "requirements.txt",
    "env.example.j2": ".env.example",
    "README.md.j2": "README.md",
}

# Docker-specific templates, merged into the file map only when
# scaffold_project(include_docker=True).
_DOCKER_FILES = {
    "Dockerfile.j2": "Dockerfile",
}
34
def scaffold_project(
    output_dir: str,
    project_name: str = "my_app",
    model_name: str = "openai/gpt-4o-mini",
    include_docker: bool = True,
) -> Path:
    """Render all templates and write the project to *output_dir*.

    Parameters:
        output_dir: Destination directory (created if needed).
        project_name: Human-friendly project name used in templates.
        model_name: Default model string baked into config.
        include_docker: Whether to include Dockerfile.

    Returns:
        The :class:`Path` to the generated project root.
    """
    # jinja2 is an optional dependency; fail loudly with install guidance.
    if Environment is None:
        raise ImportError("jinja2 is required for scaffolding: pip install prompture[scaffold]")

    jinja_env = Environment(
        loader=FileSystemLoader(str(_TEMPLATES_DIR)),
        keep_trailing_newline=True,
    )

    render_context = {
        "project_name": project_name,
        "model_name": model_name,
        "include_docker": include_docker,
    }

    project_root = Path(output_dir)

    # Start from the base file map; add Docker artifacts only when requested.
    targets = {**_FILE_MAP, **(_DOCKER_FILES if include_docker else {})}

    for template_name, rel_path in targets.items():
        destination = project_root / rel_path
        destination.parent.mkdir(parents=True, exist_ok=True)
        destination.write_text(
            jinja_env.get_template(template_name).render(**render_context),
            encoding="utf-8",
        )

    # Ensure the generated "app" directory is an importable package.
    init_file = project_root / "app" / "__init__.py"
    if not init_file.exists():
        init_file.write_text("", encoding="utf-8")

    return project_root
@@ -0,0 +1,12 @@
1
# Slim Python base keeps the image small while providing CPython 3.11.
FROM python:3.11-slim

WORKDIR /app

# Install dependencies first so this layer is cached across code-only changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application source.
COPY . .

EXPOSE 8000

# Serve the FastAPI app with uvicorn, bound to all interfaces.
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
@@ -0,0 +1,41 @@
1
+ # {{ project_name }}
2
+
3
+ A FastAPI server powered by [Prompture](https://github.com/jhd3197/prompture) for structured LLM output.
4
+
5
+ ## Quick start
6
+
7
+ ```bash
8
+ # Install dependencies
9
+ pip install -r requirements.txt
10
+
11
+ # Copy and edit environment config
12
+ cp .env.example .env
13
+
14
+ # Run the server
15
+ uvicorn app.main:app --reload
16
+ ```
17
+
18
+ ## API endpoints
19
+
20
+ | Method | Path | Description |
21
+ |--------|------|-------------|
22
+ | POST | `/v1/chat` | Send a message, get a response |
23
+ | POST | `/v1/extract` | Extract structured JSON with schema |
24
+ | GET | `/v1/conversations/{id}` | Get conversation history |
25
+ | DELETE | `/v1/conversations/{id}` | Delete a conversation |
26
+
27
+ ## Example
28
+
29
+ ```bash
30
+ curl -X POST http://localhost:8000/v1/chat \
31
+ -H "Content-Type: application/json" \
32
+ -d '{"message": "Hello!"}'
33
+ ```
34
+ {% if include_docker %}
35
+ ## Docker
36
+
37
+ ```bash
38
+ docker build -t {{ project_name }} .
39
+ docker run -p 8000:8000 --env-file .env {{ project_name }}
40
+ ```
41
+ {% endif %}
@@ -0,0 +1,21 @@
1
"""Configuration for {{ project_name }}."""

from __future__ import annotations

from pydantic_settings import BaseSettings


class Settings(BaseSettings):
    """Application settings, loaded from the environment and the .env file."""

    # Model identifier and default system prompt baked in at scaffold time.
    model_name: str = "{{ model_name }}"
    system_prompt: str = "You are a helpful assistant."
    # Origins accepted by the CORS middleware ("*" allows all).
    cors_origins: list[str] = ["*"]

    # Provider API keys (loaded from environment / .env)
    openai_api_key: str = ""
    claude_api_key: str = ""
    google_api_key: str = ""

    model_config = {"env_file": ".env", "env_file_encoding": "utf-8"}


# Shared singleton imported by the rest of the app.
settings = Settings()
@@ -0,0 +1,8 @@
1
+ # {{ project_name }} environment configuration
2
+ MODEL_NAME={{ model_name }}
3
+ SYSTEM_PROMPT=You are a helpful assistant.
4
+
5
+ # Provider API keys (uncomment and fill in as needed)
6
+ # OPENAI_API_KEY=sk-...
7
+ # CLAUDE_API_KEY=sk-ant-...
8
+ # GOOGLE_API_KEY=...
@@ -0,0 +1,86 @@
1
+ """{{ project_name }} -- FastAPI server powered by Prompture."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ import uuid
7
+ from typing import Any
8
+
9
+ from fastapi import FastAPI, HTTPException
10
+ from fastapi.middleware.cors import CORSMiddleware
11
+
12
+ from .config import settings
13
+ from .models import (
14
+ ChatRequest,
15
+ ChatResponse,
16
+ ConversationHistory,
17
+ ExtractRequest,
18
+ ExtractResponse,
19
+ )
20
+
21
+ from prompture import AsyncConversation
22
+
23
app = FastAPI(title="{{ project_name }}", version="0.1.0")

# Allow browser clients from the configured origins (defaults to "*").
app.add_middleware(
    CORSMiddleware,
    allow_origins=settings.cors_origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# In-memory conversation store keyed by conversation id; entries live only
# for the lifetime of this process (no persistence across restarts).
_conversations: dict[str, AsyncConversation] = {}
34
+
35
+
36
def _get_or_create_conversation(conv_id: str | None) -> tuple[str, AsyncConversation]:
    """Look up an existing conversation by id, or create and register a new one."""
    existing = _conversations.get(conv_id) if conv_id else None
    if existing is not None:
        return conv_id, existing

    # Keep a caller-supplied id; otherwise mint a short random one.
    new_id = conv_id or uuid.uuid4().hex[:12]
    _conversations[new_id] = AsyncConversation(
        model_name=settings.model_name,
        system_prompt=settings.system_prompt,
    )
    return new_id, _conversations[new_id]
46
+
47
+
48
@app.post("/v1/chat", response_model=ChatResponse)
async def chat(request: ChatRequest):
    """Send one user message to the conversation and return the assistant reply."""
    conv_id, conv = _get_or_create_conversation(request.conversation_id)
    reply = await conv.ask(request.message, request.options)
    return ChatResponse(message=reply, conversation_id=conv_id, usage=conv.usage)
53
+
54
+
55
@app.post("/v1/extract", response_model=ExtractResponse)
async def extract(request: ExtractRequest):
    """Extract structured JSON from free text using the supplied JSON schema."""
    conv_id, conv = _get_or_create_conversation(request.conversation_id)
    extraction = await conv.ask_for_json(
        content=request.text,
        json_schema=request.schema_def,
    )
    return ExtractResponse(
        json_object=extraction["json_object"],
        conversation_id=conv_id,
        usage=conv.usage,
    )
67
+
68
+
69
@app.get("/v1/conversations/{conversation_id}", response_model=ConversationHistory)
async def get_conversation(conversation_id: str):
    """Return the message history and usage stats for a stored conversation."""
    conv = _conversations.get(conversation_id)
    if conv is None:
        raise HTTPException(status_code=404, detail="Conversation not found")
    return ConversationHistory(
        conversation_id=conversation_id,
        messages=conv.messages,
        usage=conv.usage,
    )
79
+
80
+
81
@app.delete("/v1/conversations/{conversation_id}")
async def delete_conversation(conversation_id: str):
    """Remove a conversation from the in-memory store."""
    try:
        del _conversations[conversation_id]
    except KeyError:
        raise HTTPException(status_code=404, detail="Conversation not found") from None
    return {"status": "deleted", "conversation_id": conversation_id}
@@ -0,0 +1,40 @@
1
"""Pydantic request/response models for {{ project_name }}."""

from __future__ import annotations

from typing import Any

from pydantic import BaseModel, Field


class ChatRequest(BaseModel):
    """Body for POST /v1/chat."""

    message: str
    # Omit to start a new conversation; pass an id to continue an existing one.
    conversation_id: str | None = None
    stream: bool = False
    # Extra provider options forwarded to the underlying driver.
    options: dict[str, Any] | None = None


class ChatResponse(BaseModel):
    """Reply payload for POST /v1/chat."""

    message: str
    conversation_id: str
    usage: dict[str, Any]


class ExtractRequest(BaseModel):
    """Body for POST /v1/extract."""

    text: str
    # Accepted as "schema" on the wire; aliased because BaseModel reserves
    # the `schema` name.
    schema_def: dict[str, Any] = Field(..., alias="schema")
    conversation_id: str | None = None

    model_config = {"populate_by_name": True}


class ExtractResponse(BaseModel):
    """Reply payload for POST /v1/extract."""

    json_object: dict[str, Any]
    conversation_id: str
    usage: dict[str, Any]


class ConversationHistory(BaseModel):
    """Reply payload for GET /v1/conversations/{conversation_id}."""

    conversation_id: str
    messages: list[dict[str, Any]]
    usage: dict[str, Any]
@@ -0,0 +1,5 @@
1
+ prompture>=0.0.30
2
+ fastapi>=0.100
3
+ uvicorn[standard]>=0.20
4
+ pydantic-settings>=2.0
5
+ python-dotenv>=0.19.0
@@ -0,0 +1,218 @@
1
+ """Conversation serialization — pure data transforms for export/import.
2
+
3
+ Handles converting Conversation state to/from plain dicts suitable for
4
+ JSON serialization. No I/O is performed here; see :mod:`persistence`
5
+ for file and database storage.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import copy
11
+ from datetime import datetime, timezone
12
+ from typing import Any
13
+
14
+ from .image import ImageContent
15
+ from .session import UsageSession
16
+
17
+ EXPORT_VERSION = 1
18
+
19
+
20
+ # ------------------------------------------------------------------
21
+ # Message content helpers
22
+ # ------------------------------------------------------------------
23
+
24
+
25
+ def _serialize_message_content(content: Any) -> Any:
26
+ """Convert ``ImageContent`` objects inside message content to plain dicts."""
27
+ if isinstance(content, str):
28
+ return content
29
+
30
+ if isinstance(content, list):
31
+ out: list[Any] = []
32
+ for block in content:
33
+ if isinstance(block, dict) and block.get("type") == "image":
34
+ source = block.get("source")
35
+ if isinstance(source, ImageContent):
36
+ out.append(
37
+ {
38
+ "type": "image",
39
+ "source": {
40
+ "data": source.data,
41
+ "media_type": source.media_type,
42
+ "source_type": source.source_type,
43
+ "url": source.url,
44
+ },
45
+ }
46
+ )
47
+ elif isinstance(source, dict):
48
+ out.append(block)
49
+ else:
50
+ out.append(block)
51
+ else:
52
+ out.append(block)
53
+ return out
54
+
55
+ return content
56
+
57
+
58
+ def _deserialize_message_content(content: Any) -> Any:
59
+ """Reconstruct ``ImageContent`` objects from plain dicts in message content."""
60
+ if isinstance(content, str):
61
+ return content
62
+
63
+ if isinstance(content, list):
64
+ out: list[Any] = []
65
+ for block in content:
66
+ if isinstance(block, dict) and block.get("type") == "image":
67
+ source = block.get("source")
68
+ if isinstance(source, dict) and "media_type" in source:
69
+ out.append(
70
+ {
71
+ "type": "image",
72
+ "source": ImageContent(
73
+ data=source.get("data", ""),
74
+ media_type=source["media_type"],
75
+ source_type=source.get("source_type", "base64"),
76
+ url=source.get("url"),
77
+ ),
78
+ }
79
+ )
80
+ else:
81
+ out.append(block)
82
+ else:
83
+ out.append(block)
84
+ return out
85
+
86
+ return content
87
+
88
+
89
+ # ------------------------------------------------------------------
90
+ # UsageSession export/import
91
+ # ------------------------------------------------------------------
92
+
93
+
94
def export_usage_session(session: UsageSession) -> dict[str, Any]:
    """Serialize a :class:`UsageSession` to a plain dict."""
    snapshot: dict[str, Any] = {
        name: getattr(session, name)
        for name in (
            "prompt_tokens",
            "completion_tokens",
            "total_tokens",
            "total_cost",
            "call_count",
            "errors",
        )
    }
    # Copy the internal per-model stats so callers cannot mutate session state.
    snapshot["per_model"] = dict(session._per_model)
    return snapshot
105
+
106
+
107
def import_usage_session(data: dict[str, Any]) -> UsageSession:
    """Reconstruct a :class:`UsageSession` from an exported dict."""
    restored = UsageSession(
        prompt_tokens=data.get("prompt_tokens", 0),
        completion_tokens=data.get("completion_tokens", 0),
        total_tokens=data.get("total_tokens", 0),
        total_cost=data.get("total_cost", 0.0),
        call_count=data.get("call_count", 0),
        errors=data.get("errors", 0),
    )
    # Restore per-model stats, copying each entry defensively.
    for model_name, stats in data.get("per_model", {}).items():
        restored._per_model[model_name] = dict(stats)
    return restored
121
+
122
+
123
+ # ------------------------------------------------------------------
124
+ # Conversation export/import
125
+ # ------------------------------------------------------------------
126
+
127
+
128
def export_conversation(
    *,
    model_name: str,
    system_prompt: str | None,
    options: dict[str, Any],
    messages: list[dict[str, Any]],
    usage: dict[str, Any],
    max_tool_rounds: int,
    tools_metadata: list[dict[str, Any]] | None = None,
    usage_session: UsageSession | None = None,
    metadata: dict[str, Any] | None = None,
    conversation_id: str,
    strip_images: bool = False,
) -> dict[str, Any]:
    """Export conversation state to a JSON-serializable dict.

    Args:
        strip_images: When *True*, image blocks are removed from messages
            and list-of-blocks content that becomes text-only is collapsed
            to a plain string.
    """

    def _export_content(content: Any) -> Any:
        # Optionally drop image blocks; collapse a lone text block to a str.
        if strip_images and isinstance(content, list):
            kept = [
                b for b in content
                if not (isinstance(b, dict) and b.get("type") == "image")
            ]
            if not kept:
                return ""
            if len(kept) == 1 and isinstance(kept[0], dict) and kept[0].get("type") == "text":
                return kept[0]["text"]
            return _serialize_message_content(kept)
        return _serialize_message_content(content)

    serialized_messages: list[dict[str, Any]] = [
        {**msg, "content": _export_content(msg.get("content"))} for msg in messages
    ]

    # Stamp creation/last-active times (UTC ISO-8601) and the turn counter.
    timestamp = datetime.now(timezone.utc).isoformat()
    meta = dict(metadata) if metadata else {}
    meta.setdefault("created_at", timestamp)
    meta["last_active"] = timestamp
    meta["turn_count"] = usage.get("turns", 0)

    payload: dict[str, Any] = {
        "version": EXPORT_VERSION,
        "conversation_id": conversation_id,
        "model_name": model_name,
        "system_prompt": system_prompt,
        "options": dict(options),
        "messages": serialized_messages,
        "usage": dict(usage),
        "max_tool_rounds": max_tool_rounds,
        "metadata": meta,
    }

    if tools_metadata:
        payload["tools"] = tools_metadata
    if usage_session is not None:
        payload["usage_session"] = export_usage_session(usage_session)

    return payload
192
+
193
+
194
def import_conversation(data: dict[str, Any]) -> dict[str, Any]:
    """Validate and deserialize an exported conversation dict.

    Returns a dict with deserialized messages (``ImageContent`` objects
    reconstructed).

    Raises:
        ValueError: If the export version is unsupported.
    """
    version = data.get("version")
    if version != EXPORT_VERSION:
        raise ValueError(f"Unsupported export version: {version}. Expected {EXPORT_VERSION}.")

    # Deep-copy so the caller's exported dict is never mutated.
    restored = copy.deepcopy(data)

    # Rebuild ImageContent objects inside each message's content.
    for message in restored.get("messages", []):
        if "content" in message:
            message["content"] = _deserialize_message_content(message["content"])

    # Rehydrate the usage session when one was exported.
    usage_blob = restored.get("usage_session")
    if isinstance(usage_blob, dict):
        restored["usage_session"] = import_usage_session(usage_blob)

    return restored