abstractcore 2.9.1__py3-none-any.whl → 2.11.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. abstractcore/__init__.py +7 -27
  2. abstractcore/apps/deepsearch.py +9 -4
  3. abstractcore/apps/extractor.py +33 -100
  4. abstractcore/apps/intent.py +19 -0
  5. abstractcore/apps/judge.py +20 -1
  6. abstractcore/apps/summarizer.py +20 -1
  7. abstractcore/architectures/detection.py +34 -1
  8. abstractcore/architectures/response_postprocessing.py +313 -0
  9. abstractcore/assets/architecture_formats.json +38 -8
  10. abstractcore/assets/model_capabilities.json +882 -160
  11. abstractcore/compression/__init__.py +1 -2
  12. abstractcore/compression/glyph_processor.py +6 -4
  13. abstractcore/config/main.py +52 -20
  14. abstractcore/config/manager.py +390 -12
  15. abstractcore/config/vision_config.py +5 -5
  16. abstractcore/core/interface.py +151 -3
  17. abstractcore/core/session.py +16 -10
  18. abstractcore/download.py +1 -1
  19. abstractcore/embeddings/manager.py +20 -6
  20. abstractcore/endpoint/__init__.py +2 -0
  21. abstractcore/endpoint/app.py +458 -0
  22. abstractcore/mcp/client.py +3 -1
  23. abstractcore/media/__init__.py +52 -17
  24. abstractcore/media/auto_handler.py +42 -22
  25. abstractcore/media/base.py +44 -1
  26. abstractcore/media/capabilities.py +12 -33
  27. abstractcore/media/enrichment.py +105 -0
  28. abstractcore/media/handlers/anthropic_handler.py +19 -28
  29. abstractcore/media/handlers/local_handler.py +124 -70
  30. abstractcore/media/handlers/openai_handler.py +19 -31
  31. abstractcore/media/processors/__init__.py +4 -2
  32. abstractcore/media/processors/audio_processor.py +57 -0
  33. abstractcore/media/processors/office_processor.py +8 -3
  34. abstractcore/media/processors/pdf_processor.py +46 -3
  35. abstractcore/media/processors/text_processor.py +22 -24
  36. abstractcore/media/processors/video_processor.py +58 -0
  37. abstractcore/media/types.py +97 -4
  38. abstractcore/media/utils/image_scaler.py +20 -2
  39. abstractcore/media/utils/video_frames.py +219 -0
  40. abstractcore/media/vision_fallback.py +136 -22
  41. abstractcore/processing/__init__.py +32 -3
  42. abstractcore/processing/basic_deepsearch.py +15 -10
  43. abstractcore/processing/basic_intent.py +3 -2
  44. abstractcore/processing/basic_judge.py +3 -2
  45. abstractcore/processing/basic_summarizer.py +1 -1
  46. abstractcore/providers/__init__.py +3 -1
  47. abstractcore/providers/anthropic_provider.py +95 -8
  48. abstractcore/providers/base.py +1516 -81
  49. abstractcore/providers/huggingface_provider.py +546 -69
  50. abstractcore/providers/lmstudio_provider.py +30 -916
  51. abstractcore/providers/mlx_provider.py +382 -35
  52. abstractcore/providers/model_capabilities.py +5 -1
  53. abstractcore/providers/ollama_provider.py +99 -15
  54. abstractcore/providers/openai_compatible_provider.py +406 -180
  55. abstractcore/providers/openai_provider.py +188 -44
  56. abstractcore/providers/openrouter_provider.py +76 -0
  57. abstractcore/providers/registry.py +61 -5
  58. abstractcore/providers/streaming.py +138 -33
  59. abstractcore/providers/vllm_provider.py +92 -817
  60. abstractcore/server/app.py +478 -28
  61. abstractcore/server/audio_endpoints.py +139 -0
  62. abstractcore/server/vision_endpoints.py +1319 -0
  63. abstractcore/structured/handler.py +316 -41
  64. abstractcore/tools/common_tools.py +5501 -2012
  65. abstractcore/tools/comms_tools.py +1641 -0
  66. abstractcore/tools/core.py +37 -7
  67. abstractcore/tools/handler.py +4 -9
  68. abstractcore/tools/parser.py +49 -2
  69. abstractcore/tools/tag_rewriter.py +2 -1
  70. abstractcore/tools/telegram_tdlib.py +407 -0
  71. abstractcore/tools/telegram_tools.py +261 -0
  72. abstractcore/utils/cli.py +1085 -72
  73. abstractcore/utils/structured_logging.py +29 -8
  74. abstractcore/utils/token_utils.py +2 -0
  75. abstractcore/utils/truncation.py +29 -0
  76. abstractcore/utils/version.py +3 -4
  77. abstractcore/utils/vlm_token_calculator.py +12 -2
  78. abstractcore-2.11.4.dist-info/METADATA +562 -0
  79. abstractcore-2.11.4.dist-info/RECORD +133 -0
  80. {abstractcore-2.9.1.dist-info → abstractcore-2.11.4.dist-info}/WHEEL +1 -1
  81. {abstractcore-2.9.1.dist-info → abstractcore-2.11.4.dist-info}/entry_points.txt +1 -0
  82. abstractcore-2.9.1.dist-info/METADATA +0 -1190
  83. abstractcore-2.9.1.dist-info/RECORD +0 -119
  84. {abstractcore-2.9.1.dist-info → abstractcore-2.11.4.dist-info}/licenses/LICENSE +0 -0
  85. {abstractcore-2.9.1.dist-info → abstractcore-2.11.4.dist-info}/top_level.txt +0 -0
abstractcore/endpoint/app.py (new file)
@@ -0,0 +1,458 @@
+ """
+ AbstractEndpoint (AbstractCore) - single-model OpenAI-compatible server.
+
+ Unlike `abstractcore.server.app` (multi-provider gateway), this server loads one provider+model
+ once per worker and reuses it across requests. It is intended for hosting local inference
+ backends (HF GGUF / MLX) as a `/v1` endpoint.
+ """
+
+ from __future__ import annotations
+
+ import argparse
+ import json
+ import os
+ import time
+ import uuid
+ import threading
+ from dataclasses import dataclass
+ from typing import Any, Dict, Iterable, List, Optional, Tuple
+
+ from fastapi import FastAPI, HTTPException
+ from fastapi.responses import JSONResponse, StreamingResponse
+ from pydantic import BaseModel, Field
+
+ from ..core.factory import create_llm
+ from ..core.types import GenerateResponse
+
+
+ @dataclass(frozen=True)
+ class EndpointConfig:
+     provider: str
+     model: str
+     host: str = "0.0.0.0"
+     port: int = 8001
+
+
+ class ChatMessage(BaseModel):
+     role: str
+     content: Optional[Any] = None
+     tool_call_id: Optional[str] = None
+     tool_calls: Optional[List[Dict[str, Any]]] = None
+     name: Optional[str] = None
+
+
+ class ChatCompletionRequest(BaseModel):
+     model: str = Field(description="Model identifier (ignored/validated in single-model mode)")
+     messages: List[ChatMessage]
+
+     temperature: Optional[float] = 0.7
+     max_tokens: Optional[int] = None
+     top_p: Optional[float] = 1.0
+     stream: bool = False
+
+     tools: Optional[List[Dict[str, Any]]] = None
+     tool_choice: Optional[Any] = None
+
+     stop: Optional[Any] = None
+     seed: Optional[int] = None
+     frequency_penalty: Optional[float] = None
+     presence_penalty: Optional[float] = None
+
+     # OpenAI prompt caching (2025+): supported by OpenAI and forwarded by AbstractCore providers.
+     prompt_cache_key: Optional[str] = None
+
+
+ class PromptCacheSetRequest(BaseModel):
+     key: str = Field(description="Prompt cache key to create/select")
+     make_default: bool = Field(default=True, description="Set this key as the default for subsequent calls")
+     ttl_s: Optional[float] = Field(default=None, description="Optional in-process TTL (seconds) for this key")
+
+
+ class PromptCacheUpdateRequest(BaseModel):
+     key: str = Field(description="Prompt cache key to update/append into")
+     prompt: Optional[str] = Field(default=None, description="Raw prompt text (treated as a user message for chat templates)")
+     messages: Optional[List[Dict[str, Any]]] = Field(default=None, description="Optional message list to append (provider-dependent)")
+     system_prompt: Optional[str] = Field(default=None, description="Optional system prompt to append")
+     tools: Optional[List[Dict[str, Any]]] = Field(default=None, description="Optional tool definitions to append")
+     add_generation_prompt: bool = Field(default=False, description="If true, append an assistant preamble (backend-dependent)")
+     ttl_s: Optional[float] = Field(default=None, description="Optional TTL update (seconds)")
+
+
+ class PromptCacheForkRequest(BaseModel):
+     from_key: str = Field(description="Source prompt cache key")
+     to_key: str = Field(description="Destination prompt cache key")
+     make_default: bool = Field(default=False, description="Set the new key as default")
+     ttl_s: Optional[float] = Field(default=None, description="Optional TTL for the new key (seconds)")
+
+
+ class PromptCacheClearRequest(BaseModel):
+     key: Optional[str] = Field(default=None, description="If omitted, clears all in-process caches for this worker")
+
+
+ class PromptCachePrepareModulesRequest(BaseModel):
+     namespace: str = Field(description="Namespace used as a stable prefix for derived keys (e.g. tenant_id:model_id)")
+     modules: List[Dict[str, Any]] = Field(description="Ordered list of cache modules (see abstractcore.providers.base.PromptCacheModule)")
+     make_default: bool = Field(default=False, description="Set the final derived key as default")
+     ttl_s: Optional[float] = Field(default=None, description="Optional TTL for derived keys (seconds)")
+     version: int = Field(default=1, description="Hash version for key derivation (bump on formatting changes)")
+
+
+ def _extract_system_prompt(messages: List[ChatMessage]) -> Tuple[Optional[str], List[Dict[str, Any]]]:
+     system_parts: List[str] = []
+     out: List[Dict[str, Any]] = []
+     for msg in messages:
+         if msg.role == "system":
+             if isinstance(msg.content, str) and msg.content.strip():
+                 system_parts.append(msg.content.strip())
+             continue
+         out.append(msg.model_dump(exclude_none=True))
+
+     system_prompt = "\n\n".join(system_parts) if system_parts else None
+     return system_prompt, out
+
+
+ def _format_tool_calls(tool_calls: Optional[List[Dict[str, Any]]]) -> Optional[List[Dict[str, Any]]]:
+     if not isinstance(tool_calls, list) or not tool_calls:
+         return None
+     formatted = []
+     for tc in tool_calls:
+         if not isinstance(tc, dict):
+             continue
+         formatted.append(
+             {
+                 "id": tc.get("id"),
+                 "type": tc.get("type") or "function",
+                 "function": {
+                     "name": tc.get("name"),
+                     "arguments": tc.get("arguments", ""),
+                 },
+             }
+         )
+     return formatted or None
+
+
+ def _usage_to_openai(usage: Optional[Dict[str, Any]]) -> Optional[Dict[str, int]]:
+     if not isinstance(usage, dict) or not usage:
+         return None
+     prompt_tokens = usage.get("prompt_tokens") or usage.get("input_tokens") or 0
+     completion_tokens = usage.get("completion_tokens") or usage.get("output_tokens") or 0
+     total_tokens = usage.get("total_tokens")
+     if total_tokens is None:
+         try:
+             total_tokens = int(prompt_tokens) + int(completion_tokens)
+         except Exception:
+             total_tokens = 0
+     return {
+         "prompt_tokens": int(prompt_tokens) if prompt_tokens is not None else 0,
+         "completion_tokens": int(completion_tokens) if completion_tokens is not None else 0,
+         "total_tokens": int(total_tokens) if total_tokens is not None else 0,
+     }
+
+
+ def _maybe_strip_provider_prefix(model: str) -> str:
+     if not isinstance(model, str):
+         return ""
+     s = model.strip()
+     if not s:
+         return ""
+     # If the prefix looks like an AbstractCore provider (first segment), strip it.
+     if "/" in s:
+         head, tail = s.split("/", 1)
+         if head.lower() in {
+             "openai",
+             "anthropic",
+             "openrouter",
+             "ollama",
+             "lmstudio",
+             "vllm",
+             "openai-compatible",
+             "huggingface",
+             "mlx",
+         }:
+             return tail
+     return s
+
+
+ def create_app(
+     *,
+     provider_name: Optional[str] = None,
+     model: Optional[str] = None,
+     provider_factory: Optional[callable] = None,
+     provider_instance: Optional[Any] = None,
+ ) -> FastAPI:
+     if provider_instance is not None:
+         provider = provider_instance
+     else:
+         if provider_factory is not None:
+             provider = provider_factory()
+         else:
+             if not provider_name or not model:
+                 raise ValueError("provider_name and model are required when no provider_instance is provided")
+             provider = create_llm(provider_name, model=model)
+
+     app = FastAPI(title="AbstractEndpoint", version="0.1.0")
+     lock = threading.Lock()
+     created_at = int(time.time())
+     model_id = getattr(provider, "model", model or "unknown")
+
+     def _has_cache_api() -> bool:
+         return bool(getattr(provider, "supports_prompt_cache", lambda: False)())
+
+     @app.get("/health")
+     def health():
+         return {"status": "healthy", "model": model_id}
+
+     @app.get("/v1/models")
+     def list_models():
+         return {
+             "object": "list",
+             "data": [
+                 {
+                     "id": model_id,
+                     "object": "model",
+                     "created": created_at,
+                     "owned_by": "abstractendpoint",
+                 }
+             ],
+         }
+
+     @app.get("/acore/prompt_cache/stats")
+     def prompt_cache_stats():
+         if not _has_cache_api() or not hasattr(provider, "get_prompt_cache_stats"):
+             return {"supported": False, "error": "provider does not expose prompt cache stats"}
+         with lock:
+             try:
+                 return {"supported": True, "stats": provider.get_prompt_cache_stats()}  # type: ignore[no-any-return]
+             except Exception as e:
+                 return {"supported": False, "error": str(e)}
+
+     @app.post("/acore/prompt_cache/set")
+     def prompt_cache_set(req: PromptCacheSetRequest):
+         if not _has_cache_api() or not hasattr(provider, "prompt_cache_set"):
+             return {"supported": False, "error": "provider does not support prompt cache control plane"}
+         with lock:
+             try:
+                 ok = provider.prompt_cache_set(req.key, make_default=req.make_default, ttl_s=req.ttl_s)  # type: ignore[arg-type]
+                 return {"supported": True, "ok": bool(ok)}
+             except Exception as e:
+                 return {"supported": False, "error": str(e)}
+
+     @app.post("/acore/prompt_cache/update")
+     def prompt_cache_update(req: PromptCacheUpdateRequest):
+         if not _has_cache_api() or not hasattr(provider, "prompt_cache_update"):
+             return {"supported": False, "error": "provider does not support prompt cache control plane"}
+         with lock:
+             try:
+                 ok = provider.prompt_cache_update(  # type: ignore[arg-type]
+                     req.key,
+                     prompt=req.prompt or "",
+                     messages=req.messages,
+                     system_prompt=req.system_prompt,
+                     tools=req.tools,
+                     add_generation_prompt=bool(req.add_generation_prompt),
+                     ttl_s=req.ttl_s,
+                 )
+                 return {"supported": True, "ok": bool(ok)}
+             except Exception as e:
+                 return {"supported": False, "error": str(e)}
+
+     @app.post("/acore/prompt_cache/fork")
+     def prompt_cache_fork(req: PromptCacheForkRequest):
+         if not _has_cache_api() or not hasattr(provider, "prompt_cache_fork"):
+             return {"supported": False, "error": "provider does not support prompt cache control plane"}
+         with lock:
+             try:
+                 ok = provider.prompt_cache_fork(  # type: ignore[arg-type]
+                     req.from_key,
+                     req.to_key,
+                     make_default=bool(req.make_default),
+                     ttl_s=req.ttl_s,
+                 )
+                 return {"supported": True, "ok": bool(ok)}
+             except Exception as e:
+                 return {"supported": False, "error": str(e)}
+
+     @app.post("/acore/prompt_cache/clear")
+     def prompt_cache_clear(req: PromptCacheClearRequest):
+         if not _has_cache_api() or not hasattr(provider, "prompt_cache_clear"):
+             return {"supported": False, "error": "provider does not support prompt cache control plane"}
+         with lock:
+             try:
+                 ok = provider.prompt_cache_clear(req.key)  # type: ignore[arg-type]
+                 return {"supported": True, "ok": bool(ok)}
+             except Exception as e:
+                 return {"supported": False, "error": str(e)}
+
+     @app.post("/acore/prompt_cache/prepare_modules")
+     def prompt_cache_prepare_modules(req: PromptCachePrepareModulesRequest):
+         if not _has_cache_api() or not hasattr(provider, "prompt_cache_prepare_modules"):
+             return {"supported": False, "error": "provider does not support prompt cache module preparation"}
+         with lock:
+             try:
+                 result = provider.prompt_cache_prepare_modules(  # type: ignore[arg-type]
+                     namespace=req.namespace,
+                     modules=req.modules,
+                     make_default=bool(req.make_default),
+                     ttl_s=req.ttl_s,
+                     version=int(req.version),
+                 )
+                 return result
+             except Exception as e:
+                 return {"supported": False, "error": str(e)}
+
+     @app.post("/v1/chat/completions")
+     def chat_completions(request: ChatCompletionRequest):
+         requested_model = _maybe_strip_provider_prefix(request.model)
+         if requested_model and requested_model != model_id:
+             raise HTTPException(
+                 status_code=400,
+                 detail={
+                     "error": {
+                         "message": f"This endpoint serves model '{model_id}', but request asked for '{requested_model}'.",
+                         "type": "invalid_request_error",
+                     }
+                 },
+             )
+
+         system_prompt, messages = _extract_system_prompt(request.messages)
+
+         gen_kwargs: Dict[str, Any] = {}
+         if request.temperature is not None:
+             gen_kwargs["temperature"] = request.temperature
+         if request.max_tokens is not None:
+             gen_kwargs["max_tokens"] = request.max_tokens
+         if request.top_p is not None:
+             gen_kwargs["top_p"] = request.top_p
+         if request.seed is not None:
+             gen_kwargs["seed"] = request.seed
+         if request.frequency_penalty is not None:
+             gen_kwargs["frequency_penalty"] = request.frequency_penalty
+         if request.presence_penalty is not None:
+             gen_kwargs["presence_penalty"] = request.presence_penalty
+         if request.stop is not None:
+             gen_kwargs["stop"] = request.stop
+         if isinstance(request.prompt_cache_key, str) and request.prompt_cache_key.strip():
+             gen_kwargs["prompt_cache_key"] = request.prompt_cache_key.strip()
+
+         completion_id = f"chatcmpl-{uuid.uuid4().hex}"
+         response_created = int(time.time())
+
+         def _non_streaming_response(resp: GenerateResponse) -> JSONResponse:
+             tool_calls = _format_tool_calls(resp.tool_calls)
+             message: Dict[str, Any] = {
+                 "role": "assistant",
+                 "content": resp.content,
+             }
+             if tool_calls:
+                 message["tool_calls"] = tool_calls
+
+             body: Dict[str, Any] = {
+                 "id": completion_id,
+                 "object": "chat.completion",
+                 "created": response_created,
+                 "model": model_id,
+                 "choices": [
+                     {
+                         "index": 0,
+                         "message": message,
+                         "finish_reason": resp.finish_reason or "stop",
+                     }
+                 ],
+             }
+             usage = _usage_to_openai(resp.usage)
+             if usage:
+                 body["usage"] = usage
+             return JSONResponse(content=body)
+
+         def _event_stream(chunks: Iterable[GenerateResponse]):
+             # Initial delta with role, matches OpenAI stream behavior.
+             yield "data: " + json.dumps(
+                 {
+                     "id": completion_id,
+                     "object": "chat.completion.chunk",
+                     "created": response_created,
+                     "model": model_id,
+                     "choices": [{"index": 0, "delta": {"role": "assistant"}, "finish_reason": None}],
+                 }
+             ) + "\n\n"
+
+             for chunk in chunks:
+                 delta: Dict[str, Any] = {}
+                 if chunk.content:
+                     delta["content"] = chunk.content
+
+                 tool_calls = _format_tool_calls(chunk.tool_calls)
+                 if tool_calls:
+                     delta["tool_calls"] = tool_calls
+
+                 if not delta:
+                     continue
+
+                 yield "data: " + json.dumps(
+                     {
+                         "id": completion_id,
+                         "object": "chat.completion.chunk",
+                         "created": response_created,
+                         "model": model_id,
+                         "choices": [{"index": 0, "delta": delta, "finish_reason": None}],
+                     }
+                 ) + "\n\n"
+
+             yield "data: [DONE]\n\n"
+
+         with lock:
+             resp = provider.generate(
+                 prompt="",
+                 messages=messages,
+                 system_prompt=system_prompt,
+                 tools=request.tools,
+                 stream=request.stream,
+                 **gen_kwargs,
+             )
+
+         if request.stream:
+             if not hasattr(resp, "__iter__"):
+                 raise HTTPException(status_code=500, detail="provider did not return an iterator for stream=True")
+             return StreamingResponse(
+                 _event_stream(resp),  # type: ignore[arg-type]
+                 media_type="text/event-stream",
+                 headers={"Cache-Control": "no-cache", "Connection": "keep-alive"},
+             )
+
+         if not isinstance(resp, GenerateResponse):
+             # Defensive: structured outputs or other provider behaviors.
+             resp = GenerateResponse(content=str(resp), model=model_id, finish_reason="stop")
+
+         return _non_streaming_response(resp)
+
+     return app
+
+
+ def _env(name: str, default: Optional[str] = None) -> Optional[str]:
+     v = os.getenv(name)
+     if v is None:
+         return default
+     v = str(v).strip()
+     return v if v else default
+
+
+ def _parse_args(argv: Optional[List[str]] = None) -> EndpointConfig:
+     parser = argparse.ArgumentParser(description="AbstractEndpoint: single-model /v1 server")
+     parser.add_argument("--provider", default=_env("ABSTRACTENDPOINT_PROVIDER", "mlx"))
+     parser.add_argument("--model", default=_env("ABSTRACTENDPOINT_MODEL", "mlx-community/Qwen3-4B"))
+     parser.add_argument("--host", default=_env("ABSTRACTENDPOINT_HOST", "0.0.0.0"))
+     parser.add_argument("--port", type=int, default=int(_env("ABSTRACTENDPOINT_PORT", "8001") or 8001))
+     args = parser.parse_args(argv)
+     return EndpointConfig(provider=args.provider, model=args.model, host=args.host, port=args.port)
+
+
+ def main(argv: Optional[List[str]] = None) -> None:
+     cfg = _parse_args(argv)
+     app = create_app(provider_name=cfg.provider, model=cfg.model)
+     import uvicorn
+
+     uvicorn.run(app, host=cfg.host, port=cfg.port, log_level="error")
+
+
+ if __name__ == "__main__":  # pragma: no cover
+     main()
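For orientation, here is a minimal usage sketch of the new single-model endpoint. It is not part of the diff: the route shapes, CLI flags, defaults, and the `prompt_cache_key` field come from `abstractcore/endpoint/app.py` above, while the `requests` client, the model choice, and the literal values are illustrative assumptions.

# Start one worker serving one model (defaults shown: provider "mlx", port 8001):
#   python -m abstractcore.endpoint.app --provider mlx --model mlx-community/Qwen3-4B --port 8001
import requests  # any HTTP client works; the server speaks plain OpenAI-style JSON

BASE = "http://localhost:8001"

# The endpoint serves exactly one model; discover its id via /v1/models.
model_id = requests.get(f"{BASE}/v1/models").json()["data"][0]["id"]

resp = requests.post(
    f"{BASE}/v1/chat/completions",
    json={
        "model": model_id,  # provider prefixes such as "mlx/..." are stripped server-side
        "messages": [
            {"role": "system", "content": "You are terse."},
            {"role": "user", "content": "Say hello."},
        ],
        "prompt_cache_key": "demo-session",  # optional: reuse the provider's prompt cache across calls
        "stream": False,
    },
    timeout=120,
)
print(resp.json()["choices"][0]["message"]["content"])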
abstractcore/mcp/client.py
@@ -6,6 +6,8 @@ from typing import Any, Dict, List, Optional
 
  import httpx
 
+ from ..utils.truncation import preview_text
+
 
  _DEFAULT_ACCEPT = "application/json, text/event-stream"
 
@@ -143,7 +145,7 @@ class McpClient:
 
          if resp.status_code < 200 or resp.status_code >= 300:
              body = (resp.text or "").strip()
-             raise McpHttpError(f"MCP HTTP {resp.status_code}: {body[:500]}")
+             raise McpHttpError(f"MCP HTTP {resp.status_code}: {preview_text(body, max_chars=500)}")
 
          try:
              data = resp.json()
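The `preview_text` helper that replaces the bare `body[:500]` slice lives in the new `abstractcore/utils/truncation.py`, which this diff does not show. A rough stand-in with the same call shape might look like the sketch below; the implementation and the `ellipsis` parameter are assumptions, only the `max_chars=500` call site is taken from the diff.

def preview_text(text, max_chars: int = 500, ellipsis: str = "...") -> str:
    """Return text unchanged when short, otherwise a bounded preview (hypothetical stand-in)."""
    text = "" if text is None else str(text)
    if len(text) <= max_chars:
        return text
    # Keep the total length within max_chars, ellipsis included.
    return text[: max(0, max_chars - len(ellipsis))] + ellipsis

# Same effect as the old body[:500], but explicit about truncation:
print(preview_text("x" * 2000, max_chars=500))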
abstractcore/media/__init__.py
@@ -10,12 +10,20 @@ The system follows AbstractCore's proven architectural patterns:
  - Unified API across all providers
  """
 
- # Core types and base classes
+ from __future__ import annotations
+
+ # NOTE: Keep this package import-safe for minimal installs.
+ # Many submodules have optional dependencies (Pillow, PyMuPDF4LLM, unstructured, ...).
+ # Import them lazily so `from abstractcore.media.capabilities import ...` works without extras.
+
+ from importlib import import_module
+ from typing import Any
+
+ # Core types and base classes (dependency-free at import time)
  from .base import BaseMediaHandler, BaseProviderMediaHandler
  from .types import MediaContent, MediaType, ContentFormat, MultimodalMessage
- from .auto_handler import AutoMediaHandler
 
- # Media processing capabilities
+ # Capability helpers (dependency-free at import time)
  from .capabilities import (
      MediaCapabilities,
      get_media_capabilities,
@@ -25,24 +33,48 @@ from .capabilities import (
      supports_images,
      supports_documents,
      get_max_images,
-     should_use_text_embedding
+     should_use_text_embedding,
  )
 
- # Processors for different file types
- from .processors import ImageProcessor, TextProcessor, PDFProcessor, OfficeProcessor
 
- # Provider-specific handlers
- from .handlers import OpenAIMediaHandler, AnthropicMediaHandler, LocalMediaHandler
+ def __getattr__(name: str) -> Any:
+     """Lazy attribute loader for optional media components."""
+     lazy_map = {
+         # Handlers
+         "OpenAIMediaHandler": ("abstractcore.media.handlers.openai_handler", "OpenAIMediaHandler"),
+         "AnthropicMediaHandler": ("abstractcore.media.handlers.anthropic_handler", "AnthropicMediaHandler"),
+         "LocalMediaHandler": ("abstractcore.media.handlers.local_handler", "LocalMediaHandler"),
 
- # Default media handler - automatically selects appropriate processor
- class MediaHandler(AutoMediaHandler):
-     """
-     Default media handler that automatically selects the appropriate processor.
+         # Auto handler
+         "AutoMediaHandler": ("abstractcore.media.auto_handler", "AutoMediaHandler"),
 
-     This class provides automatic file type detection and processor selection,
-     making it easy to process any supported media type with a single interface.
-     """
-     pass
+         # Processors
+         "ImageProcessor": ("abstractcore.media.processors.image_processor", "ImageProcessor"),
+         "TextProcessor": ("abstractcore.media.processors.text_processor", "TextProcessor"),
+         "PDFProcessor": ("abstractcore.media.processors.pdf_processor", "PDFProcessor"),
+         "OfficeProcessor": ("abstractcore.media.processors.office_processor", "OfficeProcessor"),
+         "AudioProcessor": ("abstractcore.media.processors.audio_processor", "AudioProcessor"),
+         "VideoProcessor": ("abstractcore.media.processors.video_processor", "VideoProcessor"),
+     }
+
+     if name == "MediaHandler":
+         AutoMediaHandler = getattr(import_module("abstractcore.media.auto_handler"), "AutoMediaHandler")
+
+         class MediaHandler(AutoMediaHandler):  # type: ignore[misc]
+             """Default media handler (alias of AutoMediaHandler)."""
+
+         globals()["MediaHandler"] = MediaHandler
+         return MediaHandler
+
+     target = lazy_map.get(name)
+     if target is None:
+         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
+
+     module_name, attr = target
+     mod = import_module(module_name)
+     value = getattr(mod, attr)
+     globals()[name] = value
+     return value
 
  # Convenience functions for common operations
  def process_file(file_path: str) -> MediaContent:
@@ -55,6 +87,7 @@ def process_file(file_path: str) -> MediaContent:
      Returns:
          MediaContent object with processed content
      """
+     from .auto_handler import AutoMediaHandler
      handler = AutoMediaHandler()
      result = handler.process_file(file_path)
      if result.success:
@@ -106,6 +139,8 @@ __all__ = [
      'TextProcessor',
      'PDFProcessor',
      'OfficeProcessor',
+     'AudioProcessor',
+     'VideoProcessor',
 
      # Handlers
      'OpenAIMediaHandler',
@@ -116,4 +151,4 @@ __all__ = [
      'MediaHandler',
      'process_file',
      'get_media_type_from_path'
- ]
+ ]
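The rewritten `abstractcore/media/__init__.py` relies on module-level `__getattr__` (PEP 562) so that importing the package never pulls in optional dependencies. A self-contained sketch of the same pattern, with hypothetical names, for reference:

# mypkg/__init__.py -- illustrative only; "mypkg.heavy" and "HeavyThing" are made-up names.
from importlib import import_module
from typing import Any

_LAZY = {
    # public attribute -> (module path, attribute name)
    "HeavyThing": ("mypkg.heavy", "HeavyThing"),
}

def __getattr__(name: str) -> Any:
    try:
        module_path, attr = _LAZY[name]
    except KeyError:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}") from None
    value = getattr(import_module(module_path), attr)
    globals()[name] = value  # cache so later lookups skip __getattr__
    return value

With this pattern, `import mypkg` stays cheap and the heavy import happens only on first attribute access, which is why the media package can list `AudioProcessor` and `VideoProcessor` in `__all__` without forcing their extras at import time.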