abstractcore-2.9.1-py3-none-any.whl → abstractcore-2.11.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. abstractcore/__init__.py +7 -27
  2. abstractcore/apps/deepsearch.py +9 -4
  3. abstractcore/apps/extractor.py +33 -100
  4. abstractcore/apps/intent.py +19 -0
  5. abstractcore/apps/judge.py +20 -1
  6. abstractcore/apps/summarizer.py +20 -1
  7. abstractcore/architectures/detection.py +34 -1
  8. abstractcore/architectures/response_postprocessing.py +313 -0
  9. abstractcore/assets/architecture_formats.json +38 -8
  10. abstractcore/assets/model_capabilities.json +882 -160
  11. abstractcore/compression/__init__.py +1 -2
  12. abstractcore/compression/glyph_processor.py +6 -4
  13. abstractcore/config/main.py +52 -20
  14. abstractcore/config/manager.py +390 -12
  15. abstractcore/config/vision_config.py +5 -5
  16. abstractcore/core/interface.py +151 -3
  17. abstractcore/core/session.py +16 -10
  18. abstractcore/download.py +1 -1
  19. abstractcore/embeddings/manager.py +20 -6
  20. abstractcore/endpoint/__init__.py +2 -0
  21. abstractcore/endpoint/app.py +458 -0
  22. abstractcore/mcp/client.py +3 -1
  23. abstractcore/media/__init__.py +52 -17
  24. abstractcore/media/auto_handler.py +42 -22
  25. abstractcore/media/base.py +44 -1
  26. abstractcore/media/capabilities.py +12 -33
  27. abstractcore/media/enrichment.py +105 -0
  28. abstractcore/media/handlers/anthropic_handler.py +19 -28
  29. abstractcore/media/handlers/local_handler.py +124 -70
  30. abstractcore/media/handlers/openai_handler.py +19 -31
  31. abstractcore/media/processors/__init__.py +4 -2
  32. abstractcore/media/processors/audio_processor.py +57 -0
  33. abstractcore/media/processors/office_processor.py +8 -3
  34. abstractcore/media/processors/pdf_processor.py +46 -3
  35. abstractcore/media/processors/text_processor.py +22 -24
  36. abstractcore/media/processors/video_processor.py +58 -0
  37. abstractcore/media/types.py +97 -4
  38. abstractcore/media/utils/image_scaler.py +20 -2
  39. abstractcore/media/utils/video_frames.py +219 -0
  40. abstractcore/media/vision_fallback.py +136 -22
  41. abstractcore/processing/__init__.py +32 -3
  42. abstractcore/processing/basic_deepsearch.py +15 -10
  43. abstractcore/processing/basic_intent.py +3 -2
  44. abstractcore/processing/basic_judge.py +3 -2
  45. abstractcore/processing/basic_summarizer.py +1 -1
  46. abstractcore/providers/__init__.py +3 -1
  47. abstractcore/providers/anthropic_provider.py +95 -8
  48. abstractcore/providers/base.py +1516 -81
  49. abstractcore/providers/huggingface_provider.py +546 -69
  50. abstractcore/providers/lmstudio_provider.py +30 -916
  51. abstractcore/providers/mlx_provider.py +382 -35
  52. abstractcore/providers/model_capabilities.py +5 -1
  53. abstractcore/providers/ollama_provider.py +99 -15
  54. abstractcore/providers/openai_compatible_provider.py +406 -180
  55. abstractcore/providers/openai_provider.py +188 -44
  56. abstractcore/providers/openrouter_provider.py +76 -0
  57. abstractcore/providers/registry.py +61 -5
  58. abstractcore/providers/streaming.py +138 -33
  59. abstractcore/providers/vllm_provider.py +92 -817
  60. abstractcore/server/app.py +478 -28
  61. abstractcore/server/audio_endpoints.py +139 -0
  62. abstractcore/server/vision_endpoints.py +1319 -0
  63. abstractcore/structured/handler.py +316 -41
  64. abstractcore/tools/common_tools.py +5501 -2012
  65. abstractcore/tools/comms_tools.py +1641 -0
  66. abstractcore/tools/core.py +37 -7
  67. abstractcore/tools/handler.py +4 -9
  68. abstractcore/tools/parser.py +49 -2
  69. abstractcore/tools/tag_rewriter.py +2 -1
  70. abstractcore/tools/telegram_tdlib.py +407 -0
  71. abstractcore/tools/telegram_tools.py +261 -0
  72. abstractcore/utils/cli.py +1085 -72
  73. abstractcore/utils/structured_logging.py +29 -8
  74. abstractcore/utils/token_utils.py +2 -0
  75. abstractcore/utils/truncation.py +29 -0
  76. abstractcore/utils/version.py +3 -4
  77. abstractcore/utils/vlm_token_calculator.py +12 -2
  78. abstractcore-2.11.4.dist-info/METADATA +562 -0
  79. abstractcore-2.11.4.dist-info/RECORD +133 -0
  80. {abstractcore-2.9.1.dist-info → abstractcore-2.11.4.dist-info}/WHEEL +1 -1
  81. {abstractcore-2.9.1.dist-info → abstractcore-2.11.4.dist-info}/entry_points.txt +1 -0
  82. abstractcore-2.9.1.dist-info/METADATA +0 -1190
  83. abstractcore-2.9.1.dist-info/RECORD +0 -119
  84. {abstractcore-2.9.1.dist-info → abstractcore-2.11.4.dist-info}/licenses/LICENSE +0 -0
  85. {abstractcore-2.9.1.dist-info → abstractcore-2.11.4.dist-info}/top_level.txt +0 -0
abstractcore/core/interface.py CHANGED
@@ -71,11 +71,12 @@ class AbstractCoreInterface(ABC):
                  max_input_tokens: Optional[int] = None,
                  max_output_tokens: int = 2048,
                  temperature: float = 0.7,
-                 seed: Optional[int] = None,
+                 seed: Optional[int] = -1,
                  debug: bool = False,
                  **kwargs):
         self.model = model
         self.config = kwargs
+        self._capability_registry = None

         # Unified token parameters
         self.max_tokens = max_tokens
@@ -84,13 +85,153 @@ class AbstractCoreInterface(ABC):

         # Unified generation parameters
         self.temperature = temperature
-        self.seed = seed
+        # Default seed policy: -1 means "random" (do not forward a provider seed).
+        # Accept None for backward compatibility and normalize it to -1.
+        self.seed = -1 if seed is None else seed

         self.debug = debug

         # Validate token parameters
         self._validate_token_parameters()

+    @property
+    def capabilities(self):
+        """Capability registry (voice/audio/vision plugins).
+
+        Lazily created to avoid importing plugin machinery during `import abstractcore`.
+        """
+        if self._capability_registry is None:
+            explicit_prefs = None
+            try:
+                explicit_prefs = self.config.get("capabilities_preferred_backends")
+            except Exception:
+                explicit_prefs = None
+
+            merged_prefs: Dict[str, str] = {}
+
+            # Global config defaults (config-manager) — lowest precedence.
+            try:
+                from ..config.manager import get_config_manager
+
+                stt_backend_id = getattr(getattr(get_config_manager().config, "audio", None), "stt_backend_id", None)
+            except Exception:
+                stt_backend_id = None
+            if isinstance(stt_backend_id, str) and stt_backend_id.strip():
+                merged_prefs["audio"] = stt_backend_id.strip()
+
+            # Per-instance explicit preferences — highest precedence.
+            if isinstance(explicit_prefs, dict):
+                for k, v in explicit_prefs.items():
+                    ks = str(k or "").strip()
+                    vs = str(v or "").strip()
+                    if ks and vs:
+                        merged_prefs[ks] = vs
+
+            from ..capabilities.registry import CapabilityRegistry
+
+            self._capability_registry = CapabilityRegistry(self, preferred_backends=merged_prefs or None)
+        return self._capability_registry
+
+    @property
+    def voice(self):
+        return self.capabilities.voice
+
+    @property
+    def audio(self):
+        return self.capabilities.audio
+
+    @property
+    def vision(self):
+        return self.capabilities.vision
+
+    def generate_with_outputs(
+        self,
+        prompt: str,
+        *,
+        outputs: Optional[Dict[str, Any]] = None,
+        artifact_store: Optional[Any] = None,
+        **kwargs,
+    ):
+        """Convenience wrapper: run `generate()` then optional deterministic capability calls.
+
+        This intentionally does *not* change `generate()` semantics.
+
+        `outputs` is a small, explicit contract (v0):
+        - {"tts": {...}}: calls `core.voice.tts(...)` after text generation.
+          - default text source is the LLM response content.
+        - {"t2i": {...}}: calls `core.vision.t2i(...)` after text generation.
+          - default prompt source is the LLM response content.
+
+        If `artifact_store` is provided, it is passed through to capability calls.
+        """
+        from ..capabilities.types import GenerateWithOutputsResult
+
+        stream = bool(kwargs.get("stream", False))
+        if stream:
+            raise ValueError("generate_with_outputs does not support stream=True (v0)")
+
+        resp = self.generate(prompt, **kwargs)
+        out: Dict[str, Any] = {}
+
+        cfg = outputs if isinstance(outputs, dict) else {}
+        if cfg.get("tts"):
+            tts_cfg = cfg.get("tts")
+            tts_cfg = tts_cfg if isinstance(tts_cfg, dict) else {}
+            text = tts_cfg.get("text")
+            if text is None:
+                text = getattr(resp, "content", "") or ""
+            out["tts"] = self.voice.tts(
+                str(text),
+                voice=tts_cfg.get("voice"),
+                format=str(tts_cfg.get("format") or "wav"),
+                artifact_store=artifact_store,
+                run_id=tts_cfg.get("run_id"),
+                tags=tts_cfg.get("tags"),
+                metadata=tts_cfg.get("metadata"),
+            )
+
+        if cfg.get("t2i"):
+            t2i_cfg = cfg.get("t2i")
+            t2i_cfg = t2i_cfg if isinstance(t2i_cfg, dict) else {}
+            img_prompt = t2i_cfg.get("prompt")
+            if img_prompt is None:
+                img_prompt = getattr(resp, "content", "") or ""
+            out["t2i"] = self.vision.t2i(
+                str(img_prompt),
+                artifact_store=artifact_store,
+                run_id=t2i_cfg.get("run_id"),
+                tags=t2i_cfg.get("tags"),
+                metadata=t2i_cfg.get("metadata"),
+                **{k: v for k, v in t2i_cfg.items() if k not in {"prompt", "run_id", "tags", "metadata"}},
+            )
+
+        return GenerateWithOutputsResult(response=resp, outputs=out)
+
+    # Unified generation parameter accessors (provider-agnostic)
+    def get_temperature(self) -> float:
+        return float(self.temperature)
+
+    def set_temperature(self, temperature: float) -> None:
+        self.temperature = float(temperature)
+
+    def get_seed(self) -> int:
+        try:
+            return int(self.seed)  # type: ignore[arg-type]
+        except Exception:
+            return -1
+
+    def set_seed(self, seed: Optional[int]) -> None:
+        # Normalize: None or negative -> -1 (random)
+        if seed is None:
+            self.seed = -1
+            return
+        try:
+            seed_i = int(seed)
+        except Exception:
+            self.seed = -1
+            return
+        self.seed = seed_i if seed_i >= 0 else -1
+
     @abstractmethod
     def generate(self,
                  prompt: str,
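
A minimal usage sketch of the new generate_with_outputs() wrapper added above. Only the wrapper's contract comes from this diff; the create_llm factory, the provider and model names, and the presence of a configured TTS backend are assumptions for illustration.

    from abstractcore import create_llm   # assumption: the package's provider factory

    llm = create_llm("ollama", model="llama3")   # placeholder provider/model names

    result = llm.generate_with_outputs(
        "Write a two-sentence weather summary for Paris.",
        outputs={
            # v0 contract from the diff: the "tts" config is forwarded to core.voice.tts(...)
            # after text generation; by default the generated text is what gets spoken.
            "tts": {"format": "wav"},
        },
    )

    print(result.response.content)        # the usual GenerateResponse from generate()
    speech = result.outputs.get("tts")    # whatever the configured TTS backend returned
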
@@ -99,6 +240,7 @@ class AbstractCoreInterface(ABC):
                  tools: Optional[List[Dict[str, Any]]] = None,
                  media: Optional[List[Union[str, Dict[str, Any], 'MediaContent']]] = None,
                  stream: bool = False,
+                 thinking: Optional[Union[bool, str]] = None,
                  **kwargs) -> Union[GenerateResponse, Iterator[GenerateResponse]]:
         """
         Generate response from the LLM.
@@ -110,6 +252,7 @@ class AbstractCoreInterface(ABC):
             tools: Optional list of available tools
             media: Optional list of media files (file paths, MediaContent objects, or dicts)
             stream: Whether to stream the response
+            thinking: Unified thinking/reasoning control (best-effort across providers/models)
             **kwargs: Additional provider-specific parameters

         Returns:
@@ -122,6 +265,11 @@ class AbstractCoreInterface(ABC):
         """Get list of capabilities supported by this provider"""
         pass

+    @abstractmethod
+    def unload_model(self, model_name: str) -> None:
+        """Unload/cleanup resources for a specific model (best-effort)."""
+        pass
+
     def validate_config(self) -> bool:
         """Validate provider configuration"""
         return True
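
Because unload_model() is now abstract, every concrete provider must define it. A sketch of what that looks like; the class name and the _loaded_models cache below are hypothetical, and only the method signature comes from this diff.

    from abstractcore.core.interface import AbstractCoreInterface

    class MyLocalProvider(AbstractCoreInterface):
        # generate() and the other abstract methods are omitted in this stub.

        def unload_model(self, model_name: str) -> None:
            # Best-effort cleanup: drop any cached weights or handles for this model.
            getattr(self, "_loaded_models", {}).pop(model_name, None)
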
@@ -354,4 +502,4 @@ class AbstractCoreInterface(ABC):
         for warning in warnings:
             lines.append(f" • {warning}")

-        return "\n".join(lines)
+        return "\n".join(lines)
abstractcore/core/session.py CHANGED
@@ -34,8 +34,8 @@ class BasicSession:
                  recovery_timeout: Optional[float] = None,
                  auto_compact: bool = False,
                  auto_compact_threshold: int = 6000,
-                 temperature: Optional[float] = None,
-                 seed: Optional[int] = None,
+                 temperature: Optional[float] = 0.7,
+                 seed: Optional[int] = -1,
                  enable_tracing: bool = False):
         """Initialize basic session

@@ -48,8 +48,8 @@ class BasicSession:
             recovery_timeout: Circuit breaker recovery timeout
             auto_compact: Enable automatic conversation compaction
             auto_compact_threshold: Token threshold for auto-compaction
-            temperature: Default temperature for generation (0.0-1.0)
-            seed: Default seed for deterministic generation
+            temperature: Default temperature for generation (provider-agnostic)
+            seed: Default seed for deterministic generation (-1 means random/unset)
             enable_tracing: Enable interaction tracing for observability
         """

@@ -63,9 +63,15 @@ class BasicSession:
         self.auto_compact_threshold = auto_compact_threshold
         self._original_session = None  # Track if this is a compacted session

-        # Store session-level generation parameters
-        self.temperature = temperature
-        self.seed = seed
+        # Store session-level generation parameters.
+        self.temperature = None if temperature is None else float(temperature)
+        # Normalize: None or negative -> -1 (random/unset)
+        try:
+            self.seed = -1 if seed is None else int(seed)
+        except Exception:
+            self.seed = -1
+        if isinstance(self.seed, int) and self.seed < 0:
+            self.seed = -1

         # Setup interaction tracing
         self.enable_tracing = enable_tracing
@@ -218,7 +224,7 @@ class BasicSession:
         # Add session-level parameters if not overridden in kwargs
         if 'temperature' not in kwargs and self.temperature is not None:
             kwargs['temperature'] = self.temperature
-        if 'seed' not in kwargs and self.seed is not None:
+        if 'seed' not in kwargs and isinstance(self.seed, int) and self.seed >= 0:
             kwargs['seed'] = self.seed

         # Add trace metadata if tracing is enabled
@@ -329,7 +335,7 @@ class BasicSession:
         # Add session-level parameters if not overridden in kwargs
         if 'temperature' not in kwargs and self.temperature is not None:
             kwargs['temperature'] = self.temperature
-        if 'seed' not in kwargs and self.seed is not None:
+        if 'seed' not in kwargs and isinstance(self.seed, int) and self.seed >= 0:
             kwargs['seed'] = self.seed

         # Add trace metadata if tracing is enabled
@@ -1143,4 +1149,4 @@ class BasicSession:
         >>> print(f"First trace: {traces[0]['trace_id']}")
         >>> print(f"Tokens used: {traces[0]['response']['usage']}")
         """
-        return self.interaction_traces.copy()
+        return self.interaction_traces.copy()
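
A sketch of the session-level behavior that follows from the hunks above. The parameter semantics come from this diff; the create_llm factory, the provider/model names, passing the provider as the first BasicSession argument, and the generate() call name on the session are assumptions for illustration.

    from abstractcore import create_llm                  # assumption: provider factory
    from abstractcore.core.session import BasicSession   # module path from the file list

    llm = create_llm("ollama", model="llama3")           # placeholder provider/model names

    # Session defaults are injected into a call only when not overridden per call,
    # and a seed is injected only when it is a non-negative integer.
    session = BasicSession(llm, temperature=0.2, seed=1234)
    session.generate("Summarize the 2.11 changes.")      # gets temperature=0.2, seed=1234
    session.generate("Now be creative.", seed=-1)        # per-call value wins; -1 stays random

    default_session = BasicSession(llm)                  # defaults: temperature=0.7, seed=-1 -> no seed injected
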
abstractcore/download.py CHANGED
@@ -212,7 +212,7 @@ async def _download_huggingface(
             status=DownloadStatus.ERROR,
             message=(
                 "huggingface_hub is not installed. "
-                "Install with: pip install abstractcore[huggingface]"
+                "Install with: pip install \"abstractcore[huggingface]\""
             ),
         )
         return
abstractcore/embeddings/manager.py CHANGED
@@ -8,6 +8,7 @@ Production-ready embedding generation with SOTA models and efficient serving.
 import hashlib
 import pickle
 import atexit
+import os
 import sys
 import builtins
 import warnings
@@ -124,7 +125,8 @@ class EmbeddingManager:
                  cache_dir: Optional[Path] = None,
                  cache_size: int = 1000,
                  output_dims: Optional[int] = None,
-                 trust_remote_code: bool = False
+                 trust_remote_code: bool = False,
+                 strict: Optional[bool] = None,
                  ):
         """Initialize the embedding manager.

@@ -138,7 +140,13 @@ class EmbeddingManager:
             cache_size: Maximum number of embeddings to cache in memory
             output_dims: Output dimensions for Matryoshka truncation (if supported by provider)
             trust_remote_code: Whether to trust remote code (HuggingFace only)
+            strict: If true, raise on provider/model failures instead of returning zero vectors.
         """
+        if strict is None:
+            strict_raw = str(os.environ.get("ABSTRACTCORE_EMBEDDINGS_STRICT", "") or "").strip().lower()
+            strict = strict_raw in {"1", "true", "yes", "on"}
+        self.strict = bool(strict)
+
         # Load configuration defaults, but ONLY if parameters weren't explicitly provided
         self._load_config_defaults(model, provider)

@@ -277,7 +285,10 @@ class EmbeddingManager:
         """Load the HuggingFace embedding model with optimal backend and reduced warnings."""
         try:
             if sentence_transformers is None:
-                raise ImportError("sentence-transformers is required but not installed")
+                raise ImportError(
+                    "sentence-transformers is required but not installed. "
+                    "Install with: pip install \"abstractcore[embeddings]\""
+                )

             # Set HuggingFace cache directory (sentence-transformers uses this automatically)
             import os
@@ -333,7 +344,8 @@ class EmbeddingManager:
         except ImportError:
             raise ImportError(
                 "sentence-transformers is required for embedding functionality. "
-                "Install with: pip install sentence-transformers"
+                "Install with: pip install \"abstractcore[embeddings]\" (recommended) "
+                "or: pip install sentence-transformers"
             )
         except Exception as e:
             logger.error(f"Failed to load embedding model {self.model_id}: {e}")
@@ -676,7 +688,8 @@ class EmbeddingManager:

         except Exception as e:
             logger.error(f"Failed to embed text with {self.provider}: {e}")
-            # Return zero vector as fallback
+            if self.strict:
+                raise
             dim = self.output_dims or self.get_dimension()
             return [0.0] * dim

@@ -759,7 +772,8 @@ class EmbeddingManager:

         except Exception as e:
             logger.error(f"Failed to embed batch with {self.provider}: {e}")
-            # Fill with zero vectors as fallback
+            if self.strict:
+                raise
             dim = self.output_dims or self.get_dimension()
             zero_embedding = [0.0] * dim
             for idx in uncached_indices:
@@ -1262,4 +1276,4 @@ class EmbeddingManager:
     def save_caches(self):
         """Explicitly save both caches to disk."""
         self._save_persistent_cache()
-        self._save_normalized_cache()
+        self._save_normalized_cache()
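
A sketch of the new strict-mode switch. The strict parameter, the ABSTRACTCORE_EMBEDDINGS_STRICT variable, and the zero-vector fallback come from this diff; constructing EmbeddingManager with its default model/provider configuration is an assumption, and the embedding method names themselves are not shown in these hunks.

    import os

    # Any of "1", "true", "yes", "on" enables strict mode process-wide.
    os.environ["ABSTRACTCORE_EMBEDDINGS_STRICT"] = "true"

    from abstractcore.embeddings.manager import EmbeddingManager  # module path from the file list

    lenient = EmbeddingManager(strict=False)   # explicit argument overrides the env default
    strict_em = EmbeddingManager()             # strict=None -> falls back to the env var (strict here)

    # Non-strict: an embedding failure is logged and a zero vector of the model's dimension
    # is returned. Strict: the underlying provider exception is re-raised to the caller.
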
abstractcore/endpoint/__init__.py ADDED
@@ -0,0 +1,2 @@
+"""AbstractCore single-model OpenAI-compatible endpoint server."""
+