abstractcore-2.9.1-py3-none-any.whl → abstractcore-2.11.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. abstractcore/__init__.py +7 -27
  2. abstractcore/apps/extractor.py +33 -100
  3. abstractcore/apps/intent.py +19 -0
  4. abstractcore/apps/judge.py +20 -1
  5. abstractcore/apps/summarizer.py +20 -1
  6. abstractcore/architectures/detection.py +34 -1
  7. abstractcore/architectures/response_postprocessing.py +313 -0
  8. abstractcore/assets/architecture_formats.json +38 -8
  9. abstractcore/assets/model_capabilities.json +781 -160
  10. abstractcore/compression/__init__.py +1 -2
  11. abstractcore/compression/glyph_processor.py +6 -4
  12. abstractcore/config/main.py +31 -19
  13. abstractcore/config/manager.py +389 -11
  14. abstractcore/config/vision_config.py +5 -5
  15. abstractcore/core/interface.py +151 -3
  16. abstractcore/core/session.py +16 -10
  17. abstractcore/download.py +1 -1
  18. abstractcore/embeddings/manager.py +20 -6
  19. abstractcore/endpoint/__init__.py +2 -0
  20. abstractcore/endpoint/app.py +458 -0
  21. abstractcore/mcp/client.py +3 -1
  22. abstractcore/media/__init__.py +52 -17
  23. abstractcore/media/auto_handler.py +42 -22
  24. abstractcore/media/base.py +44 -1
  25. abstractcore/media/capabilities.py +12 -33
  26. abstractcore/media/enrichment.py +105 -0
  27. abstractcore/media/handlers/anthropic_handler.py +19 -28
  28. abstractcore/media/handlers/local_handler.py +124 -70
  29. abstractcore/media/handlers/openai_handler.py +19 -31
  30. abstractcore/media/processors/__init__.py +4 -2
  31. abstractcore/media/processors/audio_processor.py +57 -0
  32. abstractcore/media/processors/office_processor.py +8 -3
  33. abstractcore/media/processors/pdf_processor.py +46 -3
  34. abstractcore/media/processors/text_processor.py +22 -24
  35. abstractcore/media/processors/video_processor.py +58 -0
  36. abstractcore/media/types.py +97 -4
  37. abstractcore/media/utils/image_scaler.py +20 -2
  38. abstractcore/media/utils/video_frames.py +219 -0
  39. abstractcore/media/vision_fallback.py +136 -22
  40. abstractcore/processing/__init__.py +32 -3
  41. abstractcore/processing/basic_deepsearch.py +15 -10
  42. abstractcore/processing/basic_intent.py +3 -2
  43. abstractcore/processing/basic_judge.py +3 -2
  44. abstractcore/processing/basic_summarizer.py +1 -1
  45. abstractcore/providers/__init__.py +3 -1
  46. abstractcore/providers/anthropic_provider.py +95 -8
  47. abstractcore/providers/base.py +1516 -81
  48. abstractcore/providers/huggingface_provider.py +546 -69
  49. abstractcore/providers/lmstudio_provider.py +35 -923
  50. abstractcore/providers/mlx_provider.py +382 -35
  51. abstractcore/providers/model_capabilities.py +5 -1
  52. abstractcore/providers/ollama_provider.py +99 -15
  53. abstractcore/providers/openai_compatible_provider.py +406 -180
  54. abstractcore/providers/openai_provider.py +188 -44
  55. abstractcore/providers/openrouter_provider.py +76 -0
  56. abstractcore/providers/registry.py +61 -5
  57. abstractcore/providers/streaming.py +138 -33
  58. abstractcore/providers/vllm_provider.py +92 -817
  59. abstractcore/server/app.py +461 -13
  60. abstractcore/server/audio_endpoints.py +139 -0
  61. abstractcore/server/vision_endpoints.py +1319 -0
  62. abstractcore/structured/handler.py +316 -41
  63. abstractcore/tools/common_tools.py +5501 -2012
  64. abstractcore/tools/comms_tools.py +1641 -0
  65. abstractcore/tools/core.py +37 -7
  66. abstractcore/tools/handler.py +4 -9
  67. abstractcore/tools/parser.py +49 -2
  68. abstractcore/tools/tag_rewriter.py +2 -1
  69. abstractcore/tools/telegram_tdlib.py +407 -0
  70. abstractcore/tools/telegram_tools.py +261 -0
  71. abstractcore/utils/cli.py +1085 -72
  72. abstractcore/utils/token_utils.py +2 -0
  73. abstractcore/utils/truncation.py +29 -0
  74. abstractcore/utils/version.py +3 -4
  75. abstractcore/utils/vlm_token_calculator.py +12 -2
  76. abstractcore-2.11.2.dist-info/METADATA +562 -0
  77. abstractcore-2.11.2.dist-info/RECORD +133 -0
  78. {abstractcore-2.9.1.dist-info → abstractcore-2.11.2.dist-info}/WHEEL +1 -1
  79. {abstractcore-2.9.1.dist-info → abstractcore-2.11.2.dist-info}/entry_points.txt +1 -0
  80. abstractcore-2.9.1.dist-info/METADATA +0 -1190
  81. abstractcore-2.9.1.dist-info/RECORD +0 -119
  82. {abstractcore-2.9.1.dist-info → abstractcore-2.11.2.dist-info}/licenses/LICENSE +0 -0
  83. {abstractcore-2.9.1.dist-info → abstractcore-2.11.2.dist-info}/top_level.txt +0 -0
abstractcore/media/vision_fallback.py

@@ -6,7 +6,9 @@ Uses unified AbstractCore configuration system.
 """
 
 from pathlib import Path
-from typing import Optional, Dict, Any
+from typing import Optional, Dict, Any, Tuple
+
+from ..utils.jsonish import loads_dict_like
 from ..utils.structured_logging import get_logger
 
 logger = get_logger(__name__)
@@ -69,6 +71,16 @@ class VisionFallbackHandler:
         Raises:
             VisionNotConfiguredError: When vision fallback is not configured
         """
+        description, _trace = self.create_description_with_trace(image_path, user_prompt=user_prompt)
+        return description
+
+    def create_description_with_trace(self, image_path: str, user_prompt: str = None) -> Tuple[str, Dict[str, Any]]:
+        """
+        Generate a description along with a small transparency trace.
+
+        The trace is intentionally bounded and JSON-serializable so it can be
+        surfaced in response metadata without relying on logs.
+        """
         if self.vision_config.strategy == "disabled":
             raise VisionNotConfiguredError("Vision fallback is disabled")
 
@@ -76,7 +88,14 @@ class VisionFallbackHandler:
             raise VisionNotConfiguredError("No vision capability configured")
 
         try:
-            return self._generate_with_fallback(image_path)
+            description, trace = self._generate_with_fallback_with_trace(image_path, user_prompt=user_prompt)
+            # Always include strategy in trace for host UX.
+            if isinstance(trace, dict):
+                trace = dict(trace)
+            else:
+                trace = {}
+            trace.setdefault("strategy", getattr(self.vision_config, "strategy", None))
+            return description, trace
         except Exception as e:
            logger.debug(f"Vision fallback failed: {e}")
            raise VisionNotConfiguredError(f"Vision fallback generation failed: {e}")
@@ -92,32 +111,61 @@ class VisionFallbackHandler:
 
     def _has_local_models(self) -> bool:
         """Check if any local vision models are available."""
-        models_dir = Path(self.vision_config.local_models_path).expanduser()
+        local_models_path = getattr(self.vision_config, "local_models_path", None)
+        if not isinstance(local_models_path, str) or not local_models_path.strip():
+            return False
+
+        models_dir = Path(local_models_path).expanduser()
         return models_dir.exists() and any(models_dir.iterdir())
 
-    def _generate_with_fallback(self, image_path: str) -> str:
-        """Try vision models in fallback chain order."""
+    def _generate_with_fallback(self, image_path: str, user_prompt: Optional[str] = None) -> str:
+        """Backward-compatible: return only the description."""
+        description, _trace = self._generate_with_fallback_with_trace(image_path, user_prompt=user_prompt)
+        return description
+
+    def _generate_with_fallback_with_trace(
+        self, image_path: str, user_prompt: Optional[str] = None
+    ) -> Tuple[str, Dict[str, Any]]:
+        """Try vision models in fallback chain order and return a small trace."""
         # Try primary provider first
         if self.vision_config.caption_provider and self.vision_config.caption_model:
             try:
                 description = self._generate_description(
                     self.vision_config.caption_provider,
                     self.vision_config.caption_model,
-                    image_path
+                    image_path,
+                    user_prompt=user_prompt,
                 )
-                return description
+                return description, {
+                    "backend": {
+                        "kind": "llm",
+                        "provider": str(self.vision_config.caption_provider),
+                        "model": str(self.vision_config.caption_model),
+                        "source": "primary",
+                    }
+                }
             except Exception as e:
                 logger.debug(f"Primary vision provider failed: {e}")
 
         # Try fallback chain
-        for provider_config in self.vision_config.fallback_chain:
+        for idx, provider_config in enumerate(self.vision_config.fallback_chain):
             try:
+                provider = provider_config.get("provider")
+                model = provider_config.get("model")
                 description = self._generate_description(
-                    provider_config["provider"],
-                    provider_config["model"],
-                    image_path
+                    provider,
+                    model,
+                    image_path,
+                    user_prompt=user_prompt,
                 )
-                return description
+                return description, {
+                    "backend": {
+                        "kind": "llm",
+                        "provider": str(provider),
+                        "model": str(model),
+                        "source": f"fallback_chain[{idx}]",
+                    }
+                }
             except Exception as e:
                 logger.debug(f"Vision provider {provider_config} failed: {e}")
                 continue
@@ -125,37 +173,97 @@ class VisionFallbackHandler:
         # Try local models
         if self._has_local_models():
             try:
-                description = self._generate_local_description(image_path)
-                return description
+                description, local_trace = self._generate_local_description_with_trace(image_path)
+                return description, local_trace
             except Exception as e:
                 logger.debug(f"Local vision model failed: {e}")
 
         raise Exception("All vision fallback providers failed")
 
-    def _generate_description(self, provider: str, model: str, image_path: str) -> str:
+    def _generate_description(self, provider: str, model: str, image_path: str, user_prompt: Optional[str] = None) -> str:
         """Generate description using specified provider and model."""
         try:
             # Import here to avoid circular imports
             from abstractcore import create_llm
 
             vision_llm = create_llm(provider, model=model)
+            prompt = self._build_caption_prompt(user_prompt=user_prompt)
             response = vision_llm.generate(
-                "Provide a detailed description of this image in 3-4 sentences. Be precise about specific landmarks, buildings, objects, and details. If you recognize specific places or things, name them accurately. Describe naturally without phrases like 'this image shows'.",
+                prompt,
                 media=[image_path]
             )
-            return response.content.strip()
+            return self._extract_caption_text(response.content)
         except Exception as e:
             logger.debug(f"Failed to generate description with {provider}/{model}: {e}")
             raise
 
+    def _build_caption_prompt(self, user_prompt: Optional[str] = None) -> str:
+        """
+        Build the prompt sent to the vision model.
+
+        Design goals:
+        - "Perception only": provide grounded visual observations, not a full answer.
+        - Context-aware: bias observations toward what's useful for the user's request.
+        - Seamless: avoid meta phrases that make the parent model "comment on a caption".
+        """
+        cleaned_user_prompt = (user_prompt or "").strip()
+
+        base = (
+            "Provide grounded visual observations that will help answer the user's request.\n"
+            "- Write 3–4 natural sentences.\n"
+            "- Be precise about objects, people, settings, and notable details.\n"
+            "- If there is readable text, include it verbatim in quotes.\n"
+            "- If you recognize specific places/people/brands with high confidence, name them; otherwise say \"unclear\".\n"
+            "- Avoid meta phrasing like \"this image shows\", \"the image depicts\", \"image analysis\", or apologies.\n"
+            "- Return only the description text.\n"
+        )
+
+        if not cleaned_user_prompt:
+            return base
+
+        return f"{base}\nUser request (for context): {cleaned_user_prompt}"
+
+    def _extract_caption_text(self, raw: Any) -> str:
+        """
+        Extract a plain caption string from the vision model output.
+
+        Some vision models (or wrappers) may return JSON-ish objects; accept both.
+        """
+        text = str(raw or "").strip()
+        if not text:
+            return ""
+
+        parsed = loads_dict_like(text)
+        if isinstance(parsed, dict):
+            for key in ("description", "caption", "text", "content"):
+                val = parsed.get(key)
+                if isinstance(val, str) and val.strip():
+                    return val.strip()
+
+        return text
+
     def _generate_local_description(self, image_path: str) -> str:
-        """Generate description using local vision model."""
+        """Backward-compatible: return only the local model description."""
+        description, _trace = self._generate_local_description_with_trace(image_path)
+        return description
+
+    def _generate_local_description_with_trace(self, image_path: str) -> Tuple[str, Dict[str, Any]]:
+        """Generate description using a local vision model and return trace."""
        try:
-            models_dir = Path(self.vision_config.local_models_path).expanduser()
+            local_models_path = getattr(self.vision_config, "local_models_path", None)
+            if not isinstance(local_models_path, str) or not local_models_path.strip():
+                raise Exception("No local_models_path configured")
+
+            models_dir = Path(local_models_path).expanduser()
 
             # Look for downloaded vision models
             for model_dir in models_dir.iterdir():
-                if model_dir.is_dir() and ("caption" in model_dir.name.lower() or "blip" in model_dir.name.lower() or "vit" in model_dir.name.lower() or "git" in model_dir.name.lower()):
+                if model_dir.is_dir() and (
+                    "caption" in model_dir.name.lower()
+                    or "blip" in model_dir.name.lower()
+                    or "vit" in model_dir.name.lower()
+                    or "git" in model_dir.name.lower()
+                ):
                     try:
                         # Check if download is complete
                         if not (model_dir / "download_complete.txt").exists():
@@ -164,7 +272,13 @@ class VisionFallbackHandler:
 
                         description = self._use_local_model(model_dir, image_path)
                         if description:
-                            return description
+                            return description, {
+                                "backend": {
+                                    "kind": "local_model",
+                                    "model": str(model_dir.name),
+                                    "source": "local_models",
+                                }
+                            }
 
                     except Exception as e:
                         logger.debug(f"Local model {model_dir} failed: {e}")
@@ -271,4 +385,4 @@ def has_vision_capability() -> bool:
 def create_image_description(image_path: str, user_prompt: str = None) -> str:
     """Create image description for text-only models."""
     handler = VisionFallbackHandler()
-    return handler.create_description(image_path, user_prompt)
\ No newline at end of file
+    return handler.create_description(image_path, user_prompt)
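
The trace plumbing keeps create_description() as a thin wrapper, so existing callers are unaffected while hosts that want attribution can call the _with_trace variant. A minimal usage sketch, assuming vision fallback is configured (the handler, method, and trace keys come from the diff above):

    from abstractcore.media.vision_fallback import VisionFallbackHandler

    handler = VisionFallbackHandler()

    # The trace is a bounded, JSON-serializable dict naming the backend that
    # produced the caption: the primary provider, a fallback-chain entry, or
    # a local captioning model.
    description, trace = handler.create_description_with_trace(
        "photo.jpg", user_prompt="What landmark is this?"
    )
    print(description)
    print(trace.get("strategy"))                   # configured fallback strategy
    print(trace.get("backend", {}).get("source"))  # "primary", "fallback_chain[0]", ...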
abstractcore/processing/__init__.py

@@ -5,11 +5,40 @@ Basic text processing capabilities built on top of AbstractCore,
 demonstrating how to leverage the core infrastructure for real-world tasks.
 """
 
+from __future__ import annotations
+
+# Keep this package import-safe for minimal installs.
+# Some processing apps pull optional deps (e.g. DeepSearch uses built-in web tools).
+from importlib import import_module
+from typing import Any
+
 from .basic_summarizer import BasicSummarizer, SummaryStyle, SummaryLength, CompressionMode
 from .basic_extractor import BasicExtractor
 from .basic_judge import BasicJudge, JudgmentCriteria, Assessment, create_judge
-from .basic_deepsearch import BasicDeepSearch, ResearchReport, ResearchFinding, ResearchPlan, ResearchSubTask
-from .basic_intent import BasicIntentAnalyzer, IntentType, IntentDepth, IntentContext, IdentifiedIntent, IntentAnalysisOutput
+from .basic_intent import (
+    BasicIntentAnalyzer,
+    IntentType,
+    IntentDepth,
+    IntentContext,
+    IdentifiedIntent,
+    IntentAnalysisOutput,
+)
+
+
+def __getattr__(name: str) -> Any:
+    lazy = {
+        "BasicDeepSearch",
+        "ResearchReport",
+        "ResearchFinding",
+        "ResearchPlan",
+        "ResearchSubTask",
+    }
+    if name in lazy:
+        mod = import_module("abstractcore.processing.basic_deepsearch")
+        value = getattr(mod, name)
+        globals()[name] = value
+        return value
+    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
 
 __all__ = [
     'BasicSummarizer', 'SummaryStyle', 'SummaryLength', 'CompressionMode',
@@ -17,4 +46,4 @@ __all__ = [
     'BasicJudge', 'JudgmentCriteria', 'Assessment', 'create_judge',
     'BasicDeepSearch', 'ResearchReport', 'ResearchFinding', 'ResearchPlan', 'ResearchSubTask',
     'BasicIntentAnalyzer', 'IntentType', 'IntentDepth', 'IntentContext', 'IdentifiedIntent', 'IntentAnalysisOutput'
-]
\ No newline at end of file
+]
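
The __getattr__ hook above is PEP 562's module-level lazy export: importing abstractcore.processing no longer pays for basic_deepsearch (and its web-tool dependencies) unless a DeepSearch name is actually touched. A generic sketch of the same pattern, with hypothetical names (mypkg, HeavyThing):

    # mypkg/__init__.py — lazy re-export via PEP 562
    from importlib import import_module
    from typing import Any

    _LAZY = {"HeavyThing": "mypkg._heavy"}  # exported name -> defining module

    def __getattr__(name: str) -> Any:
        if name in _LAZY:
            value = getattr(import_module(_LAZY[name]), name)
            globals()[name] = value  # cache so the hook runs once per name
            return value
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

Note that __all__ still lists the DeepSearch names, so star-imports and static tooling keep working; the import cost is simply deferred until first access.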
abstractcore/processing/basic_deepsearch.py

@@ -24,6 +24,7 @@ from ..core.interface import AbstractCoreInterface
 from ..core.factory import create_llm
 from ..structured.retry import FeedbackRetry
 from ..utils.structured_logging import get_logger
+from ..utils.truncation import preview_text
 from ..tools.common_tools import web_search, fetch_url
 
 logger = get_logger(__name__)
@@ -172,7 +173,7 @@ class ResearchSubTaskModel(BaseModel):
 class ResearchThemeModel(BaseModel):
     """Pydantic model for research theme (for structured output)"""
     name: str = Field(description="Theme name")
-    questions: List[str] = Field(description="3 specific research questions for this theme", min_items=3, max_items=3)
+    questions: List[str] = Field(description="3 specific research questions for this theme", min_length=3, max_length=3)
     priority: int = Field(description="Theme priority (1=essential, 2=important, 3=supplementary)", ge=1, le=3)
 
 class ResearchPlanModel(BaseModel):
@@ -185,7 +186,7 @@ class ResearchPlanModel(BaseModel):
 
 class SearchQueriesModel(BaseModel):
     """Pydantic model for search queries (for structured output)"""
-    queries: List[str] = Field(description="List of specific search queries", min_items=1, max_items=5)
+    queries: List[str] = Field(description="List of specific search queries", min_length=1, max_length=5)
 
 
 @dataclass
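
The min_items/max_items → min_length/max_length renames track Pydantic v2, where list constraints reuse the length keywords (the v1 names were deprecated in early v2 releases and later removed). A minimal check of the v2 spelling:

    from typing import List
    from pydantic import BaseModel, Field, ValidationError

    class SearchQueries(BaseModel):
        # Pydantic v2 spelling; v1 used min_items=1, max_items=5.
        queries: List[str] = Field(min_length=1, max_length=5)

    try:
        SearchQueries(queries=[])
    except ValidationError as exc:
        print(exc.errors()[0]["type"])  # "too_short"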
@@ -697,7 +698,7 @@ Avoid generic terms like "qubit" alone (which returns lab instruments) - be spec
             logger.info(f"🔍 Executing search for: {query}")
             search_results = web_search(query, num_results=5)
             logger.debug(f"📄 Search results length: {len(search_results)}")
-            logger.debug(f"📄 Search results preview: {search_results[:500]}")
+            logger.debug(f"📄 Search results preview: {preview_text(search_results, max_chars=500)}")
 
             # Parse search results to extract URLs and content
             urls = self._extract_urls_from_search(search_results)
@@ -739,7 +740,7 @@ Avoid generic terms like "qubit" alone (which returns lab instruments) - be spec
                 synthetic_finding = ResearchFinding(
                     source_url="https://duckduckgo.com/?q=" + query.replace(" ", "+"),
                     title=f"Search results for: {query}",
-                    content=search_results[:500] + "...",
+                    content=preview_text(search_results, max_chars=500),
                     relevance_score=0.3,
                     timestamp=timestamp,
                     sub_task_id=sub_task_id
@@ -940,7 +941,7 @@ Avoid generic terms like "qubit" alone (which returns lab instruments) - be spec
             }
 
         # Limit content for efficient assessment
-        assessment_content = content[:1500] + "..." if len(content) > 1500 else content
+        assessment_content = preview_text(content, max_chars=1500)
 
         assessment_prompt = f"""
 Assess if this content contains meaningful information related to the research query.
@@ -1386,7 +1387,7 @@ BE GENEROUS with relevance assessment - when in doubt, mark as relevant.
                 content_parts.append(f"**Content:** {text_preview}")
             else:
                 # Standard mode: use longer preview (up to 1000 chars)
-                preview = text_preview[:1000] + "..." if len(text_preview) > 1000 else text_preview
+                preview = preview_text(text_preview, max_chars=1000)
                 content_parts.append(f"**Content:** {preview}")
 
         if not content_parts:
@@ -1536,8 +1537,7 @@ If the content is not relevant to the query, respond with "NOT_RELEVANT".
         """Fallback LLM-based extraction for unstructured content"""
 
         # Limit content length for processing
-        if len(content) > 8000:
-            content = content[:8000] + "..."
+        content = preview_text(content, max_chars=8000)
 
         extraction_prompt = f"""
 Extract the most relevant information from this content for the research query.
@@ -1944,7 +1944,7 @@ Guidelines:
         sources = []
 
         for finding in findings[:10]:  # Limit to top 10 findings
-            key_findings.append(finding.content[:200] + "..." if len(finding.content) > 200 else finding.content)
+            key_findings.append(preview_text(finding.content, max_chars=200))
             sources.append({
                 "title": finding.title,
                 "url": finding.source_url,
@@ -1964,6 +1964,11 @@ Guidelines:
 
     def _verify_report(self, report: ResearchReport, findings: List[ResearchFinding]) -> ResearchReport:
         """Stage 5: Verify report accuracy and add fact-checking"""
+
+        sources_preview = "\n".join(
+            f"- {preview_text(f.content, max_chars=200)}"
+            for f in findings[:10]
+        )
 
         verification_prompt = f"""
 Review this research report for accuracy and consistency with the source findings.
@@ -1973,7 +1978,7 @@ EXECUTIVE SUMMARY: {report.executive_summary}
 KEY FINDINGS: {report.key_findings}
 
 SOURCE FINDINGS:
-{[f"- {f.content[:200]}..." for f in findings[:10]]}
+{sources_preview}
 
 Identify any potential issues:
 1. Claims not supported by the source findings
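
Throughout this file the repeated text[:N] + "..." idiom gives way to preview_text from the new abstractcore/utils/truncation.py (+29 lines, not shown in this diff). Its exact implementation isn't visible here; a plausible sketch consistent with the call sites above:

    def preview_text(text, max_chars: int = 200) -> str:
        """Hypothetical reimplementation: return text unchanged if it fits,
        else truncate at max_chars and mark the cut. The real helper may
        differ in details (ellipsis style, None handling, trimming)."""
        s = "" if text is None else str(text)
        if len(s) <= max_chars:
            return s
        return s[:max_chars] + "..."

Centralizing the idiom also smooths over an inconsistency in the old code: some call sites appended "..." unconditionally (e.g. the synthetic-finding content), while others only did so past the limit.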
abstractcore/processing/basic_intent.py

@@ -13,6 +13,7 @@ from ..core.interface import AbstractCoreInterface
 from ..core.factory import create_llm
 from ..structured.retry import FeedbackRetry
 from ..utils.structured_logging import get_logger
+from ..utils.truncation import preview_text
 
 logger = get_logger(__name__)
 
@@ -275,7 +276,7 @@ class BasicIntentAnalyzer:
            # Debug information for troubleshooting
            error_msg = f"Failed to generate structured intent analysis output. Response type: {type(response)}"
            if hasattr(response, 'content'):
-                error_msg += f", Content: {response.content[:200]}..."
+                error_msg += f", Content: {preview_text(response.content, max_chars=200)}"
            if hasattr(response, 'structured_output'):
                error_msg += f", Structured output: {response.structured_output}"
            raise ValueError(error_msg)
@@ -375,7 +376,7 @@ class BasicIntentAnalyzer:
            # Debug information for troubleshooting
            error_msg = f"Failed to generate final structured intent analysis output. Response type: {type(response)}"
            if hasattr(response, 'content'):
-                error_msg += f", Content: {response.content[:200]}..."
+                error_msg += f", Content: {preview_text(response.content, max_chars=200)}"
            if hasattr(response, 'structured_output'):
                error_msg += f", Structured output: {response.structured_output}"
            raise ValueError(error_msg)
abstractcore/processing/basic_judge.py

@@ -19,6 +19,7 @@ from ..core.interface import AbstractCoreInterface
 from ..core.factory import create_llm
 from ..structured.retry import FeedbackRetry
 from ..utils.structured_logging import get_logger
+from ..utils.truncation import preview_text
 
 logger = get_logger(__name__)
 
@@ -657,7 +658,7 @@ These criteria form the foundation of this assessment. Each criterion is evaluat
         # Determine source reference
         source_ref = f"Content evaluation in context: {context}"
         if len(content) > 50:
-            content_preview = content[:50] + "..."
+            content_preview = preview_text(content, max_chars=50)
             source_ref = f"Content: '{content_preview}' (context: {context})"
         else:
             source_ref = f"Content: '{content}' (context: {context})"
@@ -774,4 +775,4 @@ def create_judge(
         llm = create_llm(provider, model=model, temperature=temperature, max_tokens=max_tokens, max_output_tokens=max_output_tokens, timeout=timeout, **kwargs)
         return BasicJudge(llm=llm, temperature=temperature, max_tokens=max_tokens, max_output_tokens=max_output_tokens, debug=debug, timeout=timeout)
     else:
-        return BasicJudge(temperature=temperature, max_tokens=max_tokens, max_output_tokens=max_output_tokens, debug=debug, timeout=timeout)
\ No newline at end of file
+        return BasicJudge(temperature=temperature, max_tokens=max_tokens, max_output_tokens=max_output_tokens, debug=debug, timeout=timeout)
abstractcore/processing/basic_summarizer.py

@@ -166,7 +166,7 @@ class BasicSummarizer:
            " summarizer = BasicSummarizer(llm)\n"
            " \n"
            " # Using Anthropic\n"
-            " llm = create_llm('anthropic', model='claude-3-5-haiku-latest')\n"
+            " llm = create_llm('anthropic', model='claude-haiku-4-5')\n"
            " summarizer = BasicSummarizer(llm)\n"
            " \n"
            " # Using different Ollama model\n"
abstractcore/providers/__init__.py

@@ -9,6 +9,7 @@ from .huggingface_provider import HuggingFaceProvider
 from .mlx_provider import MLXProvider
 from .vllm_provider import VLLMProvider
 from .openai_compatible_provider import OpenAICompatibleProvider
+from .openrouter_provider import OpenRouterProvider
 
 # Provider registry for centralized provider discovery and management
 from .registry import (
@@ -45,6 +46,7 @@ __all__ = [
     'MLXProvider',
     'VLLMProvider',
     'OpenAICompatibleProvider',
+    'OpenRouterProvider',
 
     # Provider registry
     'ProviderRegistry',
@@ -65,4 +67,4 @@ __all__ = [
     'get_model_output_capabilities',
     'filter_models_by_capabilities',
     'get_capability_summary',
-]
\ No newline at end of file
+]
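
With OpenRouterProvider exported and registered, construction presumably goes through the same factory as the other providers. A sketch, assuming the registry key is "openrouter" and that an OpenRouter API key is available in the environment (neither detail is shown in this diff; the model id is illustrative):

    from abstractcore import create_llm

    # Hypothetical usage; provider key and model id are assumptions.
    llm = create_llm("openrouter", model="openai/gpt-4o-mini")
    response = llm.generate("Say hello in one sentence.")
    print(response.content)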
abstractcore/providers/anthropic_provider.py

@@ -15,7 +15,6 @@ except ImportError:
     BaseModel = None
 from .base import BaseProvider
 from ..core.types import GenerateResponse
-from ..media import MediaHandler
 from ..exceptions import AuthenticationError, ProviderAPIError, ModelNotFoundError, format_model_error, format_auth_error
 from ..tools import UniversalToolHandler, execute_tools
 from ..events import EventType
@@ -142,7 +141,7 @@ class AnthropicProvider(BaseProvider):
                     multimodal_message = media_handler.create_multimodal_message(prompt, media)
                     api_messages.append(multimodal_message)
                 except ImportError:
-                    self.logger.warning("Media processing not available. Install with: pip install abstractcore[media]")
+                    self.logger.warning("Media processing not available. Install with: pip install \"abstractcore[media]\"")
                     api_messages.append({"role": "user", "content": prompt})
                 except Exception as e:
                     self.logger.warning(f"Failed to process media content: {e}")
@@ -150,6 +149,50 @@ class AnthropicProvider(BaseProvider):
             else:
                 api_messages.append({"role": "user", "content": prompt})
 
+        # If media is present but no multimodal message was created (common when prompt="" and the
+        # caller provided the request in `messages`), attach media to the last plain user message.
+        if media:
+            try:
+                has_image = False
+                for m in api_messages:
+                    if not isinstance(m, dict):
+                        continue
+                    if m.get("role") != "user":
+                        continue
+                    c = m.get("content")
+                    if not isinstance(c, list):
+                        continue
+                    for b in c:
+                        if isinstance(b, dict) and b.get("type") == "image":
+                            has_image = True
+                            break
+                    if has_image:
+                        break
+
+                if not has_image:
+                    from ..media.handlers import AnthropicMediaHandler
+
+                    media_handler = AnthropicMediaHandler(self.model_capabilities)
+                    idx: Optional[int] = None
+                    for i in range(len(api_messages) - 1, -1, -1):
+                        m = api_messages[i]
+                        if not isinstance(m, dict):
+                            continue
+                        if m.get("role") != "user":
+                            continue
+                        if isinstance(m.get("content"), str):
+                            idx = i
+                            break
+                    if idx is None:
+                        api_messages.append(media_handler.create_multimodal_message("", media))
+                    else:
+                        text0 = str(api_messages[idx].get("content") or "")
+                        api_messages[idx] = media_handler.create_multimodal_message(text0, media)
+            except ImportError:
+                self.logger.warning("Media processing not available. Install with: pip install \"abstractcore[media]\"")
+            except Exception as e:
+                self.logger.warning(f"Failed to process media content: {e}")
+
         # Prepare API call parameters using unified system
         generation_kwargs = self._prepare_generation_kwargs(**kwargs)
         max_output_tokens = self._get_provider_max_tokens_param(generation_kwargs)
@@ -158,7 +201,7 @@ class AnthropicProvider(BaseProvider):
             "model": self.model,
             "messages": api_messages,
             "max_tokens": max_output_tokens,  # This is max_output_tokens for Anthropic
-            "temperature": kwargs.get("temperature", self.temperature),
+            "temperature": generation_kwargs.get("temperature", self.temperature),
             "stream": stream
         }
 
@@ -175,7 +218,7 @@ class AnthropicProvider(BaseProvider):
             call_params["top_k"] = kwargs.get("top_k", self.top_k)
 
         # Handle seed parameter (Anthropic doesn't support seed natively)
-        seed_value = kwargs.get("seed", self.seed)
+        seed_value = generation_kwargs.get("seed")
         if seed_value is not None:
             import warnings
             warnings.warn(
@@ -330,7 +373,7 @@ class AnthropicProvider(BaseProvider):
                     multimodal_message = media_handler.create_multimodal_message(prompt, media)
                     api_messages.append(multimodal_message)
                 except ImportError:
-                    self.logger.warning("Media processing not available. Install with: pip install abstractcore[media]")
+                    self.logger.warning("Media processing not available. Install with: pip install \"abstractcore[media]\"")
                     api_messages.append({"role": "user", "content": prompt})
                 except Exception as e:
                     self.logger.warning(f"Failed to process media content: {e}")
@@ -338,6 +381,50 @@ class AnthropicProvider(BaseProvider):
             else:
                 api_messages.append({"role": "user", "content": prompt})
 
+        # If media is present but no multimodal message was created (common when prompt="" and the
+        # caller provided the request in `messages`), attach media to the last plain user message.
+        if media:
+            try:
+                has_image = False
+                for m in api_messages:
+                    if not isinstance(m, dict):
+                        continue
+                    if m.get("role") != "user":
+                        continue
+                    c = m.get("content")
+                    if not isinstance(c, list):
+                        continue
+                    for b in c:
+                        if isinstance(b, dict) and b.get("type") == "image":
+                            has_image = True
+                            break
+                    if has_image:
+                        break
+
+                if not has_image:
+                    from ..media.handlers import AnthropicMediaHandler
+
+                    media_handler = AnthropicMediaHandler(self.model_capabilities)
+                    idx: Optional[int] = None
+                    for i in range(len(api_messages) - 1, -1, -1):
+                        m = api_messages[i]
+                        if not isinstance(m, dict):
+                            continue
+                        if m.get("role") != "user":
+                            continue
+                        if isinstance(m.get("content"), str):
+                            idx = i
+                            break
+                    if idx is None:
+                        api_messages.append(media_handler.create_multimodal_message("", media))
+                    else:
+                        text0 = str(api_messages[idx].get("content") or "")
+                        api_messages[idx] = media_handler.create_multimodal_message(text0, media)
+            except ImportError:
+                self.logger.warning("Media processing not available. Install with: pip install \"abstractcore[media]\"")
+            except Exception as e:
+                self.logger.warning(f"Failed to process media content: {e}")
+
         # Prepare API call parameters (same logic as sync)
         generation_kwargs = self._prepare_generation_kwargs(**kwargs)
         max_output_tokens = self._get_provider_max_tokens_param(generation_kwargs)
@@ -346,7 +433,7 @@ class AnthropicProvider(BaseProvider):
             "model": self.model,
             "messages": api_messages,
             "max_tokens": max_output_tokens,
-            "temperature": kwargs.get("temperature", self.temperature),
+            "temperature": generation_kwargs.get("temperature", self.temperature),
             "stream": stream
         }
 
@@ -363,7 +450,7 @@ class AnthropicProvider(BaseProvider):
             call_params["top_k"] = kwargs.get("top_k", self.top_k)
 
         # Handle seed parameter (Anthropic doesn't support seed natively)
-        seed_value = kwargs.get("seed", self.seed)
+        seed_value = generation_kwargs.get("seed")
         if seed_value is not None:
             import warnings
             warnings.warn(
@@ -455,7 +542,7 @@ class AnthropicProvider(BaseProvider):
         except Exception as e:
             raise
 
-    def unload(self) -> None:
+    def unload_model(self, model_name: str) -> None:
         """Close async client if it was created."""
         if self._async_client is not None:
             import asyncio
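
The attach-media block is duplicated verbatim in the sync and async paths. Its two scans are easier to see distilled into standalone helpers (a sketch using only the message shapes visible in the diff; the helper names are mine, not the library's):

    from typing import Any, Dict, List, Optional

    def has_image_block(api_messages: List[Dict[str, Any]]) -> bool:
        """True if any user message already carries an Anthropic-style image block."""
        for m in api_messages:
            if not (isinstance(m, dict) and m.get("role") == "user"):
                continue
            content = m.get("content")
            if isinstance(content, list) and any(
                isinstance(b, dict) and b.get("type") == "image" for b in content
            ):
                return True
        return False

    def last_plain_user_index(api_messages: List[Dict[str, Any]]) -> Optional[int]:
        """Index of the last user message whose content is still a plain string."""
        for i in range(len(api_messages) - 1, -1, -1):
            m = api_messages[i]
            if isinstance(m, dict) and m.get("role") == "user" and isinstance(m.get("content"), str):
                return i
        return None

If no message carries an image, the provider upgrades the last plain user message into a multimodal one (or appends a fresh one), so media passed alongside a messages-style request is no longer silently dropped.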