abstractcore 2.9.1__py3-none-any.whl → 2.11.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractcore/__init__.py +7 -27
- abstractcore/apps/extractor.py +33 -100
- abstractcore/apps/intent.py +19 -0
- abstractcore/apps/judge.py +20 -1
- abstractcore/apps/summarizer.py +20 -1
- abstractcore/architectures/detection.py +34 -1
- abstractcore/architectures/response_postprocessing.py +313 -0
- abstractcore/assets/architecture_formats.json +38 -8
- abstractcore/assets/model_capabilities.json +781 -160
- abstractcore/compression/__init__.py +1 -2
- abstractcore/compression/glyph_processor.py +6 -4
- abstractcore/config/main.py +31 -19
- abstractcore/config/manager.py +389 -11
- abstractcore/config/vision_config.py +5 -5
- abstractcore/core/interface.py +151 -3
- abstractcore/core/session.py +16 -10
- abstractcore/download.py +1 -1
- abstractcore/embeddings/manager.py +20 -6
- abstractcore/endpoint/__init__.py +2 -0
- abstractcore/endpoint/app.py +458 -0
- abstractcore/mcp/client.py +3 -1
- abstractcore/media/__init__.py +52 -17
- abstractcore/media/auto_handler.py +42 -22
- abstractcore/media/base.py +44 -1
- abstractcore/media/capabilities.py +12 -33
- abstractcore/media/enrichment.py +105 -0
- abstractcore/media/handlers/anthropic_handler.py +19 -28
- abstractcore/media/handlers/local_handler.py +124 -70
- abstractcore/media/handlers/openai_handler.py +19 -31
- abstractcore/media/processors/__init__.py +4 -2
- abstractcore/media/processors/audio_processor.py +57 -0
- abstractcore/media/processors/office_processor.py +8 -3
- abstractcore/media/processors/pdf_processor.py +46 -3
- abstractcore/media/processors/text_processor.py +22 -24
- abstractcore/media/processors/video_processor.py +58 -0
- abstractcore/media/types.py +97 -4
- abstractcore/media/utils/image_scaler.py +20 -2
- abstractcore/media/utils/video_frames.py +219 -0
- abstractcore/media/vision_fallback.py +136 -22
- abstractcore/processing/__init__.py +32 -3
- abstractcore/processing/basic_deepsearch.py +15 -10
- abstractcore/processing/basic_intent.py +3 -2
- abstractcore/processing/basic_judge.py +3 -2
- abstractcore/processing/basic_summarizer.py +1 -1
- abstractcore/providers/__init__.py +3 -1
- abstractcore/providers/anthropic_provider.py +95 -8
- abstractcore/providers/base.py +1516 -81
- abstractcore/providers/huggingface_provider.py +546 -69
- abstractcore/providers/lmstudio_provider.py +35 -923
- abstractcore/providers/mlx_provider.py +382 -35
- abstractcore/providers/model_capabilities.py +5 -1
- abstractcore/providers/ollama_provider.py +99 -15
- abstractcore/providers/openai_compatible_provider.py +406 -180
- abstractcore/providers/openai_provider.py +188 -44
- abstractcore/providers/openrouter_provider.py +76 -0
- abstractcore/providers/registry.py +61 -5
- abstractcore/providers/streaming.py +138 -33
- abstractcore/providers/vllm_provider.py +92 -817
- abstractcore/server/app.py +461 -13
- abstractcore/server/audio_endpoints.py +139 -0
- abstractcore/server/vision_endpoints.py +1319 -0
- abstractcore/structured/handler.py +316 -41
- abstractcore/tools/common_tools.py +5501 -2012
- abstractcore/tools/comms_tools.py +1641 -0
- abstractcore/tools/core.py +37 -7
- abstractcore/tools/handler.py +4 -9
- abstractcore/tools/parser.py +49 -2
- abstractcore/tools/tag_rewriter.py +2 -1
- abstractcore/tools/telegram_tdlib.py +407 -0
- abstractcore/tools/telegram_tools.py +261 -0
- abstractcore/utils/cli.py +1085 -72
- abstractcore/utils/token_utils.py +2 -0
- abstractcore/utils/truncation.py +29 -0
- abstractcore/utils/version.py +3 -4
- abstractcore/utils/vlm_token_calculator.py +12 -2
- abstractcore-2.11.2.dist-info/METADATA +562 -0
- abstractcore-2.11.2.dist-info/RECORD +133 -0
- {abstractcore-2.9.1.dist-info → abstractcore-2.11.2.dist-info}/WHEEL +1 -1
- {abstractcore-2.9.1.dist-info → abstractcore-2.11.2.dist-info}/entry_points.txt +1 -0
- abstractcore-2.9.1.dist-info/METADATA +0 -1190
- abstractcore-2.9.1.dist-info/RECORD +0 -119
- {abstractcore-2.9.1.dist-info → abstractcore-2.11.2.dist-info}/licenses/LICENSE +0 -0
- {abstractcore-2.9.1.dist-info → abstractcore-2.11.2.dist-info}/top_level.txt +0 -0
@@ -15,6 +15,7 @@ from ..core.types import GenerateResponse
 from ..tools.core import ToolCall
 from ..utils.jsonish import loads_dict_like
 from ..utils.structured_logging import get_logger
+from ..utils.truncation import preview_text
 
 logger = get_logger(__name__)
 
@@ -120,7 +121,7 @@ class IncrementalToolDetector:
             return [self.patterns["qwen"], self.patterns["llama"], self.patterns["xml"]]
 
         # XML-wrapped tools.
-        if tool_format
+        if tool_format in {"xml", "glm_xml"}:
             return [self.patterns["xml"], self.patterns["llama"], self.patterns["qwen"]]
 
         # LLaMA-style prompted tools.
@@ -420,26 +421,54 @@ class IncrementalToolDetector:
         if not json_content or not json_content.strip():
             return None
 
+        cleaned = json_content.strip()
+
+        # Handle missing braces (best-effort).
+        if cleaned.count("{") > cleaned.count("}"):
+            missing = cleaned.count("{") - cleaned.count("}")
+            cleaned += "}" * missing
+
+        tool_data: Optional[Dict[str, Any]] = None
         try:
-
+            tool_data = loads_dict_like(cleaned)
+        except Exception as e:
+            logger.debug(f"Tool JSON-ish parse error: {e}, content: {repr(json_content)}")
+            tool_data = None
 
-
-
-            missing_braces = cleaned_json.count('{') - cleaned_json.count('}')
-            cleaned_json += '}' * missing_braces
+        if not isinstance(tool_data, dict):
+            return None
 
-
+        name: Any = tool_data.get("name")
+        arguments: Any = tool_data.get("arguments")
+        call_id: Any = tool_data.get("call_id") or tool_data.get("id")
+
+        # OpenAI-style wrapper payload: {"id":"...","type":"function","function":{"name":...,"arguments":"{...}"}}
+        function = tool_data.get("function") if isinstance(tool_data.get("function"), dict) else None
+        if function:
+            if not isinstance(name, str) or not name.strip():
+                name = function.get("name")
+            if arguments is None:
+                arguments = function.get("arguments")
+
+        # Anthropic-ish key used by some tool payloads.
+        if arguments is None and "input" in tool_data:
+            arguments = tool_data.get("input")
+
+        # Normalize arguments to a dict.
+        if isinstance(arguments, str):
+            parsed_args = loads_dict_like(arguments)
+            arguments = parsed_args if isinstance(parsed_args, dict) else {}
+        if not isinstance(arguments, dict):
+            arguments = {}
+
+        if not isinstance(name, str) or not name.strip():
+            return None
 
-
-
-
-                arguments=tool_data.get("arguments", {}),
-                call_id=tool_data.get("id")
-            )
-        except json.JSONDecodeError as e:
-            logger.debug(f"JSON parse error: {e}, content: {repr(json_content)}")
+        call_id_str: Optional[str] = None
+        if isinstance(call_id, str) and call_id.strip():
+            call_id_str = call_id.strip()
 
-            return
+        return ToolCall(name=name.strip(), arguments=arguments, call_id=call_id_str)
 
     def finalize(self) -> List[ToolCall]:
         """Finalize and return any remaining tool calls."""
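The rewritten parser above accepts flat, OpenAI-wrapped, and Anthropic-style tool payloads and repairs truncated JSON before extracting a (name, arguments, call_id) triple. The sketch below mirrors that logic in a self-contained form; json.loads stands in for AbstractCore's internal loads_dict_like helper, and normalize_tool_call is an illustrative name, not part of the package API.

# Minimal sketch of the payload normalization in the hunk above.
# Assumption: json.loads stands in for the internal loads_dict_like helper.
import json
from typing import Any, Dict, Optional, Tuple


def normalize_tool_call(payload: str) -> Optional[Tuple[str, Dict[str, Any], Optional[str]]]:
    """Return (name, arguments, call_id) for flat, OpenAI-wrapped, or Anthropic-style payloads."""
    cleaned = payload.strip()
    # Best-effort repair of truncated JSON: append any missing closing braces.
    if cleaned.count("{") > cleaned.count("}"):
        cleaned += "}" * (cleaned.count("{") - cleaned.count("}"))
    try:
        data = json.loads(cleaned)
    except Exception:
        return None
    if not isinstance(data, dict):
        return None

    name = data.get("name")
    arguments = data.get("arguments")
    call_id = data.get("call_id") or data.get("id")

    # OpenAI-style wrapper: {"id": ..., "type": "function", "function": {"name": ..., "arguments": "{...}"}}
    function = data.get("function") if isinstance(data.get("function"), dict) else None
    if function:
        name = name if isinstance(name, str) and name.strip() else function.get("name")
        arguments = function.get("arguments") if arguments is None else arguments

    # Anthropic-style payloads carry arguments under "input".
    if arguments is None and "input" in data:
        arguments = data.get("input")

    # Arguments may arrive as a JSON string; normalize to a dict.
    if isinstance(arguments, str):
        try:
            arguments = json.loads(arguments)
        except Exception:
            arguments = {}
    if not isinstance(arguments, dict):
        arguments = {}

    if not isinstance(name, str) or not name.strip():
        return None
    cid = call_id.strip() if isinstance(call_id, str) and call_id.strip() else None
    return name.strip(), arguments, cid


# All three shapes normalize to ("get_weather", {"city": "Paris"}, ...):
print(normalize_tool_call('{"name": "get_weather", "arguments": {"city": "Paris"}'))  # missing brace repaired
print(normalize_tool_call('{"id": "c1", "type": "function", "function": {"name": "get_weather", "arguments": "{\\"city\\": \\"Paris\\"}"}}'))
print(normalize_tool_call('{"name": "get_weather", "input": {"city": "Paris"}}'))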
@@ -560,7 +589,47 @@ class UnifiedStreamProcessor:
             GenerateResponse: Processed chunks with rewritten tags
         """
         try:
+            def _canonical_tool_call_key(call: Dict[str, Any]) -> Optional[tuple]:
+                """Best-effort key for deduplicating canonical tool-call payloads."""
+                name = call.get("name")
+                if not isinstance(name, str) or not name.strip():
+                    return None
+
+                call_id = call.get("call_id") or call.get("id")
+                call_id_norm: Optional[str]
+                if isinstance(call_id, str) and call_id.strip():
+                    call_id_norm = call_id.strip()
+                else:
+                    call_id_norm = None
+
+                args = call.get("arguments")
+                if isinstance(args, dict):
+                    try:
+                        args_norm = json.dumps(args, sort_keys=True, separators=(",", ":"))
+                    except Exception:
+                        args_norm = str(args)
+                else:
+                    args_norm = str(args)
+
+                return (name.strip(), args_norm, call_id_norm)
+
             for chunk in response_stream:
+                # Preserve provider-emitted tool calls (native tools / server-side tool_calls).
+                incoming_tool_calls = (
+                    chunk.tool_calls
+                    if isinstance(getattr(chunk, "tool_calls", None), list) and chunk.tool_calls
+                    else None
+                )
+
+                incoming_tool_call_keys = set()
+                if incoming_tool_calls:
+                    for call in incoming_tool_calls:
+                        if not isinstance(call, dict):
+                            continue
+                        key = _canonical_tool_call_key(call)
+                        if key:
+                            incoming_tool_call_keys.add(key)
+
                 if not chunk.content:
                     yield chunk
                     continue
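The helper added above builds an order-insensitive identity for a tool call so provider-emitted and text-parsed duplicates collide. A standalone sketch of the same idea (in the package it is a closure inside the stream loop, not a module-level function):

# Sketch of the deduplication key introduced above.
import json
from typing import Any, Dict, Optional, Tuple


def canonical_tool_call_key(call: Dict[str, Any]) -> Optional[Tuple[str, str, Optional[str]]]:
    name = call.get("name")
    if not isinstance(name, str) or not name.strip():
        return None
    call_id = call.get("call_id") or call.get("id")
    call_id = call_id.strip() if isinstance(call_id, str) and call_id.strip() else None
    args = call.get("arguments")
    if isinstance(args, dict):
        # sort_keys makes the key independent of argument ordering.
        args_norm = json.dumps(args, sort_keys=True, separators=(",", ":"))
    else:
        args_norm = str(args)
    return name.strip(), args_norm, call_id


a = {"name": "search", "arguments": {"q": "llm", "limit": 5}}
b = {"name": "search", "arguments": {"limit": 5, "q": "llm"}}
assert canonical_tool_call_key(a) == canonical_tool_call_key(b)  # same call, different key order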
@@ -586,7 +655,25 @@ class UnifiedStreamProcessor:
                         model=chunk.model,
                         finish_reason=chunk.finish_reason,
                         usage=chunk.usage,
-                        raw_response=chunk.raw_response
+                        raw_response=chunk.raw_response,
+                        metadata=chunk.metadata,
+                        tool_calls=incoming_tool_calls,
+                    )
+
+                    # If we emitted content alongside provider-emitted tool calls, do not emit them again.
+                    incoming_tool_calls = None
+
+                # If the incoming chunk had tool_calls but we did not emit any content (buffering/tag parsing),
+                # still surface the tool_calls to downstream hosts.
+                if incoming_tool_calls:
+                    yield GenerateResponse(
+                        content="",
+                        tool_calls=incoming_tool_calls,
+                        model=chunk.model,
+                        finish_reason=chunk.finish_reason,
+                        usage=chunk.usage,
+                        raw_response=chunk.raw_response,
+                        metadata=chunk.metadata,
                     )
 
                 # Yield tool calls for server processing
@@ -601,14 +688,25 @@ class UnifiedStreamProcessor:
                         for tc in completed_tools
                         if getattr(tc, "name", None)
                     ]
-
-
-
-
-
-
-
-
+                    if incoming_tool_call_keys:
+                        tool_payload = [
+                            call
+                            for call in tool_payload
+                            if (
+                                isinstance(call, dict)
+                                and _canonical_tool_call_key(call) not in incoming_tool_call_keys
+                            )
+                        ]
+                    if tool_payload:
+                        yield GenerateResponse(
+                            content="",
+                            tool_calls=tool_payload,
+                            model=chunk.model,
+                            finish_reason=chunk.finish_reason,
+                            usage=chunk.usage,
+                            raw_response=chunk.raw_response,
+                            metadata=chunk.metadata,
+                        )
 
             # Finalize - get any remaining tools and handle remaining content
             final_tools = self.detector.finalize()
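The block above filters the detector's text-parsed tool calls against the keys of calls the provider already emitted natively, so downstream hosts see each call once. A self-contained sketch with illustrative data; _key is a hypothetical stand-in for the _canonical_tool_call_key closure:

import json

def _key(call):
    """Order-insensitive identity for a tool call: (name, canonical arguments, call_id)."""
    args = call.get("arguments")
    args_norm = json.dumps(args, sort_keys=True) if isinstance(args, dict) else str(args)
    return (call.get("name"), args_norm, call.get("call_id") or call.get("id"))

provider_calls = [{"name": "search", "arguments": {"q": "llm"}, "id": "call_1"}]
detector_calls = [
    {"name": "search", "arguments": {"q": "llm"}, "call_id": "call_1"},   # already emitted natively
    {"name": "fetch_page", "arguments": {"url": "https://example.com"}},  # new, text-parsed call
]

seen = {_key(c) for c in provider_calls if isinstance(c, dict)}
tool_payload = [c for c in detector_calls if isinstance(c, dict) and _key(c) not in seen]
print(tool_payload)  # only the fetch_page call remains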
@@ -786,9 +884,11 @@ class UnifiedStreamProcessor:
             # Use direct text rewriting since we have complete tool calls
             rewritten = self.tag_rewriter.rewrite_text(content)
             if rewritten != content:
-                logger.debug(
+                logger.debug(
+                    f"Tag rewriting successful: {preview_text(content, max_chars=50)} -> {preview_text(rewritten, max_chars=50)}"
+                )
             else:
-                logger.debug(f"Tag rewriting had no effect on: {content
+                logger.debug(f"Tag rewriting had no effect on: {preview_text(content, max_chars=50)}")
             return rewritten
         except Exception as e:
             logger.debug(f"Tag rewriting failed: {e}")
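The debug lines now go through preview_text instead of raw slicing. A tiny stand-in, assuming it truncates to max_chars and marks the cut; the real helper lives in abstractcore/utils/truncation.py and may differ in detail:

# Assumed behavior of preview_text, for illustration only.
def preview_text(text: str, max_chars: int = 50) -> str:
    return text if len(text) <= max_chars else text[:max_chars] + "..."

print(preview_text("a" * 80))  # first 50 characters plus an ellipsis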
@@ -831,8 +931,8 @@ class UnifiedStreamProcessor:
                 # Extract JSON content
                 json_content = match.group(1).strip()
 
-                # Parse the JSON to validate and extract fields
-                tool_data =
+                # Parse the JSON-ish payload to validate and extract fields.
+                tool_data = loads_dict_like(json_content)
 
                 if not isinstance(tool_data, dict) or "name" not in tool_data:
                     logger.warning(f"Invalid tool call JSON: {json_content[:100]}")
@@ -847,7 +947,15 @@ class UnifiedStreamProcessor:
                     "type": "function",
                     "function": {
                         "name": tool_data["name"],
-                        "arguments": json.dumps(
+                        "arguments": json.dumps(
+                            (tool_data.get("arguments") if isinstance(tool_data.get("arguments"), dict) else None)
+                            or (
+                                loads_dict_like(tool_data.get("arguments"))
+                                if isinstance(tool_data.get("arguments"), str)
+                                else None
+                            )
+                            or {}
+                        ),
                     }
                 }
 
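When converting a parsed call into the OpenAI wire format, the new code coalesces arguments that may arrive as a dict, a JSON string, or nothing before serializing them into the function.arguments field. A sketch of that coalescing, with json.loads standing in for the internal loads_dict_like helper and an illustrative function name:

# Sketch of the arguments coalescing shown above.
import json
from typing import Any


def arguments_as_json(raw: Any) -> str:
    """Serialize arguments that may be a dict, a JSON string, or missing."""
    if isinstance(raw, dict):
        args = raw
    elif isinstance(raw, str):
        try:
            parsed = json.loads(raw)
        except Exception:
            parsed = None
        args = parsed if isinstance(parsed, dict) else {}
    else:
        args = {}
    return json.dumps(args)


print(arguments_as_json({"city": "Paris"}))    # '{"city": "Paris"}'
print(arguments_as_json('{"city": "Paris"}'))  # same, decoded then re-encoded
print(arguments_as_json(None))                 # '{}'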
@@ -857,9 +965,6 @@ class UnifiedStreamProcessor:
 
                 logger.debug(f"Converted {format_type} tool call to OpenAI format: {openai_json[:100]}")
 
-            except json.JSONDecodeError as e:
-                logger.warning(f"Failed to parse tool call JSON: {e}")
-                continue
             except Exception as e:
                 logger.error(f"Error converting tool call to OpenAI format: {e}")
                 continue