abstractcore 2.3.9__py3-none-any.whl → 2.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68) hide show
  1. {abstractllm → abstractcore}/__init__.py +2 -2
  2. abstractcore/apps/__init__.py +1 -0
  3. {abstractllm → abstractcore}/apps/__main__.py +6 -6
  4. {abstractllm → abstractcore}/apps/extractor.py +16 -16
  5. {abstractllm → abstractcore}/apps/judge.py +18 -18
  6. {abstractllm → abstractcore}/apps/summarizer.py +11 -11
  7. {abstractllm → abstractcore}/core/__init__.py +2 -2
  8. {abstractllm → abstractcore}/core/enums.py +1 -1
  9. {abstractllm → abstractcore}/core/factory.py +8 -8
  10. {abstractllm → abstractcore}/core/interface.py +3 -3
  11. {abstractllm → abstractcore}/core/retry.py +1 -1
  12. {abstractllm → abstractcore}/core/session.py +6 -6
  13. {abstractllm → abstractcore}/core/types.py +1 -1
  14. {abstractllm → abstractcore}/embeddings/__init__.py +1 -1
  15. {abstractllm → abstractcore}/embeddings/manager.py +5 -5
  16. {abstractllm → abstractcore}/events/__init__.py +1 -1
  17. abstractcore/exceptions/__init__.py +125 -0
  18. abstractcore/media/__init__.py +151 -0
  19. {abstractllm → abstractcore}/processing/basic_extractor.py +5 -5
  20. {abstractllm → abstractcore}/processing/basic_judge.py +5 -5
  21. {abstractllm → abstractcore}/processing/basic_summarizer.py +7 -7
  22. {abstractllm → abstractcore}/providers/base.py +4 -4
  23. {abstractllm → abstractcore}/providers/huggingface_provider.py +1 -1
  24. {abstractllm → abstractcore}/providers/streaming.py +8 -3
  25. {abstractllm → abstractcore}/structured/__init__.py +1 -1
  26. {abstractllm → abstractcore}/tools/__init__.py +2 -2
  27. {abstractllm → abstractcore}/tools/common_tools.py +4 -4
  28. {abstractllm → abstractcore}/utils/__init__.py +1 -1
  29. {abstractllm → abstractcore}/utils/cli.py +13 -13
  30. {abstractllm → abstractcore}/utils/structured_logging.py +3 -3
  31. {abstractllm → abstractcore}/utils/token_utils.py +1 -1
  32. {abstractllm → abstractcore}/utils/version.py +1 -1
  33. {abstractcore-2.3.9.dist-info → abstractcore-2.4.1.dist-info}/METADATA +20 -16
  34. abstractcore-2.4.1.dist-info/RECORD +64 -0
  35. abstractcore-2.4.1.dist-info/entry_points.txt +7 -0
  36. abstractcore-2.4.1.dist-info/top_level.txt +1 -0
  37. abstractcore-2.3.9.dist-info/RECORD +0 -62
  38. abstractcore-2.3.9.dist-info/entry_points.txt +0 -7
  39. abstractcore-2.3.9.dist-info/top_level.txt +0 -1
  40. abstractllm/apps/__init__.py +0 -1
  41. {abstractllm → abstractcore}/architectures/__init__.py +0 -0
  42. {abstractllm → abstractcore}/architectures/detection.py +0 -0
  43. {abstractllm → abstractcore}/architectures/enums.py +0 -0
  44. {abstractllm → abstractcore}/assets/architecture_formats.json +0 -0
  45. {abstractllm → abstractcore}/assets/model_capabilities.json +0 -0
  46. {abstractllm → abstractcore}/assets/session_schema.json +0 -0
  47. {abstractllm → abstractcore}/embeddings/models.py +0 -0
  48. {abstractllm → abstractcore}/processing/__init__.py +0 -0
  49. {abstractllm → abstractcore}/providers/__init__.py +0 -0
  50. {abstractllm → abstractcore}/providers/anthropic_provider.py +0 -0
  51. {abstractllm → abstractcore}/providers/lmstudio_provider.py +0 -0
  52. {abstractllm → abstractcore}/providers/mlx_provider.py +0 -0
  53. {abstractllm → abstractcore}/providers/mock_provider.py +0 -0
  54. {abstractllm → abstractcore}/providers/ollama_provider.py +0 -0
  55. {abstractllm → abstractcore}/providers/openai_provider.py +0 -0
  56. {abstractllm → abstractcore}/server/__init__.py +0 -0
  57. {abstractllm → abstractcore}/server/app.py +0 -0
  58. {abstractllm → abstractcore}/structured/handler.py +0 -0
  59. {abstractllm → abstractcore}/structured/retry.py +0 -0
  60. {abstractllm → abstractcore}/tools/core.py +0 -0
  61. {abstractllm → abstractcore}/tools/handler.py +0 -0
  62. {abstractllm → abstractcore}/tools/parser.py +0 -0
  63. {abstractllm → abstractcore}/tools/registry.py +0 -0
  64. {abstractllm → abstractcore}/tools/syntax_rewriter.py +0 -0
  65. {abstractllm → abstractcore}/tools/tag_rewriter.py +0 -0
  66. {abstractllm → abstractcore}/utils/self_fixes.py +0 -0
  67. {abstractcore-2.3.9.dist-info → abstractcore-2.4.1.dist-info}/WHEEL +0 -0
  68. {abstractcore-2.3.9.dist-info → abstractcore-2.4.1.dist-info}/licenses/LICENSE +0 -0
@@ -1,6 +1,6 @@
1
1
  # -*- coding: utf-8 -*-
2
2
  """
3
- AbstractLLM - Unified interface to all LLM providers with essential infrastructure.
3
+ AbstractCore - Unified interface to all LLM providers with essential infrastructure.
4
4
 
5
5
  Key Features:
6
6
  • Multi-provider support (OpenAI, Anthropic, Ollama, HuggingFace, MLX, LMStudio)
@@ -11,7 +11,7 @@ Key Features:
11
11
  • Event system for observability
12
12
 
13
13
  Quick Start:
14
- from abstractllm import create_llm
14
+ from abstractcore import create_llm
15
15
 
16
16
  # Unified token management across all providers
17
17
  llm = create_llm(
@@ -0,0 +1 @@
1
+ # AbstractCore CLI Applications
@@ -1,9 +1,9 @@
1
1
  #!/usr/bin/env python3
2
2
  """
3
- AbstractLLM Apps - Command-line interface launcher
3
+ AbstractCore Apps - Command-line interface launcher
4
4
 
5
5
  Usage:
6
- python -m abstractllm.apps <app_name> [options]
6
+ python -m abstractcore.apps <app_name> [options]
7
7
 
8
8
  Available apps:
9
9
  summarizer - Document summarization tool
@@ -11,10 +11,10 @@ Available apps:
11
11
  judge - Text evaluation and scoring tool
12
12
 
13
13
  Examples:
14
- python -m abstractllm.apps summarizer document.txt
15
- python -m abstractllm.apps extractor report.txt --format json-ld
16
- python -m abstractllm.apps judge essay.txt --criteria clarity,accuracy
17
- python -m abstractllm.apps <app> --help
14
+ python -m abstractcore.apps summarizer document.txt
15
+ python -m abstractcore.apps extractor report.txt --format json-ld
16
+ python -m abstractcore.apps judge essay.txt --criteria clarity,accuracy
17
+ python -m abstractcore.apps <app> --help
18
18
  """
19
19
 
20
20
  import sys
@@ -1,9 +1,9 @@
1
1
  #!/usr/bin/env python3
2
2
  """
3
- AbstractLLM Entity Extractor CLI Application
3
+ AbstractCore Entity Extractor CLI Application
4
4
 
5
5
  Usage:
6
- python -m abstractllm.apps.extractor <file_path> [options]
6
+ python -m abstractcore.apps.extractor <file_path> [options]
7
7
 
8
8
  Options:
9
9
  --focus <focus> Specific focus area for extraction (e.g., "technology", "business", "medical")
@@ -27,12 +27,12 @@ Options:
27
27
  --help Show this help message
28
28
 
29
29
  Examples:
30
- python -m abstractllm.apps.extractor document.pdf
31
- python -m abstractllm.apps.extractor report.txt --focus technology --style structured --verbose
32
- python -m abstractllm.apps.extractor data.md --entity-types person,organization --output kg.jsonld
33
- python -m abstractllm.apps.extractor large.txt --fast --minified --verbose # Fast, compact output
34
- python -m abstractllm.apps.extractor report.txt --length detailed --provider openai --model gpt-4o-mini
35
- python -m abstractllm.apps.extractor doc.txt --iterate 3 --verbose # 3 refinement passes for higher quality
30
+ python -m abstractcore.apps.extractor document.pdf
31
+ python -m abstractcore.apps.extractor report.txt --focus technology --style structured --verbose
32
+ python -m abstractcore.apps.extractor data.md --entity-types person,organization --output kg.jsonld
33
+ python -m abstractcore.apps.extractor large.txt --fast --minified --verbose # Fast, compact output
34
+ python -m abstractcore.apps.extractor report.txt --length detailed --provider openai --model gpt-4o-mini
35
+ python -m abstractcore.apps.extractor doc.txt --iterate 3 --verbose # 3 refinement passes for higher quality
36
36
  """
37
37
 
38
38
  import argparse
@@ -155,17 +155,17 @@ def parse_extraction_length(length_str: Optional[str]) -> str:
155
155
  def main():
156
156
  """Main CLI function"""
157
157
  parser = argparse.ArgumentParser(
158
- description="AbstractLLM Entity & Relationship Extractor - Default: qwen3:4b-instruct-2507-q4_K_M (requires Ollama)",
158
+ description="AbstractCore Entity & Relationship Extractor - Default: qwen3:4b-instruct-2507-q4_K_M (requires Ollama)",
159
159
  formatter_class=argparse.RawDescriptionHelpFormatter,
160
160
  epilog="""
161
161
  Examples:
162
- python -m abstractllm.apps.extractor document.pdf
163
- python -m abstractllm.apps.extractor report.txt --focus=technology --style=structured --verbose
164
- python -m abstractllm.apps.extractor data.md --entity-types=person,organization --output=kg.jsonld
165
- python -m abstractllm.apps.extractor large.txt --length=detailed --fast --minified --verbose
166
- python -m abstractllm.apps.extractor doc.txt --iterate=3 --verbose # Iterative refinement for quality
167
- python -m abstractllm.apps.extractor doc.txt --format=triples --verbose # RDF triples output
168
- python -m abstractllm.apps.extractor doc.txt --format=triples --output=triples.txt # Simple triples
162
+ python -m abstractcore.apps.extractor document.pdf
163
+ python -m abstractcore.apps.extractor report.txt --focus=technology --style=structured --verbose
164
+ python -m abstractcore.apps.extractor data.md --entity-types=person,organization --output=kg.jsonld
165
+ python -m abstractcore.apps.extractor large.txt --length=detailed --fast --minified --verbose
166
+ python -m abstractcore.apps.extractor doc.txt --iterate=3 --verbose # Iterative refinement for quality
167
+ python -m abstractcore.apps.extractor doc.txt --format=triples --verbose # RDF triples output
168
+ python -m abstractcore.apps.extractor doc.txt --format=triples --output=triples.txt # Simple triples
169
169
 
170
170
  Supported file types: .txt, .md, .py, .js, .html, .json, .csv, and most text-based files
171
171
 
@@ -1,9 +1,9 @@
1
1
  #!/usr/bin/env python3
2
2
  """
3
- AbstractLLM Basic Judge CLI Application
3
+ AbstractCore Basic Judge CLI Application
4
4
 
5
5
  Usage:
6
- python -m abstractllm.apps.judge <file_path_or_text> [file2] [file3] ... [options]
6
+ python -m abstractcore.apps.judge <file_path_or_text> [file2] [file3] ... [options]
7
7
 
8
8
  Options:
9
9
  --context <context> Evaluation context description (e.g., "code review", "documentation assessment")
@@ -25,18 +25,18 @@ Options:
25
25
 
26
26
  Examples:
27
27
  # Single file or text
28
- python -m abstractllm.apps.judge "This code is well-structured and solves the problem efficiently."
29
- python -m abstractllm.apps.judge document.py --context "code review" --criteria clarity,soundness,effectiveness
28
+ python -m abstractcore.apps.judge "This code is well-structured and solves the problem efficiently."
29
+ python -m abstractcore.apps.judge document.py --context "code review" --criteria clarity,soundness,effectiveness
30
30
 
31
31
  # Multiple files (evaluated sequentially to avoid context overflow)
32
- python -m abstractllm.apps.judge file1.py file2.py file3.py --context "code review" --output assessments.json
33
- python -m abstractllm.apps.judge *.py --context "Python code review" --format plain
34
- python -m abstractllm.apps.judge docs/*.md --context "documentation review" --criteria clarity,completeness
32
+ python -m abstractcore.apps.judge file1.py file2.py file3.py --context "code review" --output assessments.json
33
+ python -m abstractcore.apps.judge *.py --context "Python code review" --format plain
34
+ python -m abstractcore.apps.judge docs/*.md --context "documentation review" --criteria clarity,completeness
35
35
 
36
36
  # Other options
37
- python -m abstractllm.apps.judge proposal.md --focus "technical accuracy,completeness,examples" --output assessment.json
38
- python -m abstractllm.apps.judge content.txt --reference ideal_solution.txt --format plain --verbose
39
- python -m abstractllm.apps.judge text.md --provider openai --model gpt-4o-mini --temperature 0.05
37
+ python -m abstractcore.apps.judge proposal.md --focus "technical accuracy,completeness,examples" --output assessment.json
38
+ python -m abstractcore.apps.judge content.txt --reference ideal_solution.txt --format plain --verbose
39
+ python -m abstractcore.apps.judge text.md --provider openai --model gpt-4o-mini --temperature 0.05
40
40
  """
41
41
 
42
42
  import argparse
@@ -250,22 +250,22 @@ def format_assessment_plain(assessment: dict) -> str:
250
250
  def main():
251
251
  """Main CLI function"""
252
252
  parser = argparse.ArgumentParser(
253
- description="AbstractLLM Basic Judge - LLM-as-a-judge for objective evaluation (Default: qwen3:4b-instruct-2507-q4_K_M)",
253
+ description="AbstractCore Basic Judge - LLM-as-a-judge for objective evaluation (Default: qwen3:4b-instruct-2507-q4_K_M)",
254
254
  formatter_class=argparse.RawDescriptionHelpFormatter,
255
255
  epilog="""
256
256
  Examples:
257
257
  # Single file or text
258
- python -m abstractllm.apps.judge "This code is well-structured."
259
- python -m abstractllm.apps.judge document.py --context "code review" --criteria clarity,soundness
260
- python -m abstractllm.apps.judge proposal.md --focus "technical accuracy,examples" --output assessment.json
258
+ python -m abstractcore.apps.judge "This code is well-structured."
259
+ python -m abstractcore.apps.judge document.py --context "code review" --criteria clarity,soundness
260
+ python -m abstractcore.apps.judge proposal.md --focus "technical accuracy,examples" --output assessment.json
261
261
 
262
262
  # Multiple files (evaluated sequentially)
263
- python -m abstractllm.apps.judge file1.py file2.py file3.py --context "code review" --format json
264
- python -m abstractllm.apps.judge docs/*.md --context "documentation review" --format plain
263
+ python -m abstractcore.apps.judge file1.py file2.py file3.py --context "code review" --format json
264
+ python -m abstractcore.apps.judge docs/*.md --context "documentation review" --format plain
265
265
 
266
266
  # Other options
267
- python -m abstractllm.apps.judge content.txt --reference ideal.txt --format plain --verbose
268
- python -m abstractllm.apps.judge text.md --provider openai --model gpt-4o-mini
267
+ python -m abstractcore.apps.judge content.txt --reference ideal.txt --format plain --verbose
268
+ python -m abstractcore.apps.judge text.md --provider openai --model gpt-4o-mini
269
269
 
270
270
  Available criteria:
271
271
  clarity, simplicity, actionability, soundness, innovation, effectiveness,
@@ -1,9 +1,9 @@
1
1
  #!/usr/bin/env python3
2
2
  """
3
- AbstractLLM Summarizer CLI Application
3
+ AbstractCore Summarizer CLI Application
4
4
 
5
5
  Usage:
6
- python -m abstractllm.apps.summarizer <file_path> [options]
6
+ python -m abstractcore.apps.summarizer <file_path> [options]
7
7
 
8
8
  Options:
9
9
  --style <style> Summary style (structured, narrative, objective, analytical, executive, conversational)
@@ -19,10 +19,10 @@ Options:
19
19
  --help Show this help message
20
20
 
21
21
  Examples:
22
- python -m abstractllm.apps.summarizer document.pdf
23
- python -m abstractllm.apps.summarizer report.txt --style executive --length brief --verbose
24
- python -m abstractllm.apps.summarizer data.md --focus "technical details" --output summary.txt
25
- python -m abstractllm.apps.summarizer large.txt --chunk-size 15000 --provider openai --model gpt-4o-mini
22
+ python -m abstractcore.apps.summarizer document.pdf
23
+ python -m abstractcore.apps.summarizer report.txt --style executive --length brief --verbose
24
+ python -m abstractcore.apps.summarizer data.md --focus "technical details" --output summary.txt
25
+ python -m abstractcore.apps.summarizer large.txt --chunk-size 15000 --provider openai --model gpt-4o-mini
26
26
  """
27
27
 
28
28
  import argparse
@@ -156,14 +156,14 @@ def format_summary_output(result) -> str:
156
156
  def main():
157
157
  """Main CLI function"""
158
158
  parser = argparse.ArgumentParser(
159
- description="AbstractLLM Document Summarizer - Default: gemma3:1b-it-qat (requires Ollama)",
159
+ description="AbstractCore Document Summarizer - Default: gemma3:1b-it-qat (requires Ollama)",
160
160
  formatter_class=argparse.RawDescriptionHelpFormatter,
161
161
  epilog="""
162
162
  Examples:
163
- python -m abstractllm.apps.summarizer document.pdf
164
- python -m abstractllm.apps.summarizer report.txt --style executive --length brief --verbose
165
- python -m abstractllm.apps.summarizer data.md --focus "technical details" --output summary.txt
166
- python -m abstractllm.apps.summarizer large.txt --chunk-size 15000 --provider openai --model gpt-4o-mini
163
+ python -m abstractcore.apps.summarizer document.pdf
164
+ python -m abstractcore.apps.summarizer report.txt --style executive --length brief --verbose
165
+ python -m abstractcore.apps.summarizer data.md --focus "technical details" --output summary.txt
166
+ python -m abstractcore.apps.summarizer large.txt --chunk-size 15000 --provider openai --model gpt-4o-mini
167
167
 
168
168
  Supported file types: .txt, .md, .py, .js, .html, .json, .csv, and most text-based files
169
169
 
@@ -12,7 +12,7 @@ from .factory import create_llm
12
12
  from .session import BasicSession
13
13
  from .types import GenerateResponse, Message
14
14
  from .enums import ModelParameter, ModelCapability, MessageRole
15
- from .interface import AbstractLLMInterface
15
+ from .interface import AbstractCoreInterface
16
16
 
17
17
  __all__ = [
18
18
  'create_llm',
@@ -22,5 +22,5 @@ __all__ = [
22
22
  'ModelParameter',
23
23
  'ModelCapability',
24
24
  'MessageRole',
25
- 'AbstractLLMInterface'
25
+ 'AbstractCoreInterface'
26
26
  ]
@@ -1,5 +1,5 @@
1
1
  """
2
- Enums for AbstractLLM.
2
+ Enums for AbstractCore.
3
3
  """
4
4
 
5
5
  from enum import Enum
@@ -3,11 +3,11 @@ Factory for creating LLM providers.
3
3
  """
4
4
 
5
5
  from typing import Optional
6
- from .interface import AbstractLLMInterface
6
+ from .interface import AbstractCoreInterface
7
7
  from ..exceptions import ModelNotFoundError, AuthenticationError, ProviderAPIError
8
8
 
9
9
 
10
- def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> AbstractLLMInterface:
10
+ def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> AbstractCoreInterface:
11
11
  """
12
12
  Create an LLM provider instance with unified token parameter support.
13
13
 
@@ -16,7 +16,7 @@ def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> Abstract
16
16
  model: Model name (optional, will use provider default)
17
17
  **kwargs: Additional configuration including token parameters
18
18
 
19
- Token Parameters (AbstractLLM Unified Standard):
19
+ Token Parameters (AbstractCore Unified Standard):
20
20
  max_tokens: Total context window budget (input + output combined)
21
21
  max_output_tokens: Maximum tokens reserved for generation (default: 2048)
22
22
  max_input_tokens: Maximum tokens for input (auto-calculated if not specified)
@@ -75,7 +75,7 @@ def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> Abstract
75
75
  from ..providers.openai_provider import OpenAIProvider
76
76
  return OpenAIProvider(model=model or "gpt-5-nano-2025-08-07", **kwargs)
77
77
  except ImportError:
78
- raise ImportError("OpenAI dependencies not installed. Install with: pip install abstractllm[openai]")
78
+ raise ImportError("OpenAI dependencies not installed. Install with: pip install abstractcore[openai]")
79
79
  except (ModelNotFoundError, AuthenticationError, ProviderAPIError) as e:
80
80
  # Re-raise provider exceptions cleanly
81
81
  raise e
@@ -85,7 +85,7 @@ def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> Abstract
85
85
  from ..providers.anthropic_provider import AnthropicProvider
86
86
  return AnthropicProvider(model=model or "claude-3-5-haiku-latest", **kwargs)
87
87
  except ImportError:
88
- raise ImportError("Anthropic dependencies not installed. Install with: pip install abstractllm[anthropic]")
88
+ raise ImportError("Anthropic dependencies not installed. Install with: pip install abstractcore[anthropic]")
89
89
  except (ModelNotFoundError, AuthenticationError, ProviderAPIError) as e:
90
90
  # Re-raise provider exceptions cleanly
91
91
  raise e
@@ -95,21 +95,21 @@ def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> Abstract
95
95
  from ..providers.ollama_provider import OllamaProvider
96
96
  return OllamaProvider(model=model or "qwen3-coder:30b", **kwargs)
97
97
  except ImportError:
98
- raise ImportError("Ollama dependencies not installed. Install with: pip install abstractllm[ollama]")
98
+ raise ImportError("Ollama dependencies not installed. Install with: pip install abstractcore[ollama]")
99
99
 
100
100
  elif provider.lower() == "huggingface":
101
101
  try:
102
102
  from ..providers.huggingface_provider import HuggingFaceProvider
103
103
  return HuggingFaceProvider(model=model or "Qwen/Qwen3-4B/", **kwargs)
104
104
  except ImportError:
105
- raise ImportError("HuggingFace dependencies not installed. Install with: pip install abstractllm[huggingface]")
105
+ raise ImportError("HuggingFace dependencies not installed. Install with: pip install abstractcore[huggingface]")
106
106
 
107
107
  elif provider.lower() == "mlx":
108
108
  try:
109
109
  from ..providers.mlx_provider import MLXProvider
110
110
  return MLXProvider(model=model or "mlx-community/Qwen3-4B", **kwargs)
111
111
  except ImportError:
112
- raise ImportError("MLX dependencies not installed. Install with: pip install abstractllm[mlx]")
112
+ raise ImportError("MLX dependencies not installed. Install with: pip install abstractcore[mlx]")
113
113
 
114
114
  elif provider.lower() == "lmstudio":
115
115
  try:
@@ -7,11 +7,11 @@ from typing import List, Dict, Any, Optional, Union, Iterator
7
7
  from .types import GenerateResponse, Message
8
8
 
9
9
 
10
- class AbstractLLMInterface(ABC):
10
+ class AbstractCoreInterface(ABC):
11
11
  """
12
12
  Abstract base class for all LLM providers.
13
13
 
14
- AbstractLLM Token Parameter Vocabulary (Unified Standard):
14
+ AbstractCore Token Parameter Vocabulary (Unified Standard):
15
15
  =========================================================
16
16
 
17
17
  • max_tokens: Total context window budget (input + output combined) - YOUR BUDGET
@@ -57,7 +57,7 @@ class AbstractLLMInterface(ABC):
57
57
 
58
58
  Provider Abstraction:
59
59
  ===================
60
- AbstractLLM handles provider-specific parameter mapping internally:
60
+ AbstractCore handles provider-specific parameter mapping internally:
61
61
  • OpenAI: max_tokens → max_completion_tokens (o1 models) or max_tokens (others)
62
62
  • Anthropic: max_output_tokens → max_tokens (output-focused API)
63
63
  • Google: max_output_tokens → max_output_tokens (direct mapping)
@@ -1,5 +1,5 @@
1
1
  """
2
- Production-ready retry strategies for AbstractLLM Core.
2
+ Production-ready retry strategies for AbstractCore.
3
3
 
4
4
  Implements SOTA exponential backoff with jitter and circuit breaker patterns
5
5
  based on 2025 best practices from AWS Architecture Blog, Tenacity principles,
@@ -10,7 +10,7 @@ import json
10
10
  import uuid
11
11
  from collections.abc import Generator
12
12
 
13
- from .interface import AbstractLLMInterface
13
+ from .interface import AbstractCoreInterface
14
14
  from .types import GenerateResponse, Message
15
15
  from .enums import MessageRole
16
16
 
@@ -25,7 +25,7 @@ class BasicSession:
25
25
  """
26
26
 
27
27
  def __init__(self,
28
- provider: Optional[AbstractLLMInterface] = None,
28
+ provider: Optional[AbstractCoreInterface] = None,
29
29
  system_prompt: Optional[str] = None,
30
30
  tools: Optional[List[Callable]] = None,
31
31
  timeout: Optional[float] = None,
@@ -255,7 +255,7 @@ class BasicSession:
255
255
  json.dump(data, f, indent=2)
256
256
 
257
257
  @classmethod
258
- def load(cls, filepath: Union[str, Path], provider: Optional[AbstractLLMInterface] = None,
258
+ def load(cls, filepath: Union[str, Path], provider: Optional[AbstractCoreInterface] = None,
259
259
  tools: Optional[List[Callable]] = None) -> 'BasicSession':
260
260
  """
261
261
  Load session from file with complete metadata restoration.
@@ -325,7 +325,7 @@ class BasicSession:
325
325
  }
326
326
 
327
327
  @classmethod
328
- def from_dict(cls, data: Dict[str, Any], provider: Optional[AbstractLLMInterface] = None,
328
+ def from_dict(cls, data: Dict[str, Any], provider: Optional[AbstractCoreInterface] = None,
329
329
  tools: Optional[List[Callable]] = None) -> 'BasicSession':
330
330
  """
331
331
  Create session from dictionary data (supports both new archive format and legacy format).
@@ -429,7 +429,7 @@ class BasicSession:
429
429
  def compact(self,
430
430
  preserve_recent: int = 6,
431
431
  focus: Optional[str] = None,
432
- compact_provider: Optional[AbstractLLMInterface] = None,
432
+ compact_provider: Optional[AbstractCoreInterface] = None,
433
433
  reason: str = "manual") -> 'BasicSession':
434
434
  """
435
435
  Compact chat history using SOTA 2025 best practices for conversation summarization.
@@ -675,7 +675,7 @@ class BasicSession:
675
675
  print(f"✅ Session compacted: {len(compacted.messages)} messages, ~{compacted.get_token_estimate()} tokens")
676
676
 
677
677
  def generate_summary(self, preserve_recent: int = 6, focus: Optional[str] = None,
678
- compact_provider: Optional[AbstractLLMInterface] = None) -> Dict[str, Any]:
678
+ compact_provider: Optional[AbstractCoreInterface] = None) -> Dict[str, Any]:
679
679
  """
680
680
  Generate a summary of the entire conversation and store it in session.summary.
681
681
 
@@ -1,5 +1,5 @@
1
1
  """
2
- Core types for AbstractLLM.
2
+ Core types for AbstractCore.
3
3
  """
4
4
 
5
5
  from typing import Optional, Dict, List, Any
@@ -1,5 +1,5 @@
1
1
  """
2
- Vector Embeddings for AbstractLLM Core
2
+ Vector Embeddings for AbstractCore
3
3
  =====================================
4
4
 
5
5
  Provides efficient text embedding with SOTA open-source models.
@@ -44,7 +44,7 @@ def _suppress_onnx_warnings():
44
44
  This suppresses the CoreML and node assignment warnings commonly seen on macOS.
45
45
  These warnings are informational only and don't impact performance or quality.
46
46
 
47
- To enable verbose ONNX logging for debugging, set: ABSTRACTLLM_ONNX_VERBOSE=1
47
+ To enable verbose ONNX logging for debugging, set: ABSTRACTCORE_ONNX_VERBOSE=1
48
48
  """
49
49
  with warnings.catch_warnings():
50
50
  # Suppress PyTorch ONNX registration warnings (harmless in PyTorch 2.8+)
@@ -73,8 +73,8 @@ def _suppress_onnx_warnings():
73
73
  import os
74
74
 
75
75
  # Allow users to enable verbose ONNX logging for debugging
76
- # Set ABSTRACTLLM_ONNX_VERBOSE=1 to see ONNX warnings for debugging
77
- if os.environ.get("ABSTRACTLLM_ONNX_VERBOSE", "0") != "1":
76
+ # Set ABSTRACTCORE_ONNX_VERBOSE=1 to see ONNX warnings for debugging
77
+ if os.environ.get("ABSTRACTCORE_ONNX_VERBOSE", "0") != "1":
78
78
  # Suppress the CoreML and node assignment warnings you may see on macOS
79
79
  # These are harmless informational messages that don't affect performance or quality:
80
80
  # - CoreML partitioning warnings: Normal behavior when model ops aren't all CoreML-compatible
@@ -132,7 +132,7 @@ class EmbeddingManager:
132
132
  model: Model identifier (HuggingFace model ID for HF provider, model name for others).
133
133
  provider: Embedding provider ('huggingface', 'ollama', 'lmstudio'). Defaults to 'huggingface'.
134
134
  backend: Inference backend for HuggingFace ('auto', 'pytorch', 'onnx', 'openvino')
135
- cache_dir: Directory for persistent cache. Defaults to ~/.abstractllm/embeddings
135
+ cache_dir: Directory for persistent cache. Defaults to ~/.abstractcore/embeddings
136
136
  cache_size: Maximum number of embeddings to cache in memory
137
137
  output_dims: Output dimensions for Matryoshka truncation (if supported by provider)
138
138
  trust_remote_code: Whether to trust remote code (HuggingFace only)
@@ -193,7 +193,7 @@ class EmbeddingManager:
193
193
  logger.info(f"Initialized LMStudio embedding provider with model: {model}")
194
194
 
195
195
  # Common setup for all providers
196
- self.cache_dir = Path(cache_dir) if cache_dir else Path.home() / ".abstractllm" / "embeddings"
196
+ self.cache_dir = Path(cache_dir) if cache_dir else Path.home() / ".abstractcore" / "embeddings"
197
197
  self.cache_dir.mkdir(parents=True, exist_ok=True)
198
198
  self.cache_size = cache_size
199
199
  self.output_dims = output_dims
@@ -1,5 +1,5 @@
1
1
  """
2
- Event system for AbstractLLM - OpenTelemetry compatible.
2
+ Event system for AbstractCore - OpenTelemetry compatible.
3
3
 
4
4
  This module provides a comprehensive event system for tracking LLM operations,
5
5
  including generation, tool calls, structured output, and performance metrics.
@@ -0,0 +1,125 @@
1
+ """
2
+ Custom exceptions for AbstractCore.
3
+ """
4
+
5
+
6
+ class AbstractCoreError(Exception):
7
+ """Base exception for AbstractCore"""
8
+ pass
9
+
10
+
11
+ class ProviderError(AbstractCoreError):
12
+ """Base exception for provider-related errors"""
13
+ pass
14
+
15
+
16
+ class ProviderAPIError(ProviderError):
17
+ """API call to provider failed"""
18
+ pass
19
+
20
+
21
+ class AuthenticationError(ProviderError):
22
+ """Authentication with provider failed"""
23
+ pass
24
+
25
+
26
+ # Alias for backward compatibility with old AbstractCore
27
+ Authentication = AuthenticationError
28
+
29
+
30
+ class RateLimitError(ProviderError):
31
+ """Rate limit exceeded"""
32
+ pass
33
+
34
+
35
+ class InvalidRequestError(ProviderError):
36
+ """Invalid request to provider"""
37
+ pass
38
+
39
+
40
+ class UnsupportedFeatureError(AbstractCoreError):
41
+ """Feature not supported by provider"""
42
+ pass
43
+
44
+
45
+ class FileProcessingError(AbstractCoreError):
46
+ """Error processing file or media"""
47
+ pass
48
+
49
+
50
+ class ToolExecutionError(AbstractCoreError):
51
+ """Error executing tool"""
52
+ pass
53
+
54
+
55
+ class SessionError(AbstractCoreError):
56
+ """Error with session management"""
57
+ pass
58
+
59
+
60
+ class ConfigurationError(AbstractCoreError):
61
+ """Invalid configuration"""
62
+ pass
63
+
64
+
65
+ class ModelNotFoundError(ProviderError):
66
+ """Model not found or invalid model name"""
67
+ pass
68
+
69
+
70
+ def format_model_error(provider: str, invalid_model: str, available_models: list) -> str:
71
+ """
72
+ Format a helpful error message for model not found errors.
73
+
74
+ Args:
75
+ provider: Provider name (e.g., "OpenAI", "Anthropic")
76
+ invalid_model: The model name that was not found
77
+ available_models: List of available model names
78
+
79
+ Returns:
80
+ Formatted error message string
81
+ """
82
+ message = f"❌ Model '{invalid_model}' not found for {provider} provider.\n"
83
+
84
+ if available_models:
85
+ message += f"\n✅ Available models ({len(available_models)}):\n"
86
+ for model in available_models[:30]: # Show max 30
87
+ message += f" • {model}\n"
88
+ if len(available_models) > 30:
89
+ message += f" ... and {len(available_models) - 30} more\n"
90
+ else:
91
+ # Show provider documentation when we can't fetch models
92
+ doc_links = {
93
+ "anthropic": "https://docs.anthropic.com/en/docs/about-claude/models",
94
+ "openai": "https://platform.openai.com/docs/models",
95
+ "ollama": "https://ollama.com/library",
96
+ "huggingface": "https://huggingface.co/models",
97
+ "mlx": "https://huggingface.co/mlx-community"
98
+ }
99
+
100
+ provider_lower = provider.lower()
101
+ if provider_lower in doc_links:
102
+ message += f"\n📚 See available models: {doc_links[provider_lower]}\n"
103
+ else:
104
+ message += f"\n⚠️ Could not fetch available models for {provider}.\n"
105
+
106
+ return message.rstrip()
107
+
# Public API of the exceptions package: the full exception hierarchy, the
# legacy alias, and the model-error formatting helper. Keeping this explicit
# makes `from abstractcore.exceptions import *` predictable.
__all__ = [
    'AbstractCoreError',
    'ProviderError',
    'ProviderAPIError',
    'AuthenticationError',
    'Authentication',  # Backward compatibility alias
    'RateLimitError',
    'InvalidRequestError',
    'UnsupportedFeatureError',
    'FileProcessingError',
    'ToolExecutionError',
    'SessionError',
    'ConfigurationError',
    'ModelNotFoundError',
    'format_model_error'
]