abstractcore-2.3.9-py3-none-any.whl → abstractcore-2.4.0-py3-none-any.whl

This diff compares the contents of publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (66)
  1. {abstractllm → abstractcore}/__init__.py +2 -2
  2. abstractcore/apps/__init__.py +1 -0
  3. {abstractllm → abstractcore}/apps/__main__.py +6 -6
  4. {abstractllm → abstractcore}/apps/extractor.py +16 -16
  5. {abstractllm → abstractcore}/apps/judge.py +18 -18
  6. {abstractllm → abstractcore}/apps/summarizer.py +11 -11
  7. {abstractllm → abstractcore}/core/__init__.py +2 -2
  8. {abstractllm → abstractcore}/core/enums.py +1 -1
  9. {abstractllm → abstractcore}/core/factory.py +8 -8
  10. {abstractllm → abstractcore}/core/interface.py +3 -3
  11. {abstractllm → abstractcore}/core/retry.py +1 -1
  12. {abstractllm → abstractcore}/core/session.py +6 -6
  13. {abstractllm → abstractcore}/core/types.py +1 -1
  14. {abstractllm → abstractcore}/embeddings/__init__.py +1 -1
  15. {abstractllm → abstractcore}/embeddings/manager.py +5 -5
  16. {abstractllm → abstractcore}/events/__init__.py +1 -1
  17. {abstractllm → abstractcore}/processing/basic_extractor.py +5 -5
  18. {abstractllm → abstractcore}/processing/basic_judge.py +5 -5
  19. {abstractllm → abstractcore}/processing/basic_summarizer.py +7 -7
  20. {abstractllm → abstractcore}/providers/base.py +4 -4
  21. {abstractllm → abstractcore}/providers/huggingface_provider.py +1 -1
  22. {abstractllm → abstractcore}/providers/streaming.py +8 -3
  23. {abstractllm → abstractcore}/structured/__init__.py +1 -1
  24. {abstractllm → abstractcore}/tools/__init__.py +2 -2
  25. {abstractllm → abstractcore}/tools/common_tools.py +4 -4
  26. {abstractllm → abstractcore}/utils/__init__.py +1 -1
  27. {abstractllm → abstractcore}/utils/cli.py +13 -13
  28. {abstractllm → abstractcore}/utils/structured_logging.py +3 -3
  29. {abstractllm → abstractcore}/utils/token_utils.py +1 -1
  30. {abstractllm → abstractcore}/utils/version.py +1 -1
  31. {abstractcore-2.3.9.dist-info → abstractcore-2.4.0.dist-info}/METADATA +20 -16
  32. abstractcore-2.4.0.dist-info/RECORD +62 -0
  33. abstractcore-2.4.0.dist-info/entry_points.txt +7 -0
  34. abstractcore-2.4.0.dist-info/top_level.txt +1 -0
  35. abstractcore-2.3.9.dist-info/RECORD +0 -62
  36. abstractcore-2.3.9.dist-info/entry_points.txt +0 -7
  37. abstractcore-2.3.9.dist-info/top_level.txt +0 -1
  38. abstractllm/apps/__init__.py +0 -1
  39. {abstractllm → abstractcore}/architectures/__init__.py +0 -0
  40. {abstractllm → abstractcore}/architectures/detection.py +0 -0
  41. {abstractllm → abstractcore}/architectures/enums.py +0 -0
  42. {abstractllm → abstractcore}/assets/architecture_formats.json +0 -0
  43. {abstractllm → abstractcore}/assets/model_capabilities.json +0 -0
  44. {abstractllm → abstractcore}/assets/session_schema.json +0 -0
  45. {abstractllm → abstractcore}/embeddings/models.py +0 -0
  46. {abstractllm → abstractcore}/processing/__init__.py +0 -0
  47. {abstractllm → abstractcore}/providers/__init__.py +0 -0
  48. {abstractllm → abstractcore}/providers/anthropic_provider.py +0 -0
  49. {abstractllm → abstractcore}/providers/lmstudio_provider.py +0 -0
  50. {abstractllm → abstractcore}/providers/mlx_provider.py +0 -0
  51. {abstractllm → abstractcore}/providers/mock_provider.py +0 -0
  52. {abstractllm → abstractcore}/providers/ollama_provider.py +0 -0
  53. {abstractllm → abstractcore}/providers/openai_provider.py +0 -0
  54. {abstractllm → abstractcore}/server/__init__.py +0 -0
  55. {abstractllm → abstractcore}/server/app.py +0 -0
  56. {abstractllm → abstractcore}/structured/handler.py +0 -0
  57. {abstractllm → abstractcore}/structured/retry.py +0 -0
  58. {abstractllm → abstractcore}/tools/core.py +0 -0
  59. {abstractllm → abstractcore}/tools/handler.py +0 -0
  60. {abstractllm → abstractcore}/tools/parser.py +0 -0
  61. {abstractllm → abstractcore}/tools/registry.py +0 -0
  62. {abstractllm → abstractcore}/tools/syntax_rewriter.py +0 -0
  63. {abstractllm → abstractcore}/tools/tag_rewriter.py +0 -0
  64. {abstractllm → abstractcore}/utils/self_fixes.py +0 -0
  65. {abstractcore-2.3.9.dist-info → abstractcore-2.4.0.dist-info}/WHEEL +0 -0
  66. {abstractcore-2.3.9.dist-info → abstractcore-2.4.0.dist-info}/licenses/LICENSE +0 -0

{abstractllm → abstractcore}/__init__.py

@@ -1,6 +1,6 @@
  # -*- coding: utf-8 -*-
  """
- AbstractLLM - Unified interface to all LLM providers with essential infrastructure.
+ AbstractCore - Unified interface to all LLM providers with essential infrastructure.

  Key Features:
  • Multi-provider support (OpenAI, Anthropic, Ollama, HuggingFace, MLX, LMStudio)
@@ -11,7 +11,7 @@ Key Features:
  • Event system for observability

  Quick Start:
- from abstractllm import create_llm
+ from abstractcore import create_llm

  # Unified token management across all providers
  llm = create_llm(
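
The headline change in 2.4.0 is the package rename from abstractllm to abstractcore; the hunks above and below are almost entirely mechanical substitutions of that name. A minimal migration sketch, assuming the 2.3.9 call signatures carry over unchanged (the provider and model names are illustrative):

    # 2.3.9: from abstractllm import create_llm
    # 2.4.0: only the package name changes
    from abstractcore import create_llm

    # Unified token management, per the Quick Start docstring above
    llm = create_llm(
        "openai",                # any supported provider
        model="gpt-4o-mini",     # illustrative model name
        max_tokens=32000,        # total context budget (input + output)
        max_output_tokens=8000,  # tokens reserved for generation
    )
    response = llm.generate("Hello!")  # generate(prompt) per providers/base.py below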

abstractcore/apps/__init__.py

@@ -0,0 +1 @@
+ # AbstractCore CLI Applications

{abstractllm → abstractcore}/apps/__main__.py

@@ -1,9 +1,9 @@
  #!/usr/bin/env python3
  """
- AbstractLLM Apps - Command-line interface launcher
+ AbstractCore Apps - Command-line interface launcher

  Usage:
- python -m abstractllm.apps <app_name> [options]
+ python -m abstractcore.apps <app_name> [options]

  Available apps:
  summarizer - Document summarization tool
@@ -11,10 +11,10 @@ Available apps:
  judge - Text evaluation and scoring tool

  Examples:
- python -m abstractllm.apps summarizer document.txt
- python -m abstractllm.apps extractor report.txt --format json-ld
- python -m abstractllm.apps judge essay.txt --criteria clarity,accuracy
- python -m abstractllm.apps <app> --help
+ python -m abstractcore.apps summarizer document.txt
+ python -m abstractcore.apps extractor report.txt --format json-ld
+ python -m abstractcore.apps judge essay.txt --criteria clarity,accuracy
+ python -m abstractcore.apps <app> --help
  """

  import sys

{abstractllm → abstractcore}/apps/extractor.py

@@ -1,9 +1,9 @@
  #!/usr/bin/env python3
  """
- AbstractLLM Entity Extractor CLI Application
+ AbstractCore Entity Extractor CLI Application

  Usage:
- python -m abstractllm.apps.extractor <file_path> [options]
+ python -m abstractcore.apps.extractor <file_path> [options]

  Options:
  --focus <focus> Specific focus area for extraction (e.g., "technology", "business", "medical")
@@ -27,12 +27,12 @@ Options:
  --help Show this help message

  Examples:
- python -m abstractllm.apps.extractor document.pdf
- python -m abstractllm.apps.extractor report.txt --focus technology --style structured --verbose
- python -m abstractllm.apps.extractor data.md --entity-types person,organization --output kg.jsonld
- python -m abstractllm.apps.extractor large.txt --fast --minified --verbose # Fast, compact output
- python -m abstractllm.apps.extractor report.txt --length detailed --provider openai --model gpt-4o-mini
- python -m abstractllm.apps.extractor doc.txt --iterate 3 --verbose # 3 refinement passes for higher quality
+ python -m abstractcore.apps.extractor document.pdf
+ python -m abstractcore.apps.extractor report.txt --focus technology --style structured --verbose
+ python -m abstractcore.apps.extractor data.md --entity-types person,organization --output kg.jsonld
+ python -m abstractcore.apps.extractor large.txt --fast --minified --verbose # Fast, compact output
+ python -m abstractcore.apps.extractor report.txt --length detailed --provider openai --model gpt-4o-mini
+ python -m abstractcore.apps.extractor doc.txt --iterate 3 --verbose # 3 refinement passes for higher quality
  """

  import argparse
@@ -155,17 +155,17 @@ def parse_extraction_length(length_str: Optional[str]) -> str:
  def main():
  """Main CLI function"""
  parser = argparse.ArgumentParser(
- description="AbstractLLM Entity & Relationship Extractor - Default: qwen3:4b-instruct-2507-q4_K_M (requires Ollama)",
+ description="AbstractCore Entity & Relationship Extractor - Default: qwen3:4b-instruct-2507-q4_K_M (requires Ollama)",
  formatter_class=argparse.RawDescriptionHelpFormatter,
  epilog="""
  Examples:
- python -m abstractllm.apps.extractor document.pdf
- python -m abstractllm.apps.extractor report.txt --focus=technology --style=structured --verbose
- python -m abstractllm.apps.extractor data.md --entity-types=person,organization --output=kg.jsonld
- python -m abstractllm.apps.extractor large.txt --length=detailed --fast --minified --verbose
- python -m abstractllm.apps.extractor doc.txt --iterate=3 --verbose # Iterative refinement for quality
- python -m abstractllm.apps.extractor doc.txt --format=triples --verbose # RDF triples output
- python -m abstractllm.apps.extractor doc.txt --format=triples --output=triples.txt # Simple triples
+ python -m abstractcore.apps.extractor document.pdf
+ python -m abstractcore.apps.extractor report.txt --focus=technology --style=structured --verbose
+ python -m abstractcore.apps.extractor data.md --entity-types=person,organization --output=kg.jsonld
+ python -m abstractcore.apps.extractor large.txt --length=detailed --fast --minified --verbose
+ python -m abstractcore.apps.extractor doc.txt --iterate=3 --verbose # Iterative refinement for quality
+ python -m abstractcore.apps.extractor doc.txt --format=triples --verbose # RDF triples output
+ python -m abstractcore.apps.extractor doc.txt --format=triples --output=triples.txt # Simple triples

  Supported file types: .txt, .md, .py, .js, .html, .json, .csv, and most text-based files

{abstractllm → abstractcore}/apps/judge.py

@@ -1,9 +1,9 @@
  #!/usr/bin/env python3
  """
- AbstractLLM Basic Judge CLI Application
+ AbstractCore Basic Judge CLI Application

  Usage:
- python -m abstractllm.apps.judge <file_path_or_text> [file2] [file3] ... [options]
+ python -m abstractcore.apps.judge <file_path_or_text> [file2] [file3] ... [options]

  Options:
  --context <context> Evaluation context description (e.g., "code review", "documentation assessment")
@@ -25,18 +25,18 @@ Options:

  Examples:
  # Single file or text
- python -m abstractllm.apps.judge "This code is well-structured and solves the problem efficiently."
- python -m abstractllm.apps.judge document.py --context "code review" --criteria clarity,soundness,effectiveness
+ python -m abstractcore.apps.judge "This code is well-structured and solves the problem efficiently."
+ python -m abstractcore.apps.judge document.py --context "code review" --criteria clarity,soundness,effectiveness

  # Multiple files (evaluated sequentially to avoid context overflow)
- python -m abstractllm.apps.judge file1.py file2.py file3.py --context "code review" --output assessments.json
- python -m abstractllm.apps.judge *.py --context "Python code review" --format plain
- python -m abstractllm.apps.judge docs/*.md --context "documentation review" --criteria clarity,completeness
+ python -m abstractcore.apps.judge file1.py file2.py file3.py --context "code review" --output assessments.json
+ python -m abstractcore.apps.judge *.py --context "Python code review" --format plain
+ python -m abstractcore.apps.judge docs/*.md --context "documentation review" --criteria clarity,completeness

  # Other options
- python -m abstractllm.apps.judge proposal.md --focus "technical accuracy,completeness,examples" --output assessment.json
- python -m abstractllm.apps.judge content.txt --reference ideal_solution.txt --format plain --verbose
- python -m abstractllm.apps.judge text.md --provider openai --model gpt-4o-mini --temperature 0.05
+ python -m abstractcore.apps.judge proposal.md --focus "technical accuracy,completeness,examples" --output assessment.json
+ python -m abstractcore.apps.judge content.txt --reference ideal_solution.txt --format plain --verbose
+ python -m abstractcore.apps.judge text.md --provider openai --model gpt-4o-mini --temperature 0.05
  """

  import argparse
@@ -250,22 +250,22 @@ def format_assessment_plain(assessment: dict) -> str:
  def main():
  """Main CLI function"""
  parser = argparse.ArgumentParser(
- description="AbstractLLM Basic Judge - LLM-as-a-judge for objective evaluation (Default: qwen3:4b-instruct-2507-q4_K_M)",
+ description="AbstractCore Basic Judge - LLM-as-a-judge for objective evaluation (Default: qwen3:4b-instruct-2507-q4_K_M)",
  formatter_class=argparse.RawDescriptionHelpFormatter,
  epilog="""
  Examples:
  # Single file or text
- python -m abstractllm.apps.judge "This code is well-structured."
- python -m abstractllm.apps.judge document.py --context "code review" --criteria clarity,soundness
- python -m abstractllm.apps.judge proposal.md --focus "technical accuracy,examples" --output assessment.json
+ python -m abstractcore.apps.judge "This code is well-structured."
+ python -m abstractcore.apps.judge document.py --context "code review" --criteria clarity,soundness
+ python -m abstractcore.apps.judge proposal.md --focus "technical accuracy,examples" --output assessment.json

  # Multiple files (evaluated sequentially)
- python -m abstractllm.apps.judge file1.py file2.py file3.py --context "code review" --format json
- python -m abstractllm.apps.judge docs/*.md --context "documentation review" --format plain
+ python -m abstractcore.apps.judge file1.py file2.py file3.py --context "code review" --format json
+ python -m abstractcore.apps.judge docs/*.md --context "documentation review" --format plain

  # Other options
- python -m abstractllm.apps.judge content.txt --reference ideal.txt --format plain --verbose
- python -m abstractllm.apps.judge text.md --provider openai --model gpt-4o-mini
+ python -m abstractcore.apps.judge content.txt --reference ideal.txt --format plain --verbose
+ python -m abstractcore.apps.judge text.md --provider openai --model gpt-4o-mini

  Available criteria:
  clarity, simplicity, actionability, soundness, innovation, effectiveness,

{abstractllm → abstractcore}/apps/summarizer.py

@@ -1,9 +1,9 @@
  #!/usr/bin/env python3
  """
- AbstractLLM Summarizer CLI Application
+ AbstractCore Summarizer CLI Application

  Usage:
- python -m abstractllm.apps.summarizer <file_path> [options]
+ python -m abstractcore.apps.summarizer <file_path> [options]

  Options:
  --style <style> Summary style (structured, narrative, objective, analytical, executive, conversational)
@@ -19,10 +19,10 @@ Options:
  --help Show this help message

  Examples:
- python -m abstractllm.apps.summarizer document.pdf
- python -m abstractllm.apps.summarizer report.txt --style executive --length brief --verbose
- python -m abstractllm.apps.summarizer data.md --focus "technical details" --output summary.txt
- python -m abstractllm.apps.summarizer large.txt --chunk-size 15000 --provider openai --model gpt-4o-mini
+ python -m abstractcore.apps.summarizer document.pdf
+ python -m abstractcore.apps.summarizer report.txt --style executive --length brief --verbose
+ python -m abstractcore.apps.summarizer data.md --focus "technical details" --output summary.txt
+ python -m abstractcore.apps.summarizer large.txt --chunk-size 15000 --provider openai --model gpt-4o-mini
  """

  import argparse
@@ -156,14 +156,14 @@ def format_summary_output(result) -> str:
  def main():
  """Main CLI function"""
  parser = argparse.ArgumentParser(
- description="AbstractLLM Document Summarizer - Default: gemma3:1b-it-qat (requires Ollama)",
+ description="AbstractCore Document Summarizer - Default: gemma3:1b-it-qat (requires Ollama)",
  formatter_class=argparse.RawDescriptionHelpFormatter,
  epilog="""
  Examples:
- python -m abstractllm.apps.summarizer document.pdf
- python -m abstractllm.apps.summarizer report.txt --style executive --length brief --verbose
- python -m abstractllm.apps.summarizer data.md --focus "technical details" --output summary.txt
- python -m abstractllm.apps.summarizer large.txt --chunk-size 15000 --provider openai --model gpt-4o-mini
+ python -m abstractcore.apps.summarizer document.pdf
+ python -m abstractcore.apps.summarizer report.txt --style executive --length brief --verbose
+ python -m abstractcore.apps.summarizer data.md --focus "technical details" --output summary.txt
+ python -m abstractcore.apps.summarizer large.txt --chunk-size 15000 --provider openai --model gpt-4o-mini

  Supported file types: .txt, .md, .py, .js, .html, .json, .csv, and most text-based files

{abstractllm → abstractcore}/core/__init__.py

@@ -12,7 +12,7 @@ from .factory import create_llm
  from .session import BasicSession
  from .types import GenerateResponse, Message
  from .enums import ModelParameter, ModelCapability, MessageRole
- from .interface import AbstractLLMInterface
+ from .interface import AbstractCoreInterface

  __all__ = [
  'create_llm',
@@ -22,5 +22,5 @@ __all__ = [
  'ModelParameter',
  'ModelCapability',
  'MessageRole',
- 'AbstractLLMInterface'
+ 'AbstractCoreInterface'
  ]

{abstractllm → abstractcore}/core/enums.py

@@ -1,5 +1,5 @@
  """
- Enums for AbstractLLM.
+ Enums for AbstractCore.
  """

  from enum import Enum

{abstractllm → abstractcore}/core/factory.py

@@ -3,11 +3,11 @@ Factory for creating LLM providers.
  """

  from typing import Optional
- from .interface import AbstractLLMInterface
+ from .interface import AbstractCoreInterface
  from ..exceptions import ModelNotFoundError, AuthenticationError, ProviderAPIError


- def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> AbstractLLMInterface:
+ def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> AbstractCoreInterface:
  """
  Create an LLM provider instance with unified token parameter support.

@@ -16,7 +16,7 @@ def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> Abstract
  model: Model name (optional, will use provider default)
  **kwargs: Additional configuration including token parameters

- Token Parameters (AbstractLLM Unified Standard):
+ Token Parameters (AbstractCore Unified Standard):
  max_tokens: Total context window budget (input + output combined)
  max_output_tokens: Maximum tokens reserved for generation (default: 2048)
  max_input_tokens: Maximum tokens for input (auto-calculated if not specified)
@@ -75,7 +75,7 @@ def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> Abstract
  from ..providers.openai_provider import OpenAIProvider
  return OpenAIProvider(model=model or "gpt-5-nano-2025-08-07", **kwargs)
  except ImportError:
- raise ImportError("OpenAI dependencies not installed. Install with: pip install abstractllm[openai]")
+ raise ImportError("OpenAI dependencies not installed. Install with: pip install abstractcore[openai]")
  except (ModelNotFoundError, AuthenticationError, ProviderAPIError) as e:
  # Re-raise provider exceptions cleanly
  raise e
@@ -85,7 +85,7 @@ def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> Abstract
  from ..providers.anthropic_provider import AnthropicProvider
  return AnthropicProvider(model=model or "claude-3-5-haiku-latest", **kwargs)
  except ImportError:
- raise ImportError("Anthropic dependencies not installed. Install with: pip install abstractllm[anthropic]")
+ raise ImportError("Anthropic dependencies not installed. Install with: pip install abstractcore[anthropic]")
  except (ModelNotFoundError, AuthenticationError, ProviderAPIError) as e:
  # Re-raise provider exceptions cleanly
  raise e
@@ -95,21 +95,21 @@ def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> Abstract
  from ..providers.ollama_provider import OllamaProvider
  return OllamaProvider(model=model or "qwen3-coder:30b", **kwargs)
  except ImportError:
- raise ImportError("Ollama dependencies not installed. Install with: pip install abstractllm[ollama]")
+ raise ImportError("Ollama dependencies not installed. Install with: pip install abstractcore[ollama]")

  elif provider.lower() == "huggingface":
  try:
  from ..providers.huggingface_provider import HuggingFaceProvider
  return HuggingFaceProvider(model=model or "Qwen/Qwen3-4B/", **kwargs)
  except ImportError:
- raise ImportError("HuggingFace dependencies not installed. Install with: pip install abstractllm[huggingface]")
+ raise ImportError("HuggingFace dependencies not installed. Install with: pip install abstractcore[huggingface]")

  elif provider.lower() == "mlx":
  try:
  from ..providers.mlx_provider import MLXProvider
  return MLXProvider(model=model or "mlx-community/Qwen3-4B", **kwargs)
  except ImportError:
- raise ImportError("MLX dependencies not installed. Install with: pip install abstractllm[mlx]")
+ raise ImportError("MLX dependencies not installed. Install with: pip install abstractcore[mlx]")

  elif provider.lower() == "lmstudio":
  try:
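
The factory docstring above fixes a unified token vocabulary: max_tokens is the total context budget, max_output_tokens (default 2048) is reserved for generation, and max_input_tokens is derived when not given. A sketch of the resulting arithmetic, under the assumption that the derivation is simply budget minus output reservation, as the docstring describes:

    from abstractcore import create_llm

    # 32,000 total - 8,000 reserved for output => input capped at 24,000 tokens
    llm = create_llm(
        "ollama",
        model="qwen3-coder:30b",  # the Ollama default shown above
        max_tokens=32000,
        max_output_tokens=8000,
    )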

{abstractllm → abstractcore}/core/interface.py

@@ -7,11 +7,11 @@ from typing import List, Dict, Any, Optional, Union, Iterator
  from .types import GenerateResponse, Message


- class AbstractLLMInterface(ABC):
+ class AbstractCoreInterface(ABC):
  """
  Abstract base class for all LLM providers.

- AbstractLLM Token Parameter Vocabulary (Unified Standard):
+ AbstractCore Token Parameter Vocabulary (Unified Standard):
  =========================================================

  • max_tokens: Total context window budget (input + output combined) - YOUR BUDGET
@@ -57,7 +57,7 @@ class AbstractLLMInterface(ABC):

  Provider Abstraction:
  ===================
- AbstractLLM handles provider-specific parameter mapping internally:
+ AbstractCore handles provider-specific parameter mapping internally:
  • OpenAI: max_tokens → max_completion_tokens (o1 models) or max_tokens (others)
  • Anthropic: max_output_tokens → max_tokens (output-focused API)
  • Google: max_output_tokens → max_output_tokens (direct mapping)

{abstractllm → abstractcore}/core/retry.py

@@ -1,5 +1,5 @@
  """
- Production-ready retry strategies for AbstractLLM Core.
+ Production-ready retry strategies for AbstractCore.

  Implements SOTA exponential backoff with jitter and circuit breaker patterns
  based on 2025 best practices from AWS Architecture Blog, Tenacity principles,

{abstractllm → abstractcore}/core/session.py

@@ -10,7 +10,7 @@ import json
  import uuid
  from collections.abc import Generator

- from .interface import AbstractLLMInterface
+ from .interface import AbstractCoreInterface
  from .types import GenerateResponse, Message
  from .enums import MessageRole

@@ -25,7 +25,7 @@ class BasicSession:
  """

  def __init__(self,
- provider: Optional[AbstractLLMInterface] = None,
+ provider: Optional[AbstractCoreInterface] = None,
  system_prompt: Optional[str] = None,
  tools: Optional[List[Callable]] = None,
  timeout: Optional[float] = None,
@@ -255,7 +255,7 @@ class BasicSession:
  json.dump(data, f, indent=2)

  @classmethod
- def load(cls, filepath: Union[str, Path], provider: Optional[AbstractLLMInterface] = None,
+ def load(cls, filepath: Union[str, Path], provider: Optional[AbstractCoreInterface] = None,
  tools: Optional[List[Callable]] = None) -> 'BasicSession':
  """
  Load session from file with complete metadata restoration.
@@ -325,7 +325,7 @@ class BasicSession:
  }

  @classmethod
- def from_dict(cls, data: Dict[str, Any], provider: Optional[AbstractLLMInterface] = None,
+ def from_dict(cls, data: Dict[str, Any], provider: Optional[AbstractCoreInterface] = None,
  tools: Optional[List[Callable]] = None) -> 'BasicSession':
  """
  Create session from dictionary data (supports both new archive format and legacy format).
@@ -429,7 +429,7 @@ class BasicSession:
  def compact(self,
  preserve_recent: int = 6,
  focus: Optional[str] = None,
- compact_provider: Optional[AbstractLLMInterface] = None,
+ compact_provider: Optional[AbstractCoreInterface] = None,
  reason: str = "manual") -> 'BasicSession':
  """
  Compact chat history using SOTA 2025 best practices for conversation summarization.
@@ -675,7 +675,7 @@ class BasicSession:
  print(f"✅ Session compacted: {len(compacted.messages)} messages, ~{compacted.get_token_estimate()} tokens")

  def generate_summary(self, preserve_recent: int = 6, focus: Optional[str] = None,
- compact_provider: Optional[AbstractLLMInterface] = None) -> Dict[str, Any]:
+ compact_provider: Optional[AbstractCoreInterface] = None) -> Dict[str, Any]:
  """
  Generate a summary of the entire conversation and store it in session.summary.

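The BasicSession hunks rename only the provider type hint, but the signatures they expose (provider injection, save/load, compact, generate_summary) outline the session lifecycle. A usage sketch based solely on those signatures; the file name and prompts are illustrative:

    from abstractcore import create_llm
    from abstractcore.core import BasicSession  # re-exported in core/__init__.py above

    llm = create_llm("anthropic", model="claude-3-5-haiku-latest")
    session = BasicSession(provider=llm, system_prompt="You are a concise assistant.")

    session.save("chat.json")  # serialized via json.dump, per the hunk above
    restored = BasicSession.load("chat.json", provider=llm)

    # Summarize older turns while keeping the most recent six messages verbatim
    compacted = restored.compact(preserve_recent=6, reason="manual")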

{abstractllm → abstractcore}/core/types.py

@@ -1,5 +1,5 @@
  """
- Core types for AbstractLLM.
+ Core types for AbstractCore.
  """

  from typing import Optional, Dict, List, Any

{abstractllm → abstractcore}/embeddings/__init__.py

@@ -1,5 +1,5 @@
  """
- Vector Embeddings for AbstractLLM Core
+ Vector Embeddings for AbstractCore
  =====================================

  Provides efficient text embedding with SOTA open-source models.

{abstractllm → abstractcore}/embeddings/manager.py

@@ -44,7 +44,7 @@ def _suppress_onnx_warnings():
  This suppresses the CoreML and node assignment warnings commonly seen on macOS.
  These warnings are informational only and don't impact performance or quality.

- To enable verbose ONNX logging for debugging, set: ABSTRACTLLM_ONNX_VERBOSE=1
+ To enable verbose ONNX logging for debugging, set: ABSTRACTCORE_ONNX_VERBOSE=1
  """
  with warnings.catch_warnings():
  # Suppress PyTorch ONNX registration warnings (harmless in PyTorch 2.8+)
@@ -73,8 +73,8 @@ def _suppress_onnx_warnings():
  import os

  # Allow users to enable verbose ONNX logging for debugging
- # Set ABSTRACTLLM_ONNX_VERBOSE=1 to see ONNX warnings for debugging
- if os.environ.get("ABSTRACTLLM_ONNX_VERBOSE", "0") != "1":
+ # Set ABSTRACTCORE_ONNX_VERBOSE=1 to see ONNX warnings for debugging
+ if os.environ.get("ABSTRACTCORE_ONNX_VERBOSE", "0") != "1":
  # Suppress the CoreML and node assignment warnings you may see on macOS
  # These are harmless informational messages that don't affect performance or quality:
  # - CoreML partitioning warnings: Normal behavior when model ops aren't all CoreML-compatible
@@ -132,7 +132,7 @@ class EmbeddingManager:
  model: Model identifier (HuggingFace model ID for HF provider, model name for others).
  provider: Embedding provider ('huggingface', 'ollama', 'lmstudio'). Defaults to 'huggingface'.
  backend: Inference backend for HuggingFace ('auto', 'pytorch', 'onnx', 'openvino')
- cache_dir: Directory for persistent cache. Defaults to ~/.abstractllm/embeddings
+ cache_dir: Directory for persistent cache. Defaults to ~/.abstractcore/embeddings
  cache_size: Maximum number of embeddings to cache in memory
  output_dims: Output dimensions for Matryoshka truncation (if supported by provider)
  trust_remote_code: Whether to trust remote code (HuggingFace only)
@@ -193,7 +193,7 @@ class EmbeddingManager:
  logger.info(f"Initialized LMStudio embedding provider with model: {model}")

  # Common setup for all providers
- self.cache_dir = Path(cache_dir) if cache_dir else Path.home() / ".abstractllm" / "embeddings"
+ self.cache_dir = Path(cache_dir) if cache_dir else Path.home() / ".abstractcore" / "embeddings"
  self.cache_dir.mkdir(parents=True, exist_ok=True)
  self.cache_size = cache_size
  self.output_dims = output_dims
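
For EmbeddingManager the changes are configuration renames: the persistent cache moves from ~/.abstractllm/embeddings to ~/.abstractcore/embeddings, and the ONNX debug switch becomes ABSTRACTCORE_ONNX_VERBOSE. A construction sketch using only parameters listed in the docstring hunk; the model ID is illustrative, and the embedding calls themselves are untouched by this diff:

    import os
    from abstractcore.embeddings.manager import EmbeddingManager  # module path per the file list

    # Opt in to verbose ONNX logging (renamed from ABSTRACTLLM_ONNX_VERBOSE)
    os.environ["ABSTRACTCORE_ONNX_VERBOSE"] = "1"

    manager = EmbeddingManager(
        model="sentence-transformers/all-MiniLM-L6-v2",  # illustrative HuggingFace model ID
        provider="huggingface",
        backend="auto",
        cache_dir=None,  # falls back to ~/.abstractcore/embeddings per the hunk above
    )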

{abstractllm → abstractcore}/events/__init__.py

@@ -1,5 +1,5 @@
  """
- Event system for AbstractLLM - OpenTelemetry compatible.
+ Event system for AbstractCore - OpenTelemetry compatible.

  This module provides a comprehensive event system for tracking LLM operations,
  including generation, tool calls, structured output, and performance metrics.

{abstractllm → abstractcore}/processing/basic_extractor.py

@@ -13,7 +13,7 @@ import json
  import logging
  from pydantic import BaseModel, Field

- from ..core.interface import AbstractLLMInterface
+ from ..core.interface import AbstractCoreInterface
  from ..core.factory import create_llm
  from ..structured.retry import FeedbackRetry
  from ..utils.structured_logging import get_logger
@@ -50,7 +50,7 @@ class BasicExtractor:

  def __init__(
  self,
- llm: Optional[AbstractLLMInterface] = None,
+ llm: Optional[AbstractCoreInterface] = None,
  max_chunk_size: int = 8000,
  max_tokens: int = 32000,
  max_output_tokens: int = 8000,
@@ -59,7 +59,7 @@
  """Initialize the extractor

  Args:
- llm: AbstractLLM instance (any provider). If None, uses default Ollama model
+ llm: AbstractCore instance (any provider). If None, uses default Ollama model
  max_chunk_size: Maximum characters per chunk for long documents (default 8000)
  max_tokens: Maximum total tokens for LLM context (default 32000)
  max_output_tokens: Maximum tokens for LLM output generation (default 8000)
@@ -79,8 +79,8 @@
  " - qwen3-coder:30b (excellent for structured output, requires 32GB RAM)\n"
  " - gpt-oss:120b (highest quality, requires 120GB RAM)\n\n"
  "🔧 Alternatively, provide a custom LLM instance:\n"
- " from abstractllm import create_llm\n"
- " from abstractllm.processing import BasicExtractor\n"
+ " from abstractcore import create_llm\n"
+ " from abstractcore.processing import BasicExtractor\n"
  " \n"
  " llm = create_llm('openai', model='gpt-4o-mini', max_tokens=32000, max_output_tokens=8000)\n"
  " extractor = BasicExtractor(llm)"

{abstractllm → abstractcore}/processing/basic_judge.py

@@ -15,7 +15,7 @@ import logging
  from pathlib import Path
  from pydantic import BaseModel, Field

- from ..core.interface import AbstractLLMInterface
+ from ..core.interface import AbstractCoreInterface
  from ..core.factory import create_llm
  from ..structured.retry import FeedbackRetry
  from ..utils.structured_logging import get_logger
@@ -114,7 +114,7 @@ class BasicJudge:

  def __init__(
  self,
- llm: Optional[AbstractLLMInterface] = None,
+ llm: Optional[AbstractCoreInterface] = None,
  temperature: float = 0.1, # Low temperature for consistent evaluation
  max_tokens: int = 32000,
  max_output_tokens: int = 8000,
@@ -124,7 +124,7 @@
  """Initialize the judge

  Args:
- llm: AbstractLLM instance (any provider). If None, uses default Ollama model
+ llm: AbstractCore instance (any provider). If None, uses default Ollama model
  temperature: Temperature for evaluation consistency (default 0.1)
  max_tokens: Maximum total tokens for LLM context (default 32000)
  max_output_tokens: Maximum tokens for LLM output generation (default 8000)
@@ -147,8 +147,8 @@
  " - qwen3-coder:30b (excellent for detailed assessment, requires 32GB RAM)\n"
  " - gpt-oss:120b (highest quality evaluation, requires 120GB RAM)\n\n"
  "🔧 Alternatively, provide a custom LLM instance:\n"
- " from abstractllm import create_llm\n"
- " from abstractllm.processing import BasicJudge\n"
+ " from abstractcore import create_llm\n"
+ " from abstractcore.processing import BasicJudge\n"
  " \n"
  " llm = create_llm('openai', model='gpt-4o-mini', temperature=0.1)\n"
  " judge = BasicJudge(llm)"

{abstractllm → abstractcore}/processing/basic_summarizer.py

@@ -9,7 +9,7 @@ from enum import Enum
  from typing import List, Optional
  from pydantic import BaseModel, Field

- from ..core.interface import AbstractLLMInterface
+ from ..core.interface import AbstractCoreInterface
  from ..core.factory import create_llm
  from ..structured.retry import FeedbackRetry
  from ..utils.structured_logging import get_logger
@@ -79,7 +79,7 @@ class BasicSummarizer:

  def __init__(
  self,
- llm: Optional[AbstractLLMInterface] = None,
+ llm: Optional[AbstractCoreInterface] = None,
  max_chunk_size: int = 8000,
  max_tokens: int = 32000,
  max_output_tokens: int = 8000,
@@ -89,7 +89,7 @@
  Initialize the summarizer

  Args:
- llm: AbstractLLM instance (any provider). If None, attempts to create ollama gemma3:1b-it-qat
+ llm: AbstractCore instance (any provider). If None, attempts to create ollama gemma3:1b-it-qat
  max_chunk_size: Maximum characters per chunk for long documents (default 8000)
  max_tokens: Maximum total tokens for LLM context (default 32000)
  max_output_tokens: Maximum tokens for LLM output generation (default 8000)
@@ -107,8 +107,8 @@
  " 2. Download the model: ollama pull gemma3:1b-it-qat\n"
  " 3. Start Ollama service\n\n"
  "🔧 Alternatively, provide a custom LLM instance:\n"
- " from abstractllm import create_llm\n"
- " from abstractllm.processing import BasicSummarizer\n"
+ " from abstractcore import create_llm\n"
+ " from abstractcore.processing import BasicSummarizer\n"
  " \n"
  " # Using OpenAI\n"
  " llm = create_llm('openai', model='gpt-4o-mini')\n"
@@ -150,8 +150,8 @@
  SummaryOutput: Structured summary with metadata

  Example:
- >>> from abstractllm import create_llm
- >>> from abstractllm.processing import BasicSummarizer, SummaryStyle, SummaryLength
+ >>> from abstractcore import create_llm
+ >>> from abstractcore.processing import BasicSummarizer, SummaryStyle, SummaryLength
  >>>
  >>> llm = create_llm("openai", model="gpt-4o-mini")
  >>> summarizer = BasicSummarizer(llm)

{abstractllm → abstractcore}/providers/base.py

@@ -13,7 +13,7 @@ except ImportError:
  PYDANTIC_AVAILABLE = False
  BaseModel = None

- from ..core.interface import AbstractLLMInterface
+ from ..core.interface import AbstractCoreInterface
  from ..core.types import GenerateResponse
  from ..events import EventType, Event
  from datetime import datetime
@@ -30,14 +30,14 @@ from ..tools import execute_tools
  from ..core.retry import RetryManager, RetryConfig


- class BaseProvider(AbstractLLMInterface, ABC):
+ class BaseProvider(AbstractCoreInterface, ABC):
  """
  Base provider class with integrated telemetry and events.
  All providers should inherit from this class.
  """

  def __init__(self, model: str, **kwargs):
- AbstractLLMInterface.__init__(self, model, **kwargs)
+ AbstractCoreInterface.__init__(self, model, **kwargs)

  # Setup structured logging
  self.logger = get_logger(self.__class__.__name__)
@@ -957,7 +957,7 @@ Please provide a structured response."""
  """
  Generate response from the LLM.

- This method implements the AbstractLLMInterface and delegates to generate_with_telemetry.
+ This method implements the AbstractCoreInterface and delegates to generate_with_telemetry.

  Args:
  prompt: The input prompt

{abstractllm → abstractcore}/providers/huggingface_provider.py

@@ -385,7 +385,7 @@ class HuggingFaceProvider(BaseProvider):
  error_parts.extend([
  "",
  "📖 For more info: https://huggingface.co/docs/hub/en/gguf",
- "🔧 AbstractLLM only uses cached models - we never download automatically."
+ "🔧 AbstractCore only uses cached models - we never download automatically."
  ])

  error_message = "\n".join(error_parts)