abstractcore-2.3.8-py3-none-any.whl → abstractcore-2.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. {abstractllm → abstractcore}/__init__.py +2 -2
  2. abstractcore/apps/__init__.py +1 -0
  3. {abstractllm → abstractcore}/apps/__main__.py +6 -6
  4. {abstractllm → abstractcore}/apps/extractor.py +35 -27
  5. {abstractllm → abstractcore}/apps/judge.py +20 -24
  6. {abstractllm → abstractcore}/apps/summarizer.py +25 -16
  7. {abstractllm → abstractcore}/core/__init__.py +2 -2
  8. {abstractllm → abstractcore}/core/enums.py +1 -1
  9. {abstractllm → abstractcore}/core/factory.py +8 -8
  10. {abstractllm → abstractcore}/core/interface.py +3 -3
  11. {abstractllm → abstractcore}/core/retry.py +1 -1
  12. {abstractllm → abstractcore}/core/session.py +6 -6
  13. {abstractllm → abstractcore}/core/types.py +1 -1
  14. {abstractllm → abstractcore}/embeddings/__init__.py +1 -1
  15. {abstractllm → abstractcore}/embeddings/manager.py +5 -5
  16. {abstractllm → abstractcore}/events/__init__.py +1 -1
  17. {abstractllm → abstractcore}/processing/basic_extractor.py +9 -7
  18. {abstractllm → abstractcore}/processing/basic_judge.py +14 -10
  19. {abstractllm → abstractcore}/processing/basic_summarizer.py +12 -10
  20. {abstractllm → abstractcore}/providers/base.py +7 -6
  21. {abstractllm → abstractcore}/providers/huggingface_provider.py +38 -1
  22. {abstractllm → abstractcore}/providers/lmstudio_provider.py +8 -6
  23. {abstractllm → abstractcore}/providers/mlx_provider.py +37 -0
  24. {abstractllm → abstractcore}/providers/mock_provider.py +32 -0
  25. {abstractllm → abstractcore}/providers/streaming.py +8 -3
  26. {abstractllm → abstractcore}/structured/__init__.py +1 -1
  27. {abstractllm → abstractcore}/tools/__init__.py +2 -2
  28. {abstractllm → abstractcore}/tools/common_tools.py +4 -4
  29. {abstractllm → abstractcore}/utils/__init__.py +1 -1
  30. {abstractllm → abstractcore}/utils/cli.py +13 -13
  31. {abstractllm → abstractcore}/utils/structured_logging.py +3 -3
  32. {abstractllm → abstractcore}/utils/token_utils.py +1 -1
  33. {abstractllm → abstractcore}/utils/version.py +1 -1
  34. {abstractcore-2.3.8.dist-info → abstractcore-2.4.0.dist-info}/METADATA +20 -16
  35. abstractcore-2.4.0.dist-info/RECORD +62 -0
  36. abstractcore-2.4.0.dist-info/entry_points.txt +7 -0
  37. abstractcore-2.4.0.dist-info/top_level.txt +1 -0
  38. abstractcore-2.3.8.dist-info/RECORD +0 -62
  39. abstractcore-2.3.8.dist-info/entry_points.txt +0 -7
  40. abstractcore-2.3.8.dist-info/top_level.txt +0 -1
  41. abstractllm/apps/__init__.py +0 -1
  42. {abstractllm → abstractcore}/architectures/__init__.py +0 -0
  43. {abstractllm → abstractcore}/architectures/detection.py +0 -0
  44. {abstractllm → abstractcore}/architectures/enums.py +0 -0
  45. {abstractllm → abstractcore}/assets/architecture_formats.json +0 -0
  46. {abstractllm → abstractcore}/assets/model_capabilities.json +0 -0
  47. {abstractllm → abstractcore}/assets/session_schema.json +0 -0
  48. {abstractllm → abstractcore}/embeddings/models.py +0 -0
  49. {abstractllm → abstractcore}/processing/__init__.py +0 -0
  50. {abstractllm → abstractcore}/providers/__init__.py +0 -0
  51. {abstractllm → abstractcore}/providers/anthropic_provider.py +0 -0
  52. {abstractllm → abstractcore}/providers/ollama_provider.py +0 -0
  53. {abstractllm → abstractcore}/providers/openai_provider.py +0 -0
  54. {abstractllm → abstractcore}/server/__init__.py +0 -0
  55. {abstractllm → abstractcore}/server/app.py +0 -0
  56. {abstractllm → abstractcore}/structured/handler.py +0 -0
  57. {abstractllm → abstractcore}/structured/retry.py +0 -0
  58. {abstractllm → abstractcore}/tools/core.py +0 -0
  59. {abstractllm → abstractcore}/tools/handler.py +0 -0
  60. {abstractllm → abstractcore}/tools/parser.py +0 -0
  61. {abstractllm → abstractcore}/tools/registry.py +0 -0
  62. {abstractllm → abstractcore}/tools/syntax_rewriter.py +0 -0
  63. {abstractllm → abstractcore}/tools/tag_rewriter.py +0 -0
  64. {abstractllm → abstractcore}/utils/self_fixes.py +0 -0
  65. {abstractcore-2.3.8.dist-info → abstractcore-2.4.0.dist-info}/WHEEL +0 -0
  66. {abstractcore-2.3.8.dist-info → abstractcore-2.4.0.dist-info}/licenses/LICENSE +0 -0
{abstractllm → abstractcore}/__init__.py
@@ -1,6 +1,6 @@
  # -*- coding: utf-8 -*-
  """
- AbstractLLM - Unified interface to all LLM providers with essential infrastructure.
+ AbstractCore - Unified interface to all LLM providers with essential infrastructure.

  Key Features:
  • Multi-provider support (OpenAI, Anthropic, Ollama, HuggingFace, MLX, LMStudio)
@@ -11,7 +11,7 @@ Key Features:
  • Event system for observability

  Quick Start:
-     from abstractllm import create_llm
+     from abstractcore import create_llm

      # Unified token management across all providers
      llm = create_llm(

abstractcore/apps/__init__.py
@@ -0,0 +1 @@
+ # AbstractCore CLI Applications

{abstractllm → abstractcore}/apps/__main__.py
@@ -1,9 +1,9 @@
  #!/usr/bin/env python3
  """
- AbstractLLM Apps - Command-line interface launcher
+ AbstractCore Apps - Command-line interface launcher

  Usage:
-     python -m abstractllm.apps <app_name> [options]
+     python -m abstractcore.apps <app_name> [options]

  Available apps:
      summarizer - Document summarization tool
@@ -11,10 +11,10 @@ Available apps:
      judge - Text evaluation and scoring tool

  Examples:
-     python -m abstractllm.apps summarizer document.txt
-     python -m abstractllm.apps extractor report.txt --format json-ld
-     python -m abstractllm.apps judge essay.txt --criteria clarity,accuracy
-     python -m abstractllm.apps <app> --help
+     python -m abstractcore.apps summarizer document.txt
+     python -m abstractcore.apps extractor report.txt --format json-ld
+     python -m abstractcore.apps judge essay.txt --criteria clarity,accuracy
+     python -m abstractcore.apps <app> --help
  """

  import sys

{abstractllm → abstractcore}/apps/extractor.py
@@ -1,9 +1,9 @@
  #!/usr/bin/env python3
  """
- AbstractLLM Entity Extractor CLI Application
+ AbstractCore Entity Extractor CLI Application

  Usage:
-     python -m abstractllm.apps.extractor <file_path> [options]
+     python -m abstractcore.apps.extractor <file_path> [options]

  Options:
      --focus <focus> Specific focus area for extraction (e.g., "technology", "business", "medical")
@@ -27,12 +27,12 @@ Options:
      --help Show this help message

  Examples:
-     python -m abstractllm.apps.extractor document.pdf
-     python -m abstractllm.apps.extractor report.txt --focus technology --style structured --verbose
-     python -m abstractllm.apps.extractor data.md --entity-types person,organization --output kg.jsonld
-     python -m abstractllm.apps.extractor large.txt --fast --minified --verbose  # Fast, compact output
-     python -m abstractllm.apps.extractor report.txt --length detailed --provider openai --model gpt-4o-mini
-     python -m abstractllm.apps.extractor doc.txt --iterate 3 --verbose  # 3 refinement passes for higher quality
+     python -m abstractcore.apps.extractor document.pdf
+     python -m abstractcore.apps.extractor report.txt --focus technology --style structured --verbose
+     python -m abstractcore.apps.extractor data.md --entity-types person,organization --output kg.jsonld
+     python -m abstractcore.apps.extractor large.txt --fast --minified --verbose  # Fast, compact output
+     python -m abstractcore.apps.extractor report.txt --length detailed --provider openai --model gpt-4o-mini
+     python -m abstractcore.apps.extractor doc.txt --iterate 3 --verbose  # 3 refinement passes for higher quality
  """

  import argparse
@@ -52,6 +52,18 @@ from ..processing import BasicExtractor
  from ..core.factory import create_llm


+ def timeout_type(value):
+     """Parse timeout value - accepts None, 'none', or float"""
+     if value is None:
+         return None
+     if isinstance(value, str) and value.lower() == 'none':
+         return None
+     try:
+         return float(value)
+     except ValueError:
+         raise argparse.ArgumentTypeError(f"Invalid timeout value: {value}. Use 'none' for unlimited or a number in seconds.")
+
+
  def read_file_content(file_path: str) -> str:
      """
      Read content from various file types
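
A minimal sketch of how timeout_type behaves once wired into argparse, mirroring the --timeout change further down (the standalone parser object and the import path are illustrative assumptions, not part of the diff):

    import argparse
    from abstractcore.apps.extractor import timeout_type  # assumed import path

    parser = argparse.ArgumentParser()
    parser.add_argument('--timeout', type=timeout_type, default=None,
                        help='Seconds, or "none" for unlimited')

    assert parser.parse_args(['--timeout', 'none']).timeout is None  # 'none' → no HTTP timeout
    assert parser.parse_args(['--timeout', '600']).timeout == 600.0  # numeric strings become floats
    # Anything else raises argparse.ArgumentTypeError, which argparse reports as a usage error.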
@@ -143,17 +155,17 @@ def parse_extraction_length(length_str: Optional[str]) -> str:
  def main():
      """Main CLI function"""
      parser = argparse.ArgumentParser(
-         description="AbstractLLM Entity & Relationship Extractor - Default: qwen3:4b-instruct-2507-q4_K_M (requires Ollama)",
+         description="AbstractCore Entity & Relationship Extractor - Default: qwen3:4b-instruct-2507-q4_K_M (requires Ollama)",
          formatter_class=argparse.RawDescriptionHelpFormatter,
          epilog="""
  Examples:
-   python -m abstractllm.apps.extractor document.pdf
-   python -m abstractllm.apps.extractor report.txt --focus=technology --style=structured --verbose
-   python -m abstractllm.apps.extractor data.md --entity-types=person,organization --output=kg.jsonld
-   python -m abstractllm.apps.extractor large.txt --length=detailed --fast --minified --verbose
-   python -m abstractllm.apps.extractor doc.txt --iterate=3 --verbose  # Iterative refinement for quality
-   python -m abstractllm.apps.extractor doc.txt --format=triples --verbose  # RDF triples output
-   python -m abstractllm.apps.extractor doc.txt --format=triples --output=triples.txt  # Simple triples
+   python -m abstractcore.apps.extractor document.pdf
+   python -m abstractcore.apps.extractor report.txt --focus=technology --style=structured --verbose
+   python -m abstractcore.apps.extractor data.md --entity-types=person,organization --output=kg.jsonld
+   python -m abstractcore.apps.extractor large.txt --length=detailed --fast --minified --verbose
+   python -m abstractcore.apps.extractor doc.txt --iterate=3 --verbose  # Iterative refinement for quality
+   python -m abstractcore.apps.extractor doc.txt --format=triples --verbose  # RDF triples output
+   python -m abstractcore.apps.extractor doc.txt --format=triples --output=triples.txt  # Simple triples

  Supported file types: .txt, .md, .py, .js, .html, .json, .csv, and most text-based files

@@ -295,9 +307,9 @@ Default model setup:

      parser.add_argument(
          '--timeout',
-         type=float,
-         default=600.0,
-         help='HTTP request timeout in seconds for LLM providers (default: 300, i.e., 5 minutes). Increase for large models like 36B+ parameters'
+         type=timeout_type,
+         default=None,
+         help='HTTP request timeout in seconds for LLM providers (default: unlimited). Use "none" for unlimited timeout or specify seconds (e.g., 600 for 10 minutes)'
      )

      parser.add_argument(
@@ -342,13 +354,7 @@ Default model setup:
          sys.exit(1)

      # Validate timeout parameter
-     if args.timeout < 30.0:
-         print("Error: Timeout must be at least 30 seconds")
-         sys.exit(1)

-     if args.timeout > 7200.0:  # 2 hours
-         print("Error: Timeout cannot exceed 7200 seconds (2 hours)")
-         sys.exit(1)


      if args.provider and not args.model:
@@ -423,7 +429,8 @@ Default model setup:
                  llm=llm,
                  max_chunk_size=adjusted_chunk_size,
                  max_tokens=args.max_tokens,
-                 max_output_tokens=args.max_output_tokens
+                 max_output_tokens=args.max_output_tokens,
+                 timeout=args.timeout
              )
          else:
              # Default configuration
@@ -434,7 +441,8 @@ Default model setup:
              extractor = BasicExtractor(
                  max_chunk_size=args.chunk_size,
                  max_tokens=args.max_tokens,
-                 max_output_tokens=args.max_output_tokens
+                 max_output_tokens=args.max_output_tokens,
+                 timeout=args.timeout
              )
      except RuntimeError as e:
          # Handle default model not available

{abstractllm → abstractcore}/apps/judge.py
@@ -1,9 +1,9 @@
  #!/usr/bin/env python3
  """
- AbstractLLM Basic Judge CLI Application
+ AbstractCore Basic Judge CLI Application

  Usage:
-     python -m abstractllm.apps.judge <file_path_or_text> [file2] [file3] ... [options]
+     python -m abstractcore.apps.judge <file_path_or_text> [file2] [file3] ... [options]

  Options:
      --context <context> Evaluation context description (e.g., "code review", "documentation assessment")
@@ -25,18 +25,18 @@ Options:

  Examples:
      # Single file or text
-     python -m abstractllm.apps.judge "This code is well-structured and solves the problem efficiently."
-     python -m abstractllm.apps.judge document.py --context "code review" --criteria clarity,soundness,effectiveness
+     python -m abstractcore.apps.judge "This code is well-structured and solves the problem efficiently."
+     python -m abstractcore.apps.judge document.py --context "code review" --criteria clarity,soundness,effectiveness

      # Multiple files (evaluated sequentially to avoid context overflow)
-     python -m abstractllm.apps.judge file1.py file2.py file3.py --context "code review" --output assessments.json
-     python -m abstractllm.apps.judge *.py --context "Python code review" --format plain
-     python -m abstractllm.apps.judge docs/*.md --context "documentation review" --criteria clarity,completeness
+     python -m abstractcore.apps.judge file1.py file2.py file3.py --context "code review" --output assessments.json
+     python -m abstractcore.apps.judge *.py --context "Python code review" --format plain
+     python -m abstractcore.apps.judge docs/*.md --context "documentation review" --criteria clarity,completeness

      # Other options
-     python -m abstractllm.apps.judge proposal.md --focus "technical accuracy,completeness,examples" --output assessment.json
-     python -m abstractllm.apps.judge content.txt --reference ideal_solution.txt --format plain --verbose
-     python -m abstractllm.apps.judge text.md --provider openai --model gpt-4o-mini --temperature 0.05
+     python -m abstractcore.apps.judge proposal.md --focus "technical accuracy,completeness,examples" --output assessment.json
+     python -m abstractcore.apps.judge content.txt --reference ideal_solution.txt --format plain --verbose
+     python -m abstractcore.apps.judge text.md --provider openai --model gpt-4o-mini --temperature 0.05
  """

  import argparse
@@ -250,22 +250,22 @@ def format_assessment_plain(assessment: dict) -> str:
  def main():
      """Main CLI function"""
      parser = argparse.ArgumentParser(
-         description="AbstractLLM Basic Judge - LLM-as-a-judge for objective evaluation (Default: qwen3:4b-instruct-2507-q4_K_M)",
+         description="AbstractCore Basic Judge - LLM-as-a-judge for objective evaluation (Default: qwen3:4b-instruct-2507-q4_K_M)",
          formatter_class=argparse.RawDescriptionHelpFormatter,
          epilog="""
  Examples:
    # Single file or text
-   python -m abstractllm.apps.judge "This code is well-structured."
-   python -m abstractllm.apps.judge document.py --context "code review" --criteria clarity,soundness
-   python -m abstractllm.apps.judge proposal.md --focus "technical accuracy,examples" --output assessment.json
+   python -m abstractcore.apps.judge "This code is well-structured."
+   python -m abstractcore.apps.judge document.py --context "code review" --criteria clarity,soundness
+   python -m abstractcore.apps.judge proposal.md --focus "technical accuracy,examples" --output assessment.json

    # Multiple files (evaluated sequentially)
-   python -m abstractllm.apps.judge file1.py file2.py file3.py --context "code review" --format json
-   python -m abstractllm.apps.judge docs/*.md --context "documentation review" --format plain
+   python -m abstractcore.apps.judge file1.py file2.py file3.py --context "code review" --format json
+   python -m abstractcore.apps.judge docs/*.md --context "documentation review" --format plain

    # Other options
-   python -m abstractllm.apps.judge content.txt --reference ideal.txt --format plain --verbose
-   python -m abstractllm.apps.judge text.md --provider openai --model gpt-4o-mini
+   python -m abstractcore.apps.judge content.txt --reference ideal.txt --format plain --verbose
+   python -m abstractcore.apps.judge text.md --provider openai --model gpt-4o-mini

  Available criteria:
    clarity, simplicity, actionability, soundness, innovation, effectiveness,
@@ -383,9 +383,8 @@ Default model setup:

      parser.add_argument(
          '--timeout',
-         type=float,
-         default=300.0,
-         help='HTTP request timeout in seconds for LLM providers (default: 300)'
+         default=None,
+         help='HTTP request timeout in seconds for LLM providers (default: None = unlimited)'
      )

      # Parse arguments
@@ -398,9 +397,6 @@ Default model setup:
          sys.exit(1)

      # Validate timeout
-     if args.timeout < 30.0:
-         print("Error: Timeout must be at least 30 seconds")
-         sys.exit(1)


      if args.provider and not args.model:

{abstractllm → abstractcore}/apps/summarizer.py
@@ -1,9 +1,9 @@
  #!/usr/bin/env python3
  """
- AbstractLLM Summarizer CLI Application
+ AbstractCore Summarizer CLI Application

  Usage:
-     python -m abstractllm.apps.summarizer <file_path> [options]
+     python -m abstractcore.apps.summarizer <file_path> [options]

  Options:
      --style <style> Summary style (structured, narrative, objective, analytical, executive, conversational)
@@ -19,10 +19,10 @@ Options:
      --help Show this help message

  Examples:
-     python -m abstractllm.apps.summarizer document.pdf
-     python -m abstractllm.apps.summarizer report.txt --style executive --length brief --verbose
-     python -m abstractllm.apps.summarizer data.md --focus "technical details" --output summary.txt
-     python -m abstractllm.apps.summarizer large.txt --chunk-size 15000 --provider openai --model gpt-4o-mini
+     python -m abstractcore.apps.summarizer document.pdf
+     python -m abstractcore.apps.summarizer report.txt --style executive --length brief --verbose
+     python -m abstractcore.apps.summarizer data.md --focus "technical details" --output summary.txt
+     python -m abstractcore.apps.summarizer large.txt --chunk-size 15000 --provider openai --model gpt-4o-mini
  """

  import argparse
@@ -156,14 +156,14 @@ def format_summary_output(result) -> str:
  def main():
      """Main CLI function"""
      parser = argparse.ArgumentParser(
-         description="AbstractLLM Document Summarizer - Default: gemma3:1b-it-qat (requires Ollama)",
+         description="AbstractCore Document Summarizer - Default: gemma3:1b-it-qat (requires Ollama)",
          formatter_class=argparse.RawDescriptionHelpFormatter,
          epilog="""
  Examples:
-   python -m abstractllm.apps.summarizer document.pdf
-   python -m abstractllm.apps.summarizer report.txt --style executive --length brief --verbose
-   python -m abstractllm.apps.summarizer data.md --focus "technical details" --output summary.txt
-   python -m abstractllm.apps.summarizer large.txt --chunk-size 15000 --provider openai --model gpt-4o-mini
+   python -m abstractcore.apps.summarizer document.pdf
+   python -m abstractcore.apps.summarizer report.txt --style executive --length brief --verbose
+   python -m abstractcore.apps.summarizer data.md --focus "technical details" --output summary.txt
+   python -m abstractcore.apps.summarizer large.txt --chunk-size 15000 --provider openai --model gpt-4o-mini

  Supported file types: .txt, .md, .py, .js, .html, .json, .csv, and most text-based files

@@ -240,6 +240,12 @@ Default model setup:
          help='Show detailed progress information'
      )

+     parser.add_argument(
+         '--timeout',
+         default=None,
+         help='HTTP request timeout in seconds for LLM providers (default: None = unlimited)'
+     )
+
      # Parse arguments
      args = parser.parse_args()

@@ -286,12 +292,13 @@ Default model setup:
          if args.verbose:
              print(f"Initializing summarizer ({args.provider}, {args.model}, {args.max_tokens} token context, {args.max_output_tokens} output tokens)...")

-         llm = create_llm(args.provider, model=args.model, max_tokens=args.max_tokens, max_output_tokens=args.max_output_tokens)
+         llm = create_llm(args.provider, model=args.model, max_tokens=args.max_tokens, max_output_tokens=args.max_output_tokens, timeout=args.timeout)
          summarizer = BasicSummarizer(
              llm,
              max_chunk_size=args.chunk_size,
              max_tokens=args.max_tokens,
-             max_output_tokens=args.max_output_tokens
+             max_output_tokens=args.max_output_tokens,
+             timeout=args.timeout
          )
      else:
          # Default configuration with chunk size override
@@ -302,12 +309,13 @@ Default model setup:
              print(f"Initializing summarizer (ollama, gemma3:1b-it-qat, {args.max_tokens} token context, {args.max_output_tokens} output tokens, {args.chunk_size} chunk size)...")

          try:
-             llm = create_llm("ollama", model="gemma3:1b-it-qat", max_tokens=args.max_tokens, max_output_tokens=args.max_output_tokens)
+             llm = create_llm("ollama", model="gemma3:1b-it-qat", max_tokens=args.max_tokens, max_output_tokens=args.max_output_tokens, timeout=args.timeout)
              summarizer = BasicSummarizer(
                  llm,
                  max_chunk_size=args.chunk_size,
                  max_tokens=args.max_tokens,
-                 max_output_tokens=args.max_output_tokens
+                 max_output_tokens=args.max_output_tokens,
+                 timeout=args.timeout
              )
          except Exception as e:
              # Handle default model not available
@@ -327,7 +335,8 @@ Default model setup:
              summarizer = BasicSummarizer(
                  max_chunk_size=args.chunk_size,
                  max_tokens=args.max_tokens,
-                 max_output_tokens=args.max_output_tokens
+                 max_output_tokens=args.max_output_tokens,
+                 timeout=args.timeout
              )
      except RuntimeError as e:
          # Handle default model not available
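
Taken together, these hunks thread one timeout value from the CLI through both layers; a minimal sketch (values illustrative; the BasicSummarizer import path is assumed to parallel BasicExtractor's):

    from abstractcore import create_llm
    from abstractcore.processing import BasicSummarizer  # assumed import path

    # timeout=None (the new default) means no HTTP timeout; a float caps each
    # provider request in seconds. The same value goes to the provider and to
    # the summarizer, mirroring main() above.
    llm = create_llm("ollama", model="gemma3:1b-it-qat",
                     max_tokens=16000, max_output_tokens=2048, timeout=None)
    summarizer = BasicSummarizer(llm, max_chunk_size=10000,
                                 max_tokens=16000, max_output_tokens=2048, timeout=None)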

{abstractllm → abstractcore}/core/__init__.py
@@ -12,7 +12,7 @@ from .factory import create_llm
  from .session import BasicSession
  from .types import GenerateResponse, Message
  from .enums import ModelParameter, ModelCapability, MessageRole
- from .interface import AbstractLLMInterface
+ from .interface import AbstractCoreInterface

  __all__ = [
      'create_llm',
@@ -22,5 +22,5 @@ __all__ = [
      'ModelParameter',
      'ModelCapability',
      'MessageRole',
-     'AbstractLLMInterface'
+     'AbstractCoreInterface'
  ]

{abstractllm → abstractcore}/core/enums.py
@@ -1,5 +1,5 @@
  """
- Enums for AbstractLLM.
+ Enums for AbstractCore.
  """

  from enum import Enum

{abstractllm → abstractcore}/core/factory.py
@@ -3,11 +3,11 @@ Factory for creating LLM providers.
  """

  from typing import Optional
- from .interface import AbstractLLMInterface
+ from .interface import AbstractCoreInterface
  from ..exceptions import ModelNotFoundError, AuthenticationError, ProviderAPIError


- def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> AbstractLLMInterface:
+ def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> AbstractCoreInterface:
      """
      Create an LLM provider instance with unified token parameter support.

@@ -16,7 +16,7 @@ def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> Abstract
          model: Model name (optional, will use provider default)
          **kwargs: Additional configuration including token parameters

-     Token Parameters (AbstractLLM Unified Standard):
+     Token Parameters (AbstractCore Unified Standard):
          max_tokens: Total context window budget (input + output combined)
          max_output_tokens: Maximum tokens reserved for generation (default: 2048)
          max_input_tokens: Maximum tokens for input (auto-calculated if not specified)
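
A short sketch of the unified budget described above (the arithmetic in the comment is the documented intent, not a guarantee about internals):

    from abstractcore import create_llm

    # max_tokens is the total context-window budget; max_output_tokens is
    # reserved for generation; the input allowance is derived when not given
    # (documented intent: roughly max_tokens - max_output_tokens).
    llm = create_llm("ollama", model="qwen3-coder:30b",
                     max_tokens=32000, max_output_tokens=2048)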
@@ -75,7 +75,7 @@ def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> Abstract
              from ..providers.openai_provider import OpenAIProvider
              return OpenAIProvider(model=model or "gpt-5-nano-2025-08-07", **kwargs)
          except ImportError:
-             raise ImportError("OpenAI dependencies not installed. Install with: pip install abstractllm[openai]")
+             raise ImportError("OpenAI dependencies not installed. Install with: pip install abstractcore[openai]")
          except (ModelNotFoundError, AuthenticationError, ProviderAPIError) as e:
              # Re-raise provider exceptions cleanly
              raise e
@@ -85,7 +85,7 @@ def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> Abstract
              from ..providers.anthropic_provider import AnthropicProvider
              return AnthropicProvider(model=model or "claude-3-5-haiku-latest", **kwargs)
          except ImportError:
-             raise ImportError("Anthropic dependencies not installed. Install with: pip install abstractllm[anthropic]")
+             raise ImportError("Anthropic dependencies not installed. Install with: pip install abstractcore[anthropic]")
          except (ModelNotFoundError, AuthenticationError, ProviderAPIError) as e:
              # Re-raise provider exceptions cleanly
              raise e
@@ -95,21 +95,21 @@ def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> Abstract
              from ..providers.ollama_provider import OllamaProvider
              return OllamaProvider(model=model or "qwen3-coder:30b", **kwargs)
          except ImportError:
-             raise ImportError("Ollama dependencies not installed. Install with: pip install abstractllm[ollama]")
+             raise ImportError("Ollama dependencies not installed. Install with: pip install abstractcore[ollama]")

      elif provider.lower() == "huggingface":
          try:
              from ..providers.huggingface_provider import HuggingFaceProvider
              return HuggingFaceProvider(model=model or "Qwen/Qwen3-4B/", **kwargs)
          except ImportError:
-             raise ImportError("HuggingFace dependencies not installed. Install with: pip install abstractllm[huggingface]")
+             raise ImportError("HuggingFace dependencies not installed. Install with: pip install abstractcore[huggingface]")

      elif provider.lower() == "mlx":
          try:
              from ..providers.mlx_provider import MLXProvider
              return MLXProvider(model=model or "mlx-community/Qwen3-4B", **kwargs)
          except ImportError:
-             raise ImportError("MLX dependencies not installed. Install with: pip install abstractllm[mlx]")
+             raise ImportError("MLX dependencies not installed. Install with: pip install abstractcore[mlx]")

      elif provider.lower() == "lmstudio":
          try:

{abstractllm → abstractcore}/core/interface.py
@@ -7,11 +7,11 @@ from typing import List, Dict, Any, Optional, Union, Iterator
  from .types import GenerateResponse, Message


- class AbstractLLMInterface(ABC):
+ class AbstractCoreInterface(ABC):
      """
      Abstract base class for all LLM providers.

-     AbstractLLM Token Parameter Vocabulary (Unified Standard):
+     AbstractCore Token Parameter Vocabulary (Unified Standard):
      =========================================================

      • max_tokens: Total context window budget (input + output combined) - YOUR BUDGET
@@ -57,7 +57,7 @@ class AbstractLLMInterface(ABC):

      Provider Abstraction:
      ===================
-     AbstractLLM handles provider-specific parameter mapping internally:
+     AbstractCore handles provider-specific parameter mapping internally:
      • OpenAI: max_tokens → max_completion_tokens (o1 models) or max_tokens (others)
      • Anthropic: max_output_tokens → max_tokens (output-focused API)
      • Google: max_output_tokens → max_output_tokens (direct mapping)
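
Because that mapping is internal, the same call shape works across backends; a hedged sketch (the provider/model pairs are illustrative):

    from abstractcore import create_llm

    # One token vocabulary, many providers: the translation to each
    # provider's native parameter names happens inside AbstractCore.
    for provider, model in [("openai", "gpt-5-nano-2025-08-07"),
                            ("anthropic", "claude-3-5-haiku-latest")]:
        llm = create_llm(provider, model=model,
                         max_tokens=16000, max_output_tokens=1024)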

{abstractllm → abstractcore}/core/retry.py
@@ -1,5 +1,5 @@
  """
- Production-ready retry strategies for AbstractLLM Core.
+ Production-ready retry strategies for AbstractCore.

  Implements SOTA exponential backoff with jitter and circuit breaker patterns
  based on 2025 best practices from AWS Architecture Blog, Tenacity principles,

{abstractllm → abstractcore}/core/session.py
@@ -10,7 +10,7 @@ import json
  import uuid
  from collections.abc import Generator

- from .interface import AbstractLLMInterface
+ from .interface import AbstractCoreInterface
  from .types import GenerateResponse, Message
  from .enums import MessageRole

@@ -25,7 +25,7 @@ class BasicSession:
      """

      def __init__(self,
-                  provider: Optional[AbstractLLMInterface] = None,
+                  provider: Optional[AbstractCoreInterface] = None,
                   system_prompt: Optional[str] = None,
                   tools: Optional[List[Callable]] = None,
                   timeout: Optional[float] = None,
@@ -255,7 +255,7 @@ class BasicSession:
              json.dump(data, f, indent=2)

      @classmethod
-     def load(cls, filepath: Union[str, Path], provider: Optional[AbstractLLMInterface] = None,
+     def load(cls, filepath: Union[str, Path], provider: Optional[AbstractCoreInterface] = None,
               tools: Optional[List[Callable]] = None) -> 'BasicSession':
          """
          Load session from file with complete metadata restoration.
@@ -325,7 +325,7 @@ class BasicSession:
          }

      @classmethod
-     def from_dict(cls, data: Dict[str, Any], provider: Optional[AbstractLLMInterface] = None,
+     def from_dict(cls, data: Dict[str, Any], provider: Optional[AbstractCoreInterface] = None,
                    tools: Optional[List[Callable]] = None) -> 'BasicSession':
          """
          Create session from dictionary data (supports both new archive format and legacy format).
@@ -429,7 +429,7 @@ class BasicSession:
      def compact(self,
                  preserve_recent: int = 6,
                  focus: Optional[str] = None,
-                 compact_provider: Optional[AbstractLLMInterface] = None,
+                 compact_provider: Optional[AbstractCoreInterface] = None,
                  reason: str = "manual") -> 'BasicSession':
          """
          Compact chat history using SOTA 2025 best practices for conversation summarization.
@@ -675,7 +675,7 @@ class BasicSession:
          print(f"✅ Session compacted: {len(compacted.messages)} messages, ~{compacted.get_token_estimate()} tokens")

      def generate_summary(self, preserve_recent: int = 6, focus: Optional[str] = None,
-                          compact_provider: Optional[AbstractLLMInterface] = None) -> Dict[str, Any]:
+                          compact_provider: Optional[AbstractCoreInterface] = None) -> Dict[str, Any]:
          """
          Generate a summary of the entire conversation and store it in session.summary.
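
A brief sketch of the renamed interface in use, based on the signatures above (the save() call is an assumption, inferred as the counterpart of the load() classmethod and the json.dump shown):

    from abstractcore import create_llm
    from abstractcore.core.session import BasicSession

    llm = create_llm("ollama", model="qwen3-coder:30b")
    session = BasicSession(provider=llm, system_prompt="You are terse.")
    session.save("chat.json")   # assumed persistence method; writes JSON via json.dump
    restored = BasicSession.load("chat.json", provider=llm)  # provider is re-attached, not serialized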

{abstractllm → abstractcore}/core/types.py
@@ -1,5 +1,5 @@
  """
- Core types for AbstractLLM.
+ Core types for AbstractCore.
  """

  from typing import Optional, Dict, List, Any

{abstractllm → abstractcore}/embeddings/__init__.py
@@ -1,5 +1,5 @@
  """
- Vector Embeddings for AbstractLLM Core
+ Vector Embeddings for AbstractCore
  =====================================

  Provides efficient text embedding with SOTA open-source models.

{abstractllm → abstractcore}/embeddings/manager.py
@@ -44,7 +44,7 @@ def _suppress_onnx_warnings():
      This suppresses the CoreML and node assignment warnings commonly seen on macOS.
      These warnings are informational only and don't impact performance or quality.

-     To enable verbose ONNX logging for debugging, set: ABSTRACTLLM_ONNX_VERBOSE=1
+     To enable verbose ONNX logging for debugging, set: ABSTRACTCORE_ONNX_VERBOSE=1
      """
      with warnings.catch_warnings():
          # Suppress PyTorch ONNX registration warnings (harmless in PyTorch 2.8+)
@@ -73,8 +73,8 @@ def _suppress_onnx_warnings():
          import os

          # Allow users to enable verbose ONNX logging for debugging
-         # Set ABSTRACTLLM_ONNX_VERBOSE=1 to see ONNX warnings for debugging
-         if os.environ.get("ABSTRACTLLM_ONNX_VERBOSE", "0") != "1":
+         # Set ABSTRACTCORE_ONNX_VERBOSE=1 to see ONNX warnings for debugging
+         if os.environ.get("ABSTRACTCORE_ONNX_VERBOSE", "0") != "1":
              # Suppress the CoreML and node assignment warnings you may see on macOS
              # These are harmless informational messages that don't affect performance or quality:
              # - CoreML partitioning warnings: Normal behavior when model ops aren't all CoreML-compatible
@@ -132,7 +132,7 @@ class EmbeddingManager:
          model: Model identifier (HuggingFace model ID for HF provider, model name for others).
          provider: Embedding provider ('huggingface', 'ollama', 'lmstudio'). Defaults to 'huggingface'.
          backend: Inference backend for HuggingFace ('auto', 'pytorch', 'onnx', 'openvino')
-         cache_dir: Directory for persistent cache. Defaults to ~/.abstractllm/embeddings
+         cache_dir: Directory for persistent cache. Defaults to ~/.abstractcore/embeddings
          cache_size: Maximum number of embeddings to cache in memory
          output_dims: Output dimensions for Matryoshka truncation (if supported by provider)
          trust_remote_code: Whether to trust remote code (HuggingFace only)
@@ -193,7 +193,7 @@ class EmbeddingManager:
          logger.info(f"Initialized LMStudio embedding provider with model: {model}")

          # Common setup for all providers
-         self.cache_dir = Path(cache_dir) if cache_dir else Path.home() / ".abstractllm" / "embeddings"
+         self.cache_dir = Path(cache_dir) if cache_dir else Path.home() / ".abstractcore" / "embeddings"
          self.cache_dir.mkdir(parents=True, exist_ok=True)
          self.cache_size = cache_size
          self.output_dims = output_dims
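
A hedged construction example based on the argument docs above (the import path and model ID are illustrative assumptions):

    from abstractcore.embeddings import EmbeddingManager  # assumed export location

    # cache_dir=None now falls back to ~/.abstractcore/embeddings
    # (previously ~/.abstractllm/embeddings).
    manager = EmbeddingManager(
        model="sentence-transformers/all-MiniLM-L6-v2",  # illustrative HF model ID
        provider="huggingface",
        backend="auto",
        cache_dir=None,
        cache_size=1000,
    )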

{abstractllm → abstractcore}/events/__init__.py
@@ -1,5 +1,5 @@
  """
- Event system for AbstractLLM - OpenTelemetry compatible.
+ Event system for AbstractCore - OpenTelemetry compatible.

  This module provides a comprehensive event system for tracking LLM operations,
  including generation, tool calls, structured output, and performance metrics.

{abstractllm → abstractcore}/processing/basic_extractor.py
@@ -13,7 +13,7 @@ import json
  import logging
  from pydantic import BaseModel, Field

- from ..core.interface import AbstractLLMInterface
+ from ..core.interface import AbstractCoreInterface
  from ..core.factory import create_llm
  from ..structured.retry import FeedbackRetry
  from ..utils.structured_logging import get_logger
@@ -50,22 +50,24 @@ class BasicExtractor:

      def __init__(
          self,
-         llm: Optional[AbstractLLMInterface] = None,
+         llm: Optional[AbstractCoreInterface] = None,
          max_chunk_size: int = 8000,
          max_tokens: int = 32000,
-         max_output_tokens: int = 8000
+         max_output_tokens: int = 8000,
+         timeout: Optional[float] = None
      ):
          """Initialize the extractor

          Args:
-             llm: AbstractLLM instance (any provider). If None, uses default Ollama model
+             llm: AbstractCore instance (any provider). If None, uses default Ollama model
              max_chunk_size: Maximum characters per chunk for long documents (default 8000)
              max_tokens: Maximum total tokens for LLM context (default 32000)
              max_output_tokens: Maximum tokens for LLM output generation (default 8000)
+             timeout: HTTP request timeout in seconds. None for unlimited timeout (default None)
          """
          if llm is None:
              try:
-                 self.llm = create_llm("ollama", model="qwen3:4b-instruct-2507-q4_K_M", max_tokens=max_tokens, max_output_tokens=max_output_tokens)
+                 self.llm = create_llm("ollama", model="qwen3:4b-instruct-2507-q4_K_M", max_tokens=max_tokens, max_output_tokens=max_output_tokens, timeout=timeout)
              except Exception as e:
                  error_msg = (
                      f"❌ Failed to initialize default Ollama model 'qwen3:4b-instruct-2507-q4_K_M': {e}\n\n"
@@ -77,8 +79,8 @@ class BasicExtractor:
                      " - qwen3-coder:30b (excellent for structured output, requires 32GB RAM)\n"
                      " - gpt-oss:120b (highest quality, requires 120GB RAM)\n\n"
                      "🔧 Alternatively, provide a custom LLM instance:\n"
-                     " from abstractllm import create_llm\n"
-                     " from abstractllm.processing import BasicExtractor\n"
+                     " from abstractcore import create_llm\n"
+                     " from abstractcore.processing import BasicExtractor\n"
                      " \n"
                      " llm = create_llm('openai', model='gpt-4o-mini', max_tokens=32000, max_output_tokens=8000)\n"
                      " extractor = BasicExtractor(llm)"