abstractcore 2.6.0__py3-none-any.whl → 2.6.3__py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
abstractcore/apps/judge.py

@@ -178,7 +178,7 @@ def format_assessment_plain(assessment: dict) -> str:
     lines.append(f"Overall Score: {assessment.get('overall_score', 0)}/5")
     lines.append("")
 
-    # Individual scores
+    # Predefined criterion scores
     score_fields = [
        ('clarity_score', 'Clarity'),
        ('simplicity_score', 'Simplicity'),
@@ -191,13 +191,28 @@ def format_assessment_plain(assessment: dict) -> str:
        ('coherence_score', 'Coherence')
     ]
 
-    lines.append("Individual Scores:")
-    lines.append("-" * 20)
-    for field, label in score_fields:
-        score = assessment.get(field)
-        if score is not None:
-            lines.append(f"{label:15}: {score}/5")
-    lines.append("")
+    # Check if any predefined scores exist
+    has_predefined_scores = any(assessment.get(field) is not None for field, _ in score_fields)
+
+    if has_predefined_scores:
+        lines.append("📋 Predefined Criterion Scores:")
+        lines.append("-" * 32)
+        for field, label in score_fields:
+            score = assessment.get(field)
+            if score is not None:
+                lines.append(f"{label:15}: {score}/5")
+        lines.append("")
+
+    # Custom criterion scores
+    custom_scores = assessment.get('custom_scores', {})
+    if custom_scores:
+        lines.append("🎯 Custom Criterion Scores:")
+        lines.append("-" * 28)
+        for criterion, score in custom_scores.items():
+            # Format criterion name nicely
+            criterion_display = criterion.replace('_', ' ').title()
+            lines.append(f"{criterion_display:30}: {score}/5")
+        lines.append("")
 
     # Strengths
     strengths = assessment.get('strengths', [])
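For illustration, a minimal sketch of what the reworked formatter emits, assuming a hypothetical assessment dict; the keys mirror those read by `format_assessment_plain` above, and the values are invented:

```python
# Hypothetical input; keys match those read by format_assessment_plain.
assessment = {
    "overall_score": 4,
    "clarity_score": 4,
    "simplicity_score": 3,
    "custom_scores": {"logical_coherence": 5},
}

# format_assessment_plain(assessment) would now render both sections:
#
#   📋 Predefined Criterion Scores:
#   --------------------------------
#   Clarity        : 4/5
#   Simplicity     : 3/5
#
#   🎯 Custom Criterion Scores:
#   ----------------------------
#   Logical Coherence             : 5/5
```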
abstractcore/config/__init__.py

@@ -7,4 +7,27 @@ Provides configuration management and command-line interface for AbstractCore.
 from .vision_config import handle_vision_commands, add_vision_arguments
 from .manager import get_config_manager
 
-__all__ = ['handle_vision_commands', 'add_vision_arguments', 'get_config_manager']
+
+def configure_provider(provider: str, **kwargs) -> None:
+    """Configure runtime settings for a provider."""
+    get_config_manager().configure_provider(provider, **kwargs)
+
+
+def get_provider_config(provider: str) -> dict:
+    """Get runtime configuration for a provider."""
+    return get_config_manager().get_provider_config(provider)
+
+
+def clear_provider_config(provider: str = None) -> None:
+    """Clear runtime provider configuration."""
+    get_config_manager().clear_provider_config(provider)
+
+
+__all__ = [
+    'handle_vision_commands',
+    'add_vision_arguments',
+    'get_config_manager',
+    'configure_provider',
+    'get_provider_config',
+    'clear_provider_config'
+]
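The three new functions are thin wrappers over the `ConfigurationManager` singleton, and the configuration they set is runtime-only (not persisted). A minimal usage sketch; the URL is a placeholder:

```python
from abstractcore.config import (
    configure_provider,
    get_provider_config,
    clear_provider_config,
)

# Point the ollama provider at a remote host for the rest of this process.
configure_provider('ollama', base_url='http://192.168.1.100:11434')
assert get_provider_config('ollama') == {'base_url': 'http://192.168.1.100:11434'}

clear_provider_config('ollama')  # clear one provider
clear_provider_config()          # or clear all providers
assert get_provider_config('ollama') == {}
```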
abstractcore/config/manager.py

@@ -136,6 +136,7 @@ class ConfigurationManager:
         self.config_dir = Path.home() / ".abstractcore" / "config"
         self.config_file = self.config_dir / "abstractcore.json"
         self.config = self._load_config()
+        self._provider_config: Dict[str, Dict[str, Any]] = {}  # Runtime config (not persisted)
 
     def _load_config(self) -> AbstractCoreConfig:
         """Load configuration from file or create default."""
@@ -437,6 +438,52 @@ class ConfigurationManager:
         """Check if local_files_only should be forced for transformers."""
         return self.config.offline.force_local_files_only
 
+    def configure_provider(self, provider: str, **kwargs) -> None:
+        """
+        Configure runtime settings for a provider.
+
+        Args:
+            provider: Provider name ('ollama', 'lmstudio', 'openai', 'anthropic')
+            **kwargs: Configuration options (base_url, timeout, etc.)
+
+        Example:
+            configure_provider('ollama', base_url='http://192.168.1.100:11434')
+        """
+        provider = provider.lower()
+        if provider not in self._provider_config:
+            self._provider_config[provider] = {}
+
+        for key, value in kwargs.items():
+            if value is None:
+                # Remove config (revert to env var / default)
+                self._provider_config[provider].pop(key, None)
+            else:
+                self._provider_config[provider][key] = value
+
+    def get_provider_config(self, provider: str) -> Dict[str, Any]:
+        """
+        Get runtime configuration for a provider.
+
+        Args:
+            provider: Provider name
+
+        Returns:
+            Dict with configured settings, or empty dict if no config
+        """
+        return self._provider_config.get(provider.lower(), {}).copy()
+
+    def clear_provider_config(self, provider: Optional[str] = None) -> None:
+        """
+        Clear runtime provider configuration.
+
+        Args:
+            provider: Provider name, or None to clear all
+        """
+        if provider is None:
+            self._provider_config.clear()
+        else:
+            self._provider_config.pop(provider.lower(), None)
+
 
# Global instance
_config_manager = None
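Two details of these methods are easy to miss: passing `None` as a value removes that key (reverting it to the env var or default), and provider names are lowercased on every path. A standalone sketch of that dict logic, mirroring the methods above rather than using the class itself:

```python
_provider_config: dict = {}

def _set(provider: str, **kwargs) -> None:
    cfg = _provider_config.setdefault(provider.lower(), {})
    for key, value in kwargs.items():
        if value is None:
            cfg.pop(key, None)  # None reverts this key to env var / default
        else:
            cfg[key] = value

_set('Ollama', base_url='http://192.168.1.100:11434', timeout=60)
_set('ollama', timeout=None)  # removes only the timeout key
assert _provider_config['ollama'] == {'base_url': 'http://192.168.1.100:11434'}
```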
abstractcore/core/session.py

@@ -939,13 +939,18 @@ class BasicSession:
         summary_tokens = self._estimate_tokens_for_summary(summary_text)
         return original_tokens / summary_tokens if summary_tokens > 0 else 1.0
 
-    def generate_assessment(self, criteria: Optional[Dict[str, bool]] = None) -> Dict[str, Any]:
+    def generate_assessment(
+        self,
+        criteria: Optional[Dict[str, bool]] = None,
+        custom_criteria: Optional[Dict[str, str]] = None
+    ) -> Dict[str, Any]:
         """
         Generate a quality assessment of the entire conversation and store it in session.assessment.
-
+
         Args:
-            criteria: Optional criteria for assessment
-
+            criteria: Optional predefined criteria toggles (e.g., {"clarity": True, "coherence": False})
+            custom_criteria: Optional custom domain-specific criteria with descriptions (e.g., {"logical_coherence": "Are results logically consistent?"})
+
         Returns:
             Dict containing the generated assessment
         """
@@ -989,13 +994,27 @@ class BasicSession:
         assessment_result = judge.evaluate(
             content=conversation_text,
             context="conversation quality assessment",
-            criteria=judge_criteria
+            criteria=judge_criteria,
+            custom_criteria=custom_criteria
         )
 
         # Store assessment in session
         self.assessment = {
             "created_at": start_time.isoformat(),
             "criteria": criteria,
+            "custom_criteria": custom_criteria,
+            "scores": {
+                "clarity": assessment_result.get('clarity_score'),
+                "simplicity": assessment_result.get('simplicity_score'),
+                "actionability": assessment_result.get('actionability_score'),
+                "soundness": assessment_result.get('soundness_score'),
+                "innovation": assessment_result.get('innovation_score'),
+                "effectiveness": assessment_result.get('effectiveness_score'),
+                "relevance": assessment_result.get('relevance_score'),
+                "completeness": assessment_result.get('completeness_score'),
+                "coherence": assessment_result.get('coherence_score'),
+            },
+            "custom_scores": assessment_result.get('custom_scores', {}),
             "overall_score": assessment_result.get('overall_score', 0),
             "judge_summary": assessment_result.get('judge_summary', ''),
             "strengths": assessment_result.get('strengths', []),
abstractcore/processing/basic_judge.py

@@ -9,11 +9,11 @@ Features:
 - Clear, simple and actionable feedback
 """
 
-from typing import Optional, List, Dict, Any, Union
+from typing import Optional, List, Dict, Any, Union, Type
 import json
 import logging
 from pathlib import Path
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, create_model
 
 from ..core.interface import AbstractCoreInterface
 from ..core.factory import create_llm
@@ -44,7 +44,7 @@ class Assessment(BaseModel):
     judge_summary: str = Field(..., description="Judge's experiential note summarizing the assessment task and key findings")
     source_reference: str = Field(..., description="Reference to what was assessed (file, content type, context)")
 
-    # Individual criterion scores
+    # Individual criterion scores (predefined criteria)
     clarity_score: Optional[int] = Field(None, description="Clarity score (1-5)")
     simplicity_score: Optional[int] = Field(None, description="Simplicity score (1-5)")
     actionability_score: Optional[int] = Field(None, description="Actionability score (1-5)")
@@ -161,6 +161,30 @@ class BasicJudge:
 
         self.retry_strategy = FeedbackRetry(max_attempts=3)
 
+    def _create_dynamic_assessment_model(self, custom_criteria: Optional[Dict[str, str]]) -> Type[BaseModel]:
+        """Create a dynamic Assessment model with custom score fields"""
+        if not custom_criteria:
+            return Assessment
+
+        # Build fields dict for dynamic model creation
+        fields_dict = {}
+
+        # Add custom score fields dynamically as REQUIRED (not Optional)
+        # This forces the LLM to provide scores for all custom criteria
+        for criterion_name in custom_criteria.keys():
+            field_name = f"{criterion_name}_score"
+            # Make it required (int, not Optional[int]) with Field(...)
+            fields_dict[field_name] = (int, Field(..., description=f"{criterion_name} score (1-5)", ge=1, le=5))
+
+        # Create dynamic model that inherits from Assessment using create_model
+        DynamicAssessment = create_model(
+            'DynamicAssessment',
+            __base__=Assessment,
+            **fields_dict
+        )
+
+        return DynamicAssessment
+
     def evaluate(
         self,
         content: str,
@@ -168,7 +192,8 @@ class BasicJudge:
         criteria: Optional[JudgmentCriteria] = None,
         focus: Optional[str] = None,
         reference: Optional[str] = None,
-        include_criteria: bool = False
+        include_criteria: bool = False,
+        custom_criteria: Optional[Dict[str, str]] = None
     ) -> dict:
         """
         Evaluate content against specified criteria
@@ -180,6 +205,7 @@ class BasicJudge:
             focus: Specific areas to focus evaluation on (e.g., "technical accuracy, performance")
             reference: Optional reference/expected output for comparison
             include_criteria: Include detailed explanation of evaluation criteria in assessment
+            custom_criteria: Custom domain-specific criteria with descriptions (e.g., {"logical_coherence": "Are results logically consistent?"})
 
         Returns:
             dict: Structured assessment result
@@ -196,13 +222,16 @@ class BasicJudge:
         logger.info("Starting evaluation", context=context)
 
         # Build the evaluation prompt
-        prompt = self._build_evaluation_prompt(content, context, criteria, focus, reference, include_criteria)
+        prompt = self._build_evaluation_prompt(content, context, criteria, focus, reference, include_criteria, custom_criteria)
+
+        # Create dynamic assessment model with custom score fields
+        AssessmentModel = self._create_dynamic_assessment_model(custom_criteria)
 
         # Generate structured assessment
         try:
             result = self.llm.generate(
                 prompt,
-                response_model=Assessment,
+                response_model=AssessmentModel,
                 retry_strategy=self.retry_strategy
             )
@@ -216,6 +245,19 @@ class BasicJudge:
         # Convert to dict and add metadata
         assessment_dict = result.dict() if hasattr(result, 'dict') else result
 
+        # Extract custom scores from individual fields and add to custom_scores dict
+        if custom_criteria:
+            custom_scores = {}
+            for criterion_name in custom_criteria.keys():
+                field_name = f"{criterion_name}_score"
+                if field_name in assessment_dict:
+                    score_value = assessment_dict.pop(field_name)  # Remove individual field
+                    if score_value is not None:
+                        custom_scores[criterion_name] = score_value
+            assessment_dict['custom_scores'] = custom_scores
+        else:
+            assessment_dict['custom_scores'] = {}
+
         # Log results
         overall_score = assessment_dict.get('overall_score', 0)
         logger.info("Evaluation completed", overall_score=overall_score, max_score=5)
@@ -247,7 +289,8 @@ class BasicJudge:
         reference: Optional[str] = None,
         include_criteria: bool = False,
         max_file_size: int = 1000000,  # 1MB default limit per file
-        exclude_global: bool = False  # Include global assessment by default
+        exclude_global: bool = False,  # Include global assessment by default
+        custom_criteria: Optional[Dict[str, str]] = None
     ) -> Union[dict, List[dict]]:
         """
         Evaluate content from one or multiple files sequentially to avoid context overflow
@@ -261,6 +304,7 @@ class BasicJudge:
             include_criteria: Include detailed explanation of evaluation criteria in assessment
             max_file_size: Maximum file size in bytes (default 1MB to avoid context overflow)
             exclude_global: If True, skip global assessment for multiple files (default False)
+            custom_criteria: Custom domain-specific criteria with descriptions (e.g., {"logical_coherence": "Are results logically consistent?"})
 
         Returns:
             dict: Single assessment if one file provided
@@ -360,7 +404,8 @@ class BasicJudge:
                 criteria=criteria,
                 focus=focus,
                 reference=reference,
-                include_criteria=include_criteria
+                include_criteria=include_criteria,
+                custom_criteria=custom_criteria
             )
 
             # Update source reference to include file name
@@ -382,7 +427,7 @@ class BasicJudge:
         # Generate global assessment and return structured result
         logger.info("Generating global assessment from individual file evaluations", file_count=len(assessments))
         global_assessment = self._generate_global_assessment(
-            assessments, context, criteria, focus, include_criteria
+            assessments, context, criteria, focus, include_criteria, custom_criteria
         )
 
         return {
@@ -396,7 +441,8 @@ class BasicJudge:
         context: str,
         criteria: Optional[JudgmentCriteria],
         focus: Optional[str],
-        include_criteria: bool
+        include_criteria: bool,
+        custom_criteria: Optional[Dict[str, str]] = None
     ) -> dict:
         """
         Generate a global assessment from multiple individual file assessments
@@ -475,7 +521,8 @@ Provide a comprehensive global assessment of overall quality and recommendations
             context=f"global assessment summary for {total_files} files ({context})",
             criteria=criteria,
             focus=focus,
-            include_criteria=include_criteria
+            include_criteria=include_criteria,
+            custom_criteria=custom_criteria
         )
 
         # Update the source reference to indicate this is a global assessment
@@ -506,6 +553,19 @@ Provide a comprehensive global assessment of overall quality and recommendations
             "evaluation_criteria_details": None
         }
 
+    def _build_custom_scores_format(self, custom_criteria: Optional[Dict[str, str]]) -> str:
+        """Build custom score fields for the prompt (individual fields, not dict)"""
+        if not custom_criteria:
+            return ""
+
+        # Build individual score fields for each custom criterion
+        score_fields = []
+        for criterion_name in custom_criteria.keys():
+            field_name = f"{criterion_name}_score"
+            score_fields.append(f'    "{field_name}": <1-5 integer>,')
+
+        return "\n" + "\n".join(score_fields)
+
     def _build_evaluation_prompt(
         self,
         content: str,
@@ -513,7 +573,8 @@ Provide a comprehensive global assessment of overall quality and recommendations
         criteria: JudgmentCriteria,
         focus: Optional[str],
         reference: Optional[str],
-        include_criteria: bool = False
+        include_criteria: bool = False,
+        custom_criteria: Optional[Dict[str, str]] = None
     ) -> str:
         """Build the evaluation prompt with chain-of-thought reasoning"""
 
@@ -565,6 +626,12 @@
                 active_criteria.append(focus_item)
                 criteria_descriptions.append(f"- **{focus_item.title()}**: PRIMARY FOCUS AREA - This is a key evaluation target")
 
+        # Add custom criteria with their specific descriptions
+        if custom_criteria:
+            for name, description in custom_criteria.items():
+                active_criteria.append(name)
+                criteria_descriptions.append(f"- **{name.replace('_', ' ').title()}**: {description}")
+
         criteria_text = "\n".join(criteria_descriptions)
 
         # Build reference section if provided
@@ -613,12 +680,26 @@ SCORING RUBRIC (1-5 scale):
 - **Score 2**: Poor - Falls short of expectations with significant issues
 - **Score 1**: Very Poor - Fails to meet basic standards in this dimension
 
+SCORING PRINCIPLES - CRITICAL:
+- **Be rigorous and avoid grade inflation**: Most adequate responses should be scored 2-3, not 3-4
+- **Context matters**: For routine tasks (e.g., basic arithmetic), criteria like "innovation" should be scored 1-2 unless truly creative
+- **If a criterion doesn't meaningfully apply to the task**, score it 1-2, not 3 (e.g., innovation for standard formula application = 1)
+- **Reserve 4-5 for genuinely excellent work**: Don't give high scores by default
+- **Apply task-appropriate expectations**:
+  * Routine calculations: innovation 1-2, soundness 4-5 (if correct)
+  * Creative explanations: innovation 3-4 if novel approach shown
+  * Complex problem-solving: innovation 4-5 if breakthrough thinking demonstrated
+- **Be appropriately critical**: Question whether the response truly meets each criterion
+
 EVALUATION PROCESS:
 1. **STEP 1**: Carefully analyze the content for each active criterion
-2. **STEP 2**: Identify specific strengths and weaknesses
-3. **STEP 3**: Provide actionable recommendations for improvement
-4. **STEP 4**: Assign scores based on the rubric (be fair but appropriately critical)
-5. **STEP 5**: Calculate overall score - PRIMARY FOCUS AREAS should heavily influence the final score
+2. **STEP 2**: Assess if each criterion meaningfully applies to this task (if not, score 1-2)
+3. **STEP 3**: Identify specific strengths and weaknesses
+4. **STEP 4**: Provide actionable recommendations for improvement
+5. **STEP 5**: Assign scores based on the rubric (be rigorous and appropriately critical)
+   - For standard criteria: populate the corresponding _score fields (e.g., clarity_score, soundness_score)
+   - For custom criteria: populate the custom_scores object with scores for EACH custom criterion listed in EVALUATION CRITERIA
+6. **STEP 6**: Calculate overall score - PRIMARY FOCUS AREAS should heavily influence the final score
 
 CRITICAL ASSESSMENT PRINCIPLES:
 - Be objective and evidence-based in your evaluation
@@ -628,6 +709,11 @@ CRITICAL ASSESSMENT PRINCIPLES:
 - Ensure recommendations are specific and implementable
 - PRIMARY FOCUS AREAS are the most important evaluation targets - weaknesses in these areas should significantly impact the overall score
 
+IMPORTANT - SCORING REQUIREMENTS:
+- You MUST provide individual scores (1-5) for EVERY criterion in the custom_scores object if custom criteria are present
+- Do NOT leave custom_scores as an empty object {{}} - populate it with scores for each custom criterion
+- Each custom criterion listed in EVALUATION CRITERIA must have a corresponding score in custom_scores
+
 RESPONSE FORMAT:
 Provide your assessment as a structured JSON response with the following format:
 
@@ -643,7 +729,7 @@ Provide your assessment as a structured JSON response with the following format:
     "effectiveness_score": <1-5 integer or null if not evaluated>,
     "relevance_score": <1-5 integer or null if not evaluated>,
     "completeness_score": <1-5 integer or null if not evaluated>,
-    "coherence_score": <1-5 integer or null if not evaluated>,
+    "coherence_score": <1-5 integer or null if not evaluated>,{self._build_custom_scores_format(custom_criteria)}
     "strengths": ["list of specific strengths identified"],
     "weaknesses": ["list of specific areas for improvement"],
     "actionable_feedback": ["list of specific actionable recommendations"],
abstractcore/providers/lmstudio_provider.py

@@ -2,6 +2,7 @@
 LM Studio provider implementation (OpenAI-compatible API).
 """
 
+import os
 import httpx
 import json
 import time
@@ -23,14 +24,19 @@ from ..events import EventType
 class LMStudioProvider(BaseProvider):
     """LM Studio provider using OpenAI-compatible API"""
 
-    def __init__(self, model: str = "local-model", base_url: str = "http://localhost:1234/v1", **kwargs):
+    def __init__(self, model: str = "local-model", base_url: Optional[str] = None, **kwargs):
         super().__init__(model, **kwargs)
         self.provider = "lmstudio"
 
         # Initialize tool handler
         self.tool_handler = UniversalToolHandler(model)
 
-        self.base_url = base_url.rstrip('/')
+        # Base URL priority: parameter > LMSTUDIO_BASE_URL > default
+        self.base_url = (
+            base_url or
+            os.getenv("LMSTUDIO_BASE_URL") or
+            "http://localhost:1234/v1"
+        ).rstrip('/')
 
         # Get timeout value - None means unlimited timeout
         timeout_value = getattr(self, '_timeout', None)
abstractcore/providers/ollama_provider.py

@@ -3,6 +3,7 @@ Ollama provider implementation.
 """
 
 import json
+import os
 import httpx
 import time
 from typing import List, Dict, Any, Optional, Union, Iterator, AsyncIterator, Type
@@ -23,11 +24,17 @@ from ..events import EventType
 class OllamaProvider(BaseProvider):
     """Ollama provider for local models with full integration"""
 
-    def __init__(self, model: str = "qwen3:4b-instruct-2507-q4_K_M", base_url: str = "http://localhost:11434", **kwargs):
+    def __init__(self, model: str = "qwen3:4b-instruct-2507-q4_K_M", base_url: Optional[str] = None, **kwargs):
         super().__init__(model, **kwargs)
         self.provider = "ollama"
 
-        self.base_url = base_url.rstrip('/')
+        # Base URL priority: parameter > OLLAMA_BASE_URL > OLLAMA_HOST > default
+        self.base_url = (
+            base_url or
+            os.getenv("OLLAMA_BASE_URL") or
+            os.getenv("OLLAMA_HOST") or
+            "http://localhost:11434"
+        ).rstrip('/')
         self.client = httpx.Client(timeout=self._timeout)
         self._async_client = None  # Lazy-loaded async client
 
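Both providers now resolve their base URL the same way: explicit argument first, then environment variable(s), then the built-in default, with any trailing slash stripped. A standalone sketch of the Ollama variant (a mirror of the lookup, not the provider class itself):

```python
import os
from typing import Optional

def resolve_ollama_base_url(base_url: Optional[str] = None) -> str:
    # Explicit argument wins; OLLAMA_BASE_URL is checked before OLLAMA_HOST.
    return (
        base_url
        or os.getenv("OLLAMA_BASE_URL")
        or os.getenv("OLLAMA_HOST")
        or "http://localhost:11434"
    ).rstrip('/')

os.environ["OLLAMA_HOST"] = "http://192.168.1.100:11434/"
assert resolve_ollama_base_url() == "http://192.168.1.100:11434"
assert resolve_ollama_base_url("http://other-host:11434") == "http://other-host:11434"
```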
abstractcore/providers/registry.py

@@ -354,6 +354,8 @@ class ProviderRegistry:
 
         This is used by the factory to create provider instances.
         """
+        from ..config import get_provider_config
+
         provider_info = self.get_provider_info(provider_name)
         if not provider_info:
             available_providers = ", ".join(self.list_provider_names())
@@ -362,8 +364,14 @@ class ProviderRegistry:
         provider_class = self.get_provider_class(provider_name)
         model = model or provider_info.default_model
 
+        # Get runtime config for this provider
+        runtime_config = get_provider_config(provider_name)
+
+        # Merge: runtime_config < kwargs (user kwargs take precedence)
+        merged_kwargs = {**runtime_config, **kwargs}
+
         try:
-            return provider_class(model=model, **kwargs)
+            return provider_class(model=model, **merged_kwargs)
         except ImportError as e:
             # Re-raise import errors with helpful message
             if provider_info.installation_extras:
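The merge is ordinary dict unpacking: later keys win, so explicit `create_llm(...)` kwargs override runtime configuration. A minimal illustration with placeholder values:

```python
runtime_config = {"base_url": "http://192.168.1.100:11434", "timeout": 60}
kwargs = {"base_url": "http://localhost:11434"}  # explicit create_llm(...) argument

merged_kwargs = {**runtime_config, **kwargs}
assert merged_kwargs == {"base_url": "http://localhost:11434", "timeout": 60}
```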
abstractcore/utils/version.py

@@ -11,4 +11,4 @@ including when the package is installed from PyPI where pyproject.toml is not available.
 
 # Package version - update this when releasing new versions
 # This must be manually synchronized with the version in pyproject.toml
-__version__ = "2.6.0"
+__version__ = "2.6.3"
abstractcore-2.6.0.dist-info/METADATA → abstractcore-2.6.3.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: abstractcore
-Version: 2.6.0
+Version: 2.6.3
 Summary: Unified interface to all LLM providers with essential infrastructure for tool calling, streaming, and model management
 Author-email: Laurent-Philippe Albou <contact@abstractcore.ai>
 Maintainer-email: Laurent-Philippe Albou <contact@abstractcore.ai>
@@ -795,6 +795,66 @@ summarizer document.pdf --provider anthropic --model claude-3-5-sonnet
 
 **Complete guide**: [Centralized Configuration](docs/centralized-config.md)
 
+### Environment Variables
+
+AbstractCore supports environment variables for provider base URLs, enabling remote servers, Docker deployments, and non-standard ports:
+
+```bash
+# Ollama on remote server
+export OLLAMA_BASE_URL="http://192.168.1.100:11434"
+# Alternative: OLLAMA_HOST is also supported
+export OLLAMA_HOST="http://192.168.1.100:11434"
+
+# LMStudio on non-standard port
+export LMSTUDIO_BASE_URL="http://localhost:1235/v1"
+
+# OpenAI-compatible proxy
+export OPENAI_BASE_URL="https://api.portkey.ai/v1"
+
+# Anthropic proxy
+export ANTHROPIC_BASE_URL="https://api.portkey.ai/v1"
+```
+
+**Priority**: Programmatic `base_url` parameter > Runtime configuration > Environment variable > Default value
+
+**Provider discovery**: `get_all_providers_with_models()` automatically respects these environment variables when checking provider availability.
+
+### Programmatic Configuration
+
+Configure provider settings at runtime without environment variables:
+
+```python
+from abstractcore.config import configure_provider, get_provider_config, clear_provider_config
+from abstractcore import create_llm
+
+# Set provider base URL programmatically
+configure_provider('ollama', base_url='http://192.168.1.100:11434')
+
+# All future create_llm() calls automatically use the configured URL
+llm = create_llm('ollama', model='llama3:8b')  # Uses http://192.168.1.100:11434
+
+# Query current configuration
+config = get_provider_config('ollama')
+print(config)  # {'base_url': 'http://192.168.1.100:11434'}
+
+# Clear configuration (revert to env var / default)
+configure_provider('ollama', base_url=None)
+# Or clear all providers
+clear_provider_config()
+```
+
+**Use Cases**:
+- **Web UI Settings**: Configure providers through settings pages
+- **Docker Startup**: Read from custom env vars and configure programmatically
+- **Testing**: Set mock server URLs for integration tests
+- **Multi-tenant**: Configure different base URLs per tenant
+
+**Priority System**:
+1. Constructor parameter (highest): `create_llm("ollama", base_url="...")`
+2. Runtime configuration: `configure_provider('ollama', base_url="...")`
+3. Environment variable: `OLLAMA_BASE_URL`
+4. Default value (lowest): `http://localhost:11434`
+
 ## Documentation
 
 **📚 Complete Documentation:** [docs/](docs/) - Full documentation index and navigation guide
abstractcore-2.6.0.dist-info/RECORD → abstractcore-2.6.3.dist-info/RECORD

@@ -6,7 +6,7 @@ abstractcore/apps/app_config_utils.py,sha256=5GIvXnD996LFIV3-BpfkqII6UqYlStm7ZCg
 abstractcore/apps/deepsearch.py,sha256=UlmuBS9T4yNsz0V_iY08GNNDTstsI5OJNNV6c8CU6AE,23191
 abstractcore/apps/extractor.py,sha256=OfiqB9l_alH9xCGb6zOD__QJkDjdKOlLZngriVgmn7c,23749
 abstractcore/apps/intent.py,sha256=5ie_H9_K_ZxlA0oCu7ROUrsgwfzDNFgVUyBNec6YVRE,22813
-abstractcore/apps/judge.py,sha256=nOgxvn-BbhNY6xU9AlTeD1yidTh73AiVlSN7hQCVE2M,23169
+abstractcore/apps/judge.py,sha256=ZoBRGYjM24TrDALwV7MMDO4Cg2pGPtwRMXX5WyFhdVs,23840
 abstractcore/apps/summarizer.py,sha256=9aD6KH21w-tv_wGp9MaO2uyJuaU71OemW7KpqrG5t6w,14669
 abstractcore/architectures/__init__.py,sha256=-4JucAM7JkMWShWKkePoclxrUHRKgaG36UTguJihE0U,1046
 abstractcore/architectures/detection.py,sha256=jmpD04xcKotWCW7--jadBzCtD2a5dYJi1zljpxB9JmU,19813
@@ -26,16 +26,16 @@ abstractcore/compression/pil_text_renderer.py,sha256=IxVs5ZFXIJeXIQdS1hDQKJC6O2l
 abstractcore/compression/quality.py,sha256=bq7YI_5ywHfPnWSu1MmBRnV5Nxz8KBJGFvnfQOJSx5c,9415
 abstractcore/compression/text_formatter.py,sha256=5RE6JrLkHvYoDQlsYJSoqfbwAa3caMr2i_DXog6ovZs,27328
 abstractcore/compression/vision_compressor.py,sha256=5Ox3w_ee7fgPRDOpSQcooEGtuBKpQoAmWjwpLE2hoNU,12773
-abstractcore/config/__init__.py,sha256=4mHX5z5Sq8R8xh78tb9jjZLaz_oBNW1eh914OsdDTxs,318
+abstractcore/config/__init__.py,sha256=JQ4feacJV_brzf6qNnNPo5VbmXhdjIxH088jYLTp1ik,919
 abstractcore/config/main.py,sha256=oQxnNxo_78CusCuDKGgwal2T3S8MDpo3yzLSB1wpYkU,35573
-abstractcore/config/manager.py,sha256=JTN_qoNqRGKQoPH66nVGn3PIjgt-eSgsG9BndYIC8Dg,16752
+abstractcore/config/manager.py,sha256=O_zuskt4qe9bzaJ2dVoodYWJ3zbakaDu5ocbuGZuCoo,18388
 abstractcore/config/vision_config.py,sha256=jJzO4zBexh8SqSKp6YKOXdMDSv4AL4Ztl5Xi-5c4KyY,17869
 abstractcore/core/__init__.py,sha256=2h-86U4QkCQ4gzZ4iRusSTMlkODiUS6tKjZHiEXz6rM,684
 abstractcore/core/enums.py,sha256=BhkVnHC-X1_377JDmqd-2mnem9GdBLqixWlYzlP_FJU,695
 abstractcore/core/factory.py,sha256=ec7WGW2JKK-dhDplziTAeRkebEUFymtEEZ_bS5qkpqY,2798
 abstractcore/core/interface.py,sha256=-VAY0nlsTnWN_WghiuMC7iE7xUdZfYOg6KlgrAPi14Y,14086
 abstractcore/core/retry.py,sha256=xP38rabBqJImZ-yg60L5mKeg80ATvxmLG5Yp6lCeTpk,14566
-abstractcore/core/session.py,sha256=n9StBlMhSlETlEqQ401PpM8lK0W2ycCP4Zwrywl4Mhs,46147
+abstractcore/core/session.py,sha256=pgiwwgfpgBovwqJ0RkWRsS5TbroTvQEB0jkZnzdhhCY,47278
 abstractcore/core/types.py,sha256=jj44i07kMjdg9FQ3mA_fK6r_M0Lcgt1RQpy1Ra5w-eI,4578
 abstractcore/embeddings/__init__.py,sha256=hR3xZyqcRm4c2pq1dIa5lxj_-Bk70Zad802JQN4joWo,637
 abstractcore/embeddings/manager.py,sha256=bisyQJquM1HLQor8ZAfO9V_XWWHw0b4PjyAz9g7sS-4,52273
@@ -65,18 +65,18 @@ abstractcore/processing/__init__.py,sha256=QcACEnhnHKYCkFL1LNOW_uqBrwkTAmz5A61N4
 abstractcore/processing/basic_deepsearch.py,sha256=dzJQtH4k44XY9tvG0Z4JIlYt_s7HpbLdSPScha-t7vk,101036
 abstractcore/processing/basic_extractor.py,sha256=3x-3BdIHgLvqLnLF6K1-P4qVaLIpAnNIIutaJi7lDQM,49832
 abstractcore/processing/basic_intent.py,sha256=wD99Z7fE2RiYk6oyTZXojUbv-bz8HhKFIuIHYLLTw54,32455
-abstractcore/processing/basic_judge.py,sha256=tKWJrg_tY4vCHzWgXxz0ZjgLXBYYfpMcpG7vl03hJcM,32218
+abstractcore/processing/basic_judge.py,sha256=L1fc9H0-_88B1TULL-mlaNL7OydMgp-ru_zzzoGdr38,37220
 abstractcore/processing/basic_summarizer.py,sha256=XHNxMQ_8aLStTeUo6_2JaThlct12Htpz7ORmm0iuJsg,25495
 abstractcore/providers/__init__.py,sha256=O7gmT4p_jbzMjoZPhi_6RIMHQm-IMFX1XfcgySz3DSQ,1729
 abstractcore/providers/anthropic_provider.py,sha256=0-qZb0Es6-VLuVVl2j7IUjOuyRlgjQdJFulWfpi4qb4,31740
 abstractcore/providers/base.py,sha256=nWF1pxeUlT4ozlUqKG0rWOmLkfo-zQgfU7fv3AUSI08,68452
 abstractcore/providers/huggingface_provider.py,sha256=v4UUmODrnWKtTygzPh-lm4jSCAPms5VYJE5v7PWB4Lo,79458
-abstractcore/providers/lmstudio_provider.py,sha256=e53EF1kyIK4rMRrHZqeE-RfFbxap05iQYWOq_jjxJSk,33935
+abstractcore/providers/lmstudio_provider.py,sha256=92_vx7AVVt_oufJdHo3R0D_V2qyTKO2DKzi9-l4KzWs,34114
 abstractcore/providers/mlx_provider.py,sha256=afLCEwuw7r8OK4fD3OriyKMcWpxVIob_37ItmgAclfc,23123
 abstractcore/providers/model_capabilities.py,sha256=C4HIgvNTp9iIPiDeWyXo7vdzRkMdecRPoQi80yHSOL0,11955
-abstractcore/providers/ollama_provider.py,sha256=FoofSLtevTGxsjjiQw9Q7OWBjp4RJ8JHrFVefPnHyoA,33915
+abstractcore/providers/ollama_provider.py,sha256=Kg5au_tia0xFTXqUlqDNrSvwVpt2lXvfnVFou9K2FGQ,34144
 abstractcore/providers/openai_provider.py,sha256=Y-79mAtgDiDw6SqF2LhnWtlfuC_e6TxeF_tqJWAAyWo,36364
-abstractcore/providers/registry.py,sha256=KG7qjP76Z5t6k5ZsmqoDEbe3A39RJhhvExKPvSKMEms,19442
+abstractcore/providers/registry.py,sha256=z0FVaucJ6KmE6QAIkskH56jV-7hodWgj_G-u5_bcgp0,19732
 abstractcore/providers/streaming.py,sha256=HaGkoItPWXqgml3C-KiPc0hBNpztLzjl_ooECw11BHI,31370
 abstractcore/server/__init__.py,sha256=1DSAz_YhQtnKv7sNi5TMQV8GFujctDOabgvAdilQE0o,249
 abstractcore/server/app.py,sha256=ajG4yfMOHjqBafquLHeLTaMmEr01RXiBRxjFRTIT2j8,96602
@@ -98,11 +98,11 @@ abstractcore/utils/self_fixes.py,sha256=1VYxPq-q7_DtNl39NbrzUmyHpkhb9Q2SdnXUj4c0
 abstractcore/utils/structured_logging.py,sha256=Vm-HviSa42G9DJCWmaEv4a0QG3NMsADD3ictLOs4En0,19952
 abstractcore/utils/token_utils.py,sha256=eLwFmJ68p9WMFD_MHLMmeJRW6Oqx_4hKELB8FNQ2Mnk,21097
 abstractcore/utils/trace_export.py,sha256=MD1DHDWltpewy62cYzz_OSPAA6edZbZq7_pZbvxz_H8,9279
-abstractcore/utils/version.py,sha256=8FvB_bU--hAlMcCZyeDF2i2VlS9p4sYej4VBQOVtRR4,605
+abstractcore/utils/version.py,sha256=MLDRpkxfuzOdSqP9RhPQPGU3g3_lGxn36uZBllC5LY8,605
 abstractcore/utils/vlm_token_calculator.py,sha256=VBmIji_oiqOQ13IvVhNkb8E246tYMIXWVVOnl86Ne94,27978
-abstractcore-2.6.0.dist-info/licenses/LICENSE,sha256=PI2v_4HMvd6050uDD_4AY_8PzBnu2asa3RKbdDjowTA,1078
-abstractcore-2.6.0.dist-info/METADATA,sha256=YuVAqs9CI7NP8wjIsa0W2sXLWpNLOC5JYj_VEqJL2N8,41304
-abstractcore-2.6.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-abstractcore-2.6.0.dist-info/entry_points.txt,sha256=jXNdzeltVs23A2JM2e2HOiAHldHrsnud3EvPI5VffOs,658
-abstractcore-2.6.0.dist-info/top_level.txt,sha256=DiNHBI35SIawW3N9Z-z0y6cQYNbXd32pvBkW0RLfScs,13
-abstractcore-2.6.0.dist-info/RECORD,,
+abstractcore-2.6.3.dist-info/licenses/LICENSE,sha256=PI2v_4HMvd6050uDD_4AY_8PzBnu2asa3RKbdDjowTA,1078
+abstractcore-2.6.3.dist-info/METADATA,sha256=FSrM7xWLPCdW9mtHmMDB8alRirG1oJ4ieCswyDhZXdk,43479
+abstractcore-2.6.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+abstractcore-2.6.3.dist-info/entry_points.txt,sha256=jXNdzeltVs23A2JM2e2HOiAHldHrsnud3EvPI5VffOs,658
+abstractcore-2.6.3.dist-info/top_level.txt,sha256=DiNHBI35SIawW3N9Z-z0y6cQYNbXd32pvBkW0RLfScs,13
+abstractcore-2.6.3.dist-info/RECORD,,