abstractcore 2.4.4__py3-none-any.whl → 2.4.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractcore/cli/__init__.py +9 -0
- abstractcore/cli/main.py +759 -0
- abstractcore/cli/vision_config.py +491 -0
- abstractcore/core/interface.py +7 -0
- abstractcore/core/session.py +27 -2
- abstractcore/media/handlers/__init__.py +16 -0
- abstractcore/media/handlers/anthropic_handler.py +326 -0
- abstractcore/media/handlers/local_handler.py +541 -0
- abstractcore/media/handlers/openai_handler.py +281 -0
- abstractcore/media/processors/__init__.py +13 -0
- abstractcore/media/processors/image_processor.py +610 -0
- abstractcore/media/processors/office_processor.py +490 -0
- abstractcore/media/processors/pdf_processor.py +485 -0
- abstractcore/media/processors/text_processor.py +557 -0
- abstractcore/media/utils/__init__.py +22 -0
- abstractcore/media/utils/image_scaler.py +306 -0
- abstractcore/providers/anthropic_provider.py +14 -2
- abstractcore/providers/base.py +24 -0
- abstractcore/providers/huggingface_provider.py +23 -9
- abstractcore/providers/lmstudio_provider.py +6 -1
- abstractcore/providers/mlx_provider.py +20 -7
- abstractcore/providers/ollama_provider.py +6 -1
- abstractcore/providers/openai_provider.py +6 -2
- abstractcore/tools/common_tools.py +651 -1
- abstractcore/utils/version.py +1 -1
- {abstractcore-2.4.4.dist-info → abstractcore-2.4.6.dist-info}/METADATA +59 -9
- {abstractcore-2.4.4.dist-info → abstractcore-2.4.6.dist-info}/RECORD +31 -17
- {abstractcore-2.4.4.dist-info → abstractcore-2.4.6.dist-info}/entry_points.txt +2 -0
- {abstractcore-2.4.4.dist-info → abstractcore-2.4.6.dist-info}/WHEEL +0 -0
- {abstractcore-2.4.4.dist-info → abstractcore-2.4.6.dist-info}/licenses/LICENSE +0 -0
- {abstractcore-2.4.4.dist-info → abstractcore-2.4.6.dist-info}/top_level.txt +0 -0
abstractcore/cli/main.py
ADDED
|
@@ -0,0 +1,759 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
AbstractCore CLI - Unified Configuration System
|
|
4
|
+
|
|
5
|
+
Provides configuration commands for all AbstractCore settings:
|
|
6
|
+
- Default models and providers
|
|
7
|
+
- Vision fallback configuration
|
|
8
|
+
- Embeddings settings
|
|
9
|
+
- API keys and authentication
|
|
10
|
+
- Provider preferences
|
|
11
|
+
|
|
12
|
+
Usage:
|
|
13
|
+
# General configuration
|
|
14
|
+
abstractcore --set-default-model ollama/llama3:8b
|
|
15
|
+
abstractcore --set-default-provider ollama
|
|
16
|
+
abstractcore --status
|
|
17
|
+
abstractcore --configure
|
|
18
|
+
|
|
19
|
+
# Vision configuration
|
|
20
|
+
abstractcore --set-vision-caption qwen2.5vl:7b
|
|
21
|
+
abstractcore --set-vision-provider ollama --model qwen2.5vl:7b
|
|
22
|
+
|
|
23
|
+
# Embeddings configuration
|
|
24
|
+
abstractcore --set-embeddings-model sentence-transformers/all-MiniLM-L6-v2
|
|
25
|
+
abstractcore --set-embeddings-provider huggingface
|
|
26
|
+
|
|
27
|
+
# API keys
|
|
28
|
+
abstractcore --set-api-key openai sk-...
|
|
29
|
+
abstractcore --set-api-key anthropic ant_...
|
|
30
|
+
"""
|
|
31
|
+
|
|
32
|
+
import sys
|
|
33
|
+
import argparse
|
|
34
|
+
import logging
|
|
35
|
+
from pathlib import Path
|
|
36
|
+
from typing import List, Optional
|
|
37
|
+
|
|
38
|
+
# Add parent directory to path for imports
|
|
39
|
+
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
|
40
|
+
|
|
41
|
+
from abstractcore.config import get_config_manager
|
|
42
|
+
|
|
43
|
+
def download_vision_model(model_name: str = "blip-base-caption") -> bool:
    """Download a local image-captioning model and configure AbstractCore to use it.

    Pulls the requested model from HuggingFace into
    ``~/.abstractcore/models/<model_name>``, writes a ``download_complete.txt``
    marker on success, and registers the model as the vision provider via the
    shared config manager.

    Args:
        model_name: Key into the supported catalog: ``blip-base-caption``,
            ``blip-large-caption``, ``vit-gpt2`` or ``git-base``.

    Returns:
        True on successful download and configuration, False otherwise
        (unknown model name, or any download/installation failure).
    """
    AVAILABLE_MODELS = {
        "blip-base-caption": {
            "hf_id": "Salesforce/blip-image-captioning-base",
            "size": "990MB",
            "description": "BLIP base image captioning model"
        },
        "blip-large-caption": {
            "hf_id": "Salesforce/blip-image-captioning-large",
            "size": "1.8GB",
            "description": "BLIP large image captioning model (better quality)"
        },
        "vit-gpt2": {
            "hf_id": "nlpconnect/vit-gpt2-image-captioning",
            "size": "500MB",
            "description": "ViT + GPT-2 image captioning model (CPU friendly)"
        },
        "git-base": {
            "hf_id": "microsoft/git-base",
            "size": "400MB",
            "description": "Microsoft GIT base captioning model (smallest)"
        }
    }

    if model_name not in AVAILABLE_MODELS:
        print(f"❌ Unknown model: {model_name}")
        print(f"Available models: {', '.join(AVAILABLE_MODELS.keys())}")
        return False

    model_info = AVAILABLE_MODELS[model_name]
    print(f"📋 Model: {model_info['description']} ({model_info['size']})")

    try:
        # Ensure the transformers stack is importable, installing it on demand.
        try:
            from transformers import BlipProcessor, BlipForConditionalGeneration
            from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
            from transformers import GitProcessor, GitForCausalLM
        except ImportError:
            print("❌ Required libraries not found. Installing transformers...")
            import subprocess

            # Install into the *running* interpreter's environment.
            subprocess.check_call([sys.executable, "-m", "pip", "install", "transformers", "torch", "torchvision", "Pillow"])
            print("✅ Installed transformers and dependencies")

            # Re-import after installation
            from transformers import BlipProcessor, BlipForConditionalGeneration
            from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
            from transformers import GitProcessor, GitForCausalLM

        # Create the per-model cache directory (Path is imported at module level).
        models_dir = Path.home() / ".abstractcore" / "models" / model_name
        models_dir.mkdir(parents=True, exist_ok=True)

        print(f"📁 Download path: {models_dir}")
        print(f"🔄 Downloading {model_info['description']}...")

        hf_id = model_info["hf_id"]

        # Download based on model type; each branch saves the pieces needed to
        # reload the model without hitting the network again.
        if "blip" in model_name:
            print("📥 Downloading BLIP model and processor...")
            processor = BlipProcessor.from_pretrained(hf_id, use_fast=False, cache_dir=str(models_dir))
            model = BlipForConditionalGeneration.from_pretrained(hf_id, cache_dir=str(models_dir))

            # Save to specific directory structure
            processor.save_pretrained(models_dir / "processor")
            model.save_pretrained(models_dir / "model")

        elif "vit-gpt2" in model_name:
            print("📥 Downloading ViT-GPT2 model...")
            model = VisionEncoderDecoderModel.from_pretrained(hf_id, cache_dir=str(models_dir))
            feature_extractor = ViTImageProcessor.from_pretrained(hf_id, cache_dir=str(models_dir))
            tokenizer = AutoTokenizer.from_pretrained(hf_id, cache_dir=str(models_dir))

            # Save components
            model.save_pretrained(models_dir / "model")
            feature_extractor.save_pretrained(models_dir / "feature_extractor")
            tokenizer.save_pretrained(models_dir / "tokenizer")

        elif "git" in model_name:
            print("📥 Downloading GIT model...")
            processor = GitProcessor.from_pretrained(hf_id, cache_dir=str(models_dir))
            model = GitForCausalLM.from_pretrained(hf_id, cache_dir=str(models_dir))

            processor.save_pretrained(models_dir / "processor")
            model.save_pretrained(models_dir / "model")

        # Create a marker file to indicate successful download.
        from datetime import datetime, timezone
        marker_file = models_dir / "download_complete.txt"
        with open(marker_file, 'w') as f:
            f.write(f"Model: {model_info['description']}\n")
            f.write(f"HuggingFace ID: {hf_id}\n")
            # BUGFIX: record *when* the download completed; the previous code
            # wrote the CLI module's directory path here instead of a date.
            f.write(f"Downloaded: {datetime.now(timezone.utc).isoformat()}\n")

        print(f"✅ Successfully downloaded {model_info['description']}")
        print(f"📁 Model saved to: {models_dir}")

        # Configure AbstractCore to use this model; get_config_manager is
        # already imported at module level.
        config_manager = get_config_manager()
        # Use the proper HuggingFace model identifier
        config_manager.set_vision_provider("huggingface", hf_id)

        print(f"✅ Configured AbstractCore to use HuggingFace model: {hf_id}")
        print(f"🎯 Vision fallback is now enabled!")

        return True

    except Exception as e:
        # Best-effort CLI command: report the failure with a traceback and
        # signal it via the return value rather than crashing the CLI.
        print(f"❌ Download failed: {e}")
        import traceback
        traceback.print_exc()
        return False
|
|
162
|
+
|
|
163
|
+
def add_arguments(parser: argparse.ArgumentParser):
    """Add all AbstractCore configuration arguments with organized groups.

    Mutates *parser* in place, registering every configuration flag the CLI
    supports, grouped for readable ``--help`` output. The destination names
    follow argparse's dash-to-underscore convention (e.g. ``--set-api-key``
    becomes ``args.set_api_key``); ``handle_commands`` reads those attributes.
    """

    # General configuration group
    general_group = parser.add_argument_group('General Configuration')
    general_group.add_argument("--status", action="store_true",
                               help="Show current configuration status with change commands")
    general_group.add_argument("--configure", action="store_true",
                               help="Interactive guided setup for first-time users")
    general_group.add_argument("--reset", action="store_true",
                               help="Reset all configuration to built-in defaults")

    # Model configuration group
    model_group = parser.add_argument_group('Model Configuration')
    model_group.add_argument("--set-global-default", metavar="PROVIDER/MODEL",
                             help="Set fallback model for all apps (e.g., ollama/llama3:8b)")
    model_group.add_argument("--set-app-default", nargs=3, metavar=("APP", "PROVIDER", "MODEL"),
                             help="Set app-specific model (apps: cli, summarizer, extractor, judge)")
    model_group.add_argument("--set-chat-model", metavar="PROVIDER/MODEL",
                             help="Set specialized chat model (optional)")
    model_group.add_argument("--set-code-model", metavar="PROVIDER/MODEL",
                             help="Set specialized coding model (optional)")

    # Authentication group
    auth_group = parser.add_argument_group('Authentication')
    auth_group.add_argument("--set-api-key", nargs=2, metavar=("PROVIDER", "KEY"),
                            help="Set API key for cloud providers (openai, anthropic, google, etc.)")
    auth_group.add_argument("--list-api-keys", action="store_true",
                            help="Show which providers have API keys configured")

    # Media processing group
    media_group = parser.add_argument_group('Media & Vision Configuration')
    media_group.add_argument("--set-vision-provider", nargs=2, metavar=("PROVIDER", "MODEL"),
                             help="Set vision model for image analysis with text-only models")
    media_group.add_argument("--add-vision-fallback", nargs=2, metavar=("PROVIDER", "MODEL"),
                             help="Add backup vision provider to fallback chain")
    # nargs="?" + const: a bare `--download-vision-model` selects the default model.
    media_group.add_argument("--download-vision-model", nargs="?", const="blip-base-caption", metavar="MODEL",
                             help="Download local vision model (default: blip-base-caption, ~1GB)")
    media_group.add_argument("--disable-vision", action="store_true",
                             help="Disable vision fallback for text-only models")

    # Embeddings group
    embed_group = parser.add_argument_group('Embeddings Configuration')
    embed_group.add_argument("--set-embeddings-model", metavar="MODEL",
                             help="Set model for semantic search (format: provider/model)")
    # NOTE(review): const=True means a bare `--set-embeddings-provider` yields the
    # boolean True rather than a provider name — the handler only acts on str values.
    embed_group.add_argument("--set-embeddings-provider", nargs="?", const=True, metavar="PROVIDER",
                             help="Set embeddings provider (huggingface, openai, etc.)")

    # Legacy compatibility (hidden in advanced section)
    legacy_group = parser.add_argument_group('Legacy Options')
    legacy_group.add_argument("--set-default-model", metavar="MODEL",
                             help="Set global default model (use --set-global-default instead)")
    legacy_group.add_argument("--set-default-provider", metavar="PROVIDER",
                             help="Set default provider only (use --set-global-default instead)")
    legacy_group.add_argument("--set-vision-caption", metavar="MODEL",
                             help="DEPRECATED: Use --set-vision-provider instead")

    # Storage and logging group
    storage_group = parser.add_argument_group('Storage & Logging')
    storage_group.add_argument("--set-default-cache-dir", metavar="PATH",
                               help="Set default cache directory for models and data")
    storage_group.add_argument("--set-huggingface-cache-dir", metavar="PATH",
                               help="Set HuggingFace models cache directory")
    storage_group.add_argument("--set-local-models-cache-dir", metavar="PATH",
                               help="Set local vision/embedding models cache directory")
    storage_group.add_argument("--set-log-base-dir", metavar="PATH",
                               help="Set directory for log files")

    # Logging control group
    logging_group = parser.add_argument_group('Logging Control')
    logging_group.add_argument("--set-console-log-level", metavar="LEVEL",
                               choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL", "NONE"],
                               help="Set console logging level (default: WARNING)")
    logging_group.add_argument("--set-file-log-level", metavar="LEVEL",
                               choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL", "NONE"],
                               help="Set file logging level (default: DEBUG)")
    logging_group.add_argument("--enable-debug-logging", action="store_true",
                               help="Enable debug logging for both console and file")
    logging_group.add_argument("--disable-console-logging", action="store_true",
                               help="Disable all console logging output")
    logging_group.add_argument("--enable-file-logging", action="store_true",
                               help="Enable saving logs to files")
    logging_group.add_argument("--disable-file-logging", action="store_true",
                               help="Disable file logging")

    # Streaming configuration group
    streaming_group = parser.add_argument_group('Streaming Configuration')
    streaming_group.add_argument("--stream", choices=["on", "off"],
                                 help="Set default streaming behavior for CLI (on/off)")
    streaming_group.add_argument("--enable-streaming", action="store_true",
                                 help="Enable streaming by default for CLI")
    streaming_group.add_argument("--disable-streaming", action="store_true",
                                 help="Disable streaming by default for CLI")
|
|
256
|
+
|
|
257
|
+
def print_status():
    """Print comprehensive configuration status with improved readability.

    Renders the config manager's status dict as a box-drawn report in three
    tiers (essential / secondary / advanced), followed by a quick-reference
    list of configuration commands and the config file path.
    """
    config_manager = get_config_manager()
    # `status` is the dict returned by ConfigManager.get_status(); this
    # function reads the keys app_defaults, global_defaults, api_keys, vision,
    # embeddings, streaming, logging, cache and config_file from it.
    status = config_manager.get_status()

    # Header with clear context
    print("📋 AbstractCore Default Configuration Status")
    print(" (Explicit parameters in commands override these defaults)")
    print("=" * 75)

    # ESSENTIAL SECTION - What users care about most
    print("\n┌─ ESSENTIAL CONFIGURATION")
    print("│")

    # App defaults with improved formatting
    print("│ 🎯 Application Defaults")
    app_defaults = status["app_defaults"]

    # Fixed display order for the known apps; each entry carries
    # provider/model keys (either may be unset -> global fallback applies).
    apps = [
        ("CLI (utils)", app_defaults["cli"]),
        ("Summarizer", app_defaults["summarizer"]),
        ("Extractor", app_defaults["extractor"]),
        ("Judge", app_defaults["judge"])
    ]

    for app_name, app_info in apps:
        status_icon = "✅" if app_info["provider"] and app_info["model"] else "⚠️"
        model_text = f"{app_info['provider']}/{app_info['model']}" if app_info["provider"] and app_info["model"] else "Using global fallback"
        print(f"│ {status_icon} {app_name:<12} {model_text}")

    # Global fallback
    print("│")
    print("│ 🌐 Global Fallback")
    defaults = status["global_defaults"]
    if defaults["provider"] and defaults["model"]:
        print(f"│ ✅ Default {defaults['provider']}/{defaults['model']}")
    else:
        print(f"│ ⚠️ Default Using built-in default (huggingface/unsloth/Qwen3-4B-Instruct-2507-GGUF)")

    # Show specialized models if set
    chat_model = defaults['chat_model']
    code_model = defaults['code_model']
    if chat_model or code_model:
        print("│ ┌─ Specialized Models")
        if chat_model:
            print(f"│ │ 💬 Chat {chat_model}")
        if code_model:
            print(f"│ │ 💻 Code {code_model}")

    # API Keys status (simplified)
    print("│")
    print("│ 🔑 Provider Access")
    api_keys = status["api_keys"]
    # The status text per provider embeds ✅/❌ markers; partition on those.
    configured_keys = [provider for provider, status_text in api_keys.items() if "✅" in status_text]
    missing_keys = [provider for provider, status_text in api_keys.items() if "❌" in status_text]

    if configured_keys:
        print(f"│ ✅ Configured {', '.join(configured_keys)}")
    if missing_keys:
        print(f"│ ⚠️ Missing keys {', '.join(missing_keys)}")

    print("└─")

    # SECONDARY SECTION - Important but less frequently changed
    print("\n┌─ SECONDARY CONFIGURATION")
    print("│")

    # Vision with user-friendly descriptions
    print("│ 👁️ Media Processing")
    vision = status["vision"]
    # Map internal strategy identifiers to human-readable descriptions;
    # unknown strategies fall through and are shown verbatim.
    strategy_desc = {
        "two_stage": "Smart captioning for text-only models",
        "disabled": "Media processing disabled",
        "basic_metadata": "Basic metadata extraction only"
    }
    vision_status = "✅ Ready" if "✅" in vision['status'] else "⚠️ Not configured"
    strategy_text = strategy_desc.get(vision['strategy'], vision['strategy'])
    print(f"│ {vision_status:<12} {strategy_text}")
    if vision["caption_provider"] and vision["caption_model"]:
        print(f"│ 📷 Vision Model {vision['caption_provider']}/{vision['caption_model']}")

    # Embeddings
    print("│")
    print("│ 🔗 Embeddings")
    embeddings = status["embeddings"]
    emb_status = "✅ Ready" if "✅" in embeddings['status'] else "⚠️ Not configured"
    print(f"│ {emb_status:<12} {embeddings['provider']}/{embeddings['model']}")

    # Streaming configuration
    print("│")
    print("│ 🌊 Streaming")
    streaming = status["streaming"]
    stream_status = "✅ Enabled" if streaming['cli_stream_default'] else "⚠️ Disabled"
    stream_desc = "Real-time response display by default" if streaming['cli_stream_default'] else "Complete response display by default"
    print(f"│ {stream_status:<12} {stream_desc}")

    print("└─")

    # ADVANCED SECTION - System-level settings
    print("\n┌─ ADVANCED CONFIGURATION")
    print("│")

    # Logging with dual system display
    print("│ 📝 Logging")
    logging_info = status["logging"]

    console_level = logging_info['console_level']
    file_level = logging_info['file_level']
    file_enabled = logging_info['file_logging_enabled']

    # Console logging status
    console_status = "✅" if console_level not in ["NONE", "CRITICAL"] else "❌"
    print(f"│ {console_status} Console {console_level}")

    # File logging status
    if file_enabled:
        file_status = "✅"
        print(f"│ {file_status} File {file_level}")
    else:
        file_status = "❌"
        print(f"│ {file_status} File Disabled")

    # Overall summary
    if console_level == "NONE" and not file_enabled:
        overall_desc = "No logging output"
    elif console_level == "DEBUG" and file_enabled:
        overall_desc = "Full debug logging enabled"
    elif file_enabled:
        overall_desc = "Dual logging active"
    else:
        overall_desc = "Console logging only"

    print(f"│ 📊 Summary {overall_desc}")

    # Cache (simplified)
    print("│")
    print("│ 💾 Storage")
    cache = status["cache"]
    print(f"│ ✅ Configured Cache: {cache['default_cache_dir']}")

    print("└─")

    # HELP SECTION - Separate actionable commands
    print("\n┌─ QUICK CONFIGURATION COMMANDS")
    print("│")
    print("│ 🚀 Common Tasks")
    print("│ abstractcore --set-global-default PROVIDER MODEL")
    print("│ abstractcore --set-app-default APPNAME PROVIDER MODEL")
    print("│ abstractcore --set-api-key PROVIDER YOUR_KEY")
    print("│")
    print("│ 🔧 Media & Behavior")
    print("│ abstractcore --set-vision-provider PROVIDER MODEL")
    print("│ abstractcore --download-vision-model (local models)")
    print("│ abstractcore --stream on/off")
    print("│ abstractcore --enable-streaming / --disable-streaming")
    print("│")
    print("│ 📊 Logging & Storage")
    print("│ abstractcore --enable-debug-logging")
    print("│ abstractcore --set-console-log-level LEVEL")
    print("│ abstractcore --set-file-log-level LEVEL")
    print("│ abstractcore --enable-file-logging / --disable-file-logging")
    print("│ abstractcore --set-default-cache-dir PATH")
    print("│")
    print("│ 🎯 Specialized Models")
    print("│ abstractcore --set-chat-model PROVIDER/MODEL")
    print("│ abstractcore --set-code-model PROVIDER/MODEL")
    print("│ abstractcore --set-embeddings-model PROVIDER/MODEL")
    print("│")
    print("│ 🎛️ Advanced")
    print("│ abstractcore --configure (interactive setup)")
    print("│ abstractcore --reset (reset to defaults)")
    print("│ abstractcore --list-api-keys (check API status)")
    print("│")
    print("│ 📖 More Help")
    print("│ abstractcore --help")
    print("│ docs/centralized-config.md")
    print("└─")

    print(f"\n📁 Configuration file: {status['config_file']}")
|
|
436
|
+
|
|
437
|
+
def interactive_configure():
    """Walk the user through first-time AbstractCore setup.

    Runs three optional steps, each gated by a yes/no prompt: default model,
    vision fallback for text-only models, and cloud-provider API keys. Every
    answer is persisted immediately through the shared config manager; empty
    answers skip the corresponding setting.
    """
    cfg = get_config_manager()

    print("🚀 AbstractCore Interactive Configuration")
    print("=" * 50)

    # Step 1: global default model (provider/model string).
    print("\n1. Default Model Setup")
    if input("Set a default model? [y/N]: ").lower().strip() == 'y':
        default_model = input("Enter model (provider/model format): ").strip()
        if default_model:
            cfg.set_default_model(default_model)
            print(f"✅ Set default model to: {default_model}")

    # Step 2: vision fallback so text-only models can handle images.
    print("\n2. Vision Fallback Setup")
    if input("Configure vision fallback for text-only models? [y/N]: ").lower().strip() == 'y':
        print("Choose vision setup method:")
        print(" 1. Use existing Ollama model (e.g., qwen2.5vl:7b)")
        print(" 2. Use cloud API (OpenAI/Anthropic)")
        print(" 3. Download local model (coming soon)")

        choice = input("Choice [1-3]: ").strip()
        if choice == "1":
            ollama_model = input("Enter Ollama model name: ").strip()
            if ollama_model:
                cfg.set_vision_caption(ollama_model)
                print(f"✅ Set vision model to: {ollama_model}")
        elif choice == "2":
            cloud_provider = input("Enter provider (openai/anthropic): ").strip()
            cloud_model = input("Enter model name: ").strip()
            if cloud_provider and cloud_model:
                cfg.set_vision_provider(cloud_provider, cloud_model)
                print(f"✅ Set vision to: {cloud_provider}/{cloud_model}")
        # Option 3 is advertised but intentionally not implemented here yet.

    # Step 3: API keys for the common cloud providers; blank input skips.
    print("\n3. API Keys Setup")
    if input("Configure API keys? [y/N]: ").lower().strip() == 'y':
        for cloud in ("openai", "anthropic", "google"):
            api_key = input(f"Enter {cloud} API key (or press Enter to skip): ").strip()
            if api_key:
                cfg.set_api_key(cloud, api_key)
                print(f"✅ Set {cloud} API key")

    print("\n✅ Configuration complete! Run 'abstractcore --status' to see current settings.")
|
|
486
|
+
|
|
487
|
+
def handle_commands(args) -> bool:
    """Handle AbstractCore configuration commands.

    Inspects the parsed argparse namespace and executes every configuration
    flag that was supplied (multiple flags may be combined in one invocation).

    Args:
        args: Namespace produced by a parser populated via ``add_arguments``.

    Returns:
        True if at least one command was handled, False if no recognized
        flag was set (caller can then fall back to help/usage output).
    """
    config_manager = get_config_manager()
    handled = False

    # Status and configuration
    if args.status:
        print_status()
        handled = True

    if args.configure:
        interactive_configure()
        handled = True

    if args.reset:
        config_manager.reset_configuration()
        print("✅ Configuration reset to defaults")
        handled = True

    # Global default model settings
    if args.set_global_default:
        config_manager.set_global_default_model(args.set_global_default)
        print(f"✅ Set global default to: {args.set_global_default}")
        handled = True

    if args.set_default_model:  # Legacy compatibility
        config_manager.set_global_default_model(args.set_default_model)
        print(f"✅ Set global default to: {args.set_default_model}")
        handled = True

    if args.set_default_provider:
        config_manager.set_global_default_provider(args.set_default_provider)
        print(f"✅ Set global default provider to: {args.set_default_provider}")
        handled = True

    # App-specific defaults
    if args.set_app_default:
        # nargs=3 guarantees exactly three values here.
        app, provider, model = args.set_app_default
        try:
            config_manager.set_app_default(app, provider, model)
            print(f"✅ Set {app} default to: {provider}/{model}")
        except ValueError as e:
            # Config manager rejects unknown app names; report, don't crash.
            print(f"❌ Error: {e}")
        handled = True

    if args.set_chat_model:
        config_manager.set_chat_model(args.set_chat_model)
        print(f"✅ Set chat model to: {args.set_chat_model}")
        handled = True

    if args.set_code_model:
        config_manager.set_code_model(args.set_code_model)
        print(f"✅ Set code model to: {args.set_code_model}")
        handled = True

    # Vision configuration
    if args.set_vision_caption:
        # Deprecated path kept for backward compatibility; still applied.
        print("⚠️ WARNING: --set-vision-caption is deprecated")
        print("💡 Use instead: abstractcore --set-vision-provider PROVIDER MODEL")
        print(" This provides clearer, more reliable configuration")
        print()
        config_manager.set_vision_caption(args.set_vision_caption)
        print(f"✅ Set vision caption model to: {args.set_vision_caption}")
        handled = True

    if args.set_vision_provider:
        provider, model = args.set_vision_provider
        config_manager.set_vision_provider(provider, model)
        print(f"✅ Set vision provider to: {provider}/{model}")
        handled = True

    if args.add_vision_fallback:
        provider, model = args.add_vision_fallback
        config_manager.add_vision_fallback(provider, model)
        print(f"✅ Added vision fallback: {provider}/{model}")
        handled = True

    if args.disable_vision:
        config_manager.disable_vision()
        print("✅ Disabled vision fallback")
        handled = True

    if args.download_vision_model:
        # A bare --download-vision-model carries the argparse const
        # ("blip-base-caption"), so this truthiness check is safe.
        print(f"📥 Starting download of vision model: {args.download_vision_model}")
        success = download_vision_model(args.download_vision_model)
        if success:
            print(f"✅ Successfully downloaded and configured: {args.download_vision_model}")
        else:
            print(f"❌ Failed to download: {args.download_vision_model}")
        handled = True

    # Embeddings configuration
    if args.set_embeddings_model:
        config_manager.set_embeddings_model(args.set_embeddings_model)
        print(f"✅ Set embeddings model to: {args.set_embeddings_model}")
        handled = True

    if args.set_embeddings_provider:
        # NOTE(review): a bare --set-embeddings-provider yields const=True,
        # which fails the isinstance check below and is silently ignored
        # (still marked handled) — confirm this is intentional.
        if isinstance(args.set_embeddings_provider, str):
            config_manager.set_embeddings_provider(args.set_embeddings_provider)
            print(f"✅ Set embeddings provider to: {args.set_embeddings_provider}")
        handled = True

    # API keys
    if args.set_api_key:
        provider, key = args.set_api_key
        config_manager.set_api_key(provider, key)
        print(f"✅ Set API key for: {provider}")
        handled = True

    if args.list_api_keys:
        status = config_manager.get_status()
        print("🔑 API Key Status:")
        for provider, status_text in status["api_keys"].items():
            print(f" {provider}: {status_text}")
        handled = True

    # Cache configuration
    if args.set_default_cache_dir:
        config_manager.set_default_cache_dir(args.set_default_cache_dir)
        print(f"✅ Set default cache directory to: {args.set_default_cache_dir}")
        handled = True

    if args.set_huggingface_cache_dir:
        config_manager.set_huggingface_cache_dir(args.set_huggingface_cache_dir)
        print(f"✅ Set HuggingFace cache directory to: {args.set_huggingface_cache_dir}")
        handled = True

    if args.set_local_models_cache_dir:
        config_manager.set_local_models_cache_dir(args.set_local_models_cache_dir)
        print(f"✅ Set local models cache directory to: {args.set_local_models_cache_dir}")
        handled = True

    # Logging configuration
    if args.set_console_log_level:
        config_manager.set_console_log_level(args.set_console_log_level)
        print(f"✅ Set console log level to: {args.set_console_log_level}")
        handled = True

    if args.set_file_log_level:
        config_manager.set_file_log_level(args.set_file_log_level)
        print(f"✅ Set file log level to: {args.set_file_log_level}")
        handled = True

    if args.set_log_base_dir:
        config_manager.set_log_base_dir(args.set_log_base_dir)
        print(f"✅ Set log base directory to: {args.set_log_base_dir}")
        handled = True

    if args.enable_debug_logging:
        config_manager.enable_debug_logging()
        print("✅ Enabled debug logging for both console and file")
        handled = True

    if args.disable_console_logging:
        config_manager.disable_console_logging()
        print("✅ Disabled console logging")
        handled = True

    if args.enable_file_logging:
        config_manager.enable_file_logging()
        print("✅ Enabled file logging")
        handled = True

    if args.disable_file_logging:
        config_manager.disable_file_logging()
        print("✅ Disabled file logging")
        handled = True

    # Streaming configuration
    if args.stream:
        # choices=["on", "off"] at the parser restricts the value here.
        enabled = args.stream == "on"
        config_manager.set_streaming_default("cli", enabled)
        status = "enabled" if enabled else "disabled"
        print(f"✅ CLI streaming {status} by default")
        handled = True

    if args.enable_streaming:
        config_manager.enable_cli_streaming()
        print("✅ Enabled CLI streaming by default")
        handled = True

    if args.disable_streaming:
        config_manager.disable_cli_streaming()
        print("✅ Disabled CLI streaming by default")
        handled = True

    return handled
|
|
675
|
+
|
|
676
|
+
def main(argv: List[str] = None) -> int:
    """Main CLI entry point for the AbstractCore configuration tool.

    Args:
        argv: Command-line arguments to parse. Defaults to ``sys.argv[1:]``
            when None (kept as ``List[str] = None`` for call compatibility).

    Returns:
        0 when a configuration command was handled successfully; 1 when no
        command matched (help is printed) or an error occurred.
    """
    if argv is None:
        argv = sys.argv[1:]

    # RawDescriptionHelpFormatter preserves the hand-formatted epilog below.
    parser = argparse.ArgumentParser(
        prog="abstractcore",
        description="AbstractCore Unified Configuration System",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
QUICK START:
  abstractcore --status                    # Show current configuration
  abstractcore --configure                 # Interactive guided setup

COMMON TASKS:
  # Set default model for all apps
  abstractcore --set-global-default ollama llama3:8b

  # Set different models for specific apps
  abstractcore --set-app-default cli lmstudio qwen/qwen3-next-80b
  abstractcore --set-app-default summarizer openai gpt-4o-mini
  abstractcore --set-app-default extractor ollama qwen3:4b-instruct

  # Configure API keys
  abstractcore --set-api-key openai sk-your-key-here
  abstractcore --set-api-key anthropic your-anthropic-key

  # Setup vision for images (with text-only models)
  abstractcore --set-vision-provider ollama qwen2.5vl:7b
  abstractcore --download-vision-model

  # Configure logging
  abstractcore --enable-debug-logging              # Enable debug mode
  abstractcore --set-console-log-level WARNING     # Reduce console output
  abstractcore --enable-file-logging               # Save logs to files

SPECIALIZED MODELS:
  abstractcore --set-chat-model openai/gpt-4o-mini           # For chat applications
  abstractcore --set-code-model anthropic/claude-3-5-sonnet  # For coding tasks

PRIORITY SYSTEM:
  1. Explicit parameters (highest): summarizer doc.pdf --provider openai --model gpt-4o
  2. App-specific config: --set-app-default summarizer openai gpt-4o-mini
  3. Global config: --set-global-default openai/gpt-4o-mini
  4. Built-in defaults (lowest): huggingface/unsloth/Qwen3-4B-Instruct-2507-GGUF

APPS:
  cli          Interactive CLI (python -m abstractcore.utils.cli)
  summarizer   Document summarization (summarizer document.pdf)
  extractor    Entity/relationship extraction (extractor data.txt)
  judge        Text evaluation and scoring (judge essay.md)

TROUBLESHOOTING:
  abstractcore --status                    # Check current settings
  abstractcore --reset                     # Reset to defaults
  abstractcore --list-api-keys             # Check API key status

  If apps show "no provider/model configured":
  abstractcore --set-global-default ollama llama3:8b

DOCUMENTATION: docs/centralized-config.md
        """
    )

    add_arguments(parser)
    args = parser.parse_args(argv)

    try:
        # Handle configuration commands; a True return means something ran.
        if handle_commands(args):
            return 0

        # If no commands were handled, show help and signal failure.
        parser.print_help()
        return 1

    except Exception as e:
        # Broad catch is intentional at the CLI boundary: report the failure
        # and exit non-zero instead of dumping a traceback at the user.
        # Fix: error output goes to stderr so piped stdout stays clean.
        print(f"❌ Error: {e}", file=sys.stderr)
        return 1
|
|
757
|
+
# Script entry point: configure root logging at INFO, then exit the process
# with main()'s return code (0 on success, 1 on failure) so callers and
# shells can detect errors via the exit status.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    sys.exit(main())
|