abstractcore-2.4.4-py3-none-any.whl → abstractcore-2.4.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractcore/cli/__init__.py +9 -0
- abstractcore/cli/main.py +759 -0
- abstractcore/cli/vision_config.py +491 -0
- abstractcore/media/handlers/__init__.py +16 -0
- abstractcore/media/handlers/anthropic_handler.py +326 -0
- abstractcore/media/handlers/local_handler.py +541 -0
- abstractcore/media/handlers/openai_handler.py +281 -0
- abstractcore/media/processors/__init__.py +13 -0
- abstractcore/media/processors/image_processor.py +610 -0
- abstractcore/media/processors/office_processor.py +490 -0
- abstractcore/media/processors/pdf_processor.py +485 -0
- abstractcore/media/processors/text_processor.py +557 -0
- abstractcore/media/utils/__init__.py +22 -0
- abstractcore/media/utils/image_scaler.py +306 -0
- abstractcore/utils/version.py +1 -1
- {abstractcore-2.4.4.dist-info → abstractcore-2.4.5.dist-info}/METADATA +1 -1
- {abstractcore-2.4.4.dist-info → abstractcore-2.4.5.dist-info}/RECORD +21 -7
- {abstractcore-2.4.4.dist-info → abstractcore-2.4.5.dist-info}/entry_points.txt +2 -0
- {abstractcore-2.4.4.dist-info → abstractcore-2.4.5.dist-info}/WHEEL +0 -0
- {abstractcore-2.4.4.dist-info → abstractcore-2.4.5.dist-info}/licenses/LICENSE +0 -0
- {abstractcore-2.4.4.dist-info → abstractcore-2.4.5.dist-info}/top_level.txt +0 -0
abstractcore/media/utils/image_scaler.py
ADDED
@@ -0,0 +1,306 @@
"""
Image scaling utility for AbstractCore media handling.

Provides intelligent image scaling based on model-specific requirements
and capabilities for vision models.
"""

from typing import Tuple, Optional, Union, Dict, Any
from enum import Enum
from pathlib import Path
import logging

try:
    from PIL import Image, ImageOps
    PIL_AVAILABLE = True
except ImportError:
    PIL_AVAILABLE = False

from ..base import MediaProcessingError


class ScalingMode(Enum):
    """Image scaling modes."""
    FIT = "fit"  # Scale to fit within target size, maintaining aspect ratio
    FILL = "fill"  # Scale to fill target size, may crop, maintaining aspect ratio
    STRETCH = "stretch"  # Stretch to exact target size, may distort aspect ratio
    PAD = "pad"  # Scale to fit and pad with background to exact target size
    CROP_CENTER = "crop_center"  # Scale to fill and crop from center


class ModelOptimizedScaler:
    """
    Intelligent image scaler that optimizes images for specific vision models.

    Uses model capability information to determine optimal scaling strategies.
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)

        if not PIL_AVAILABLE:
            raise MediaProcessingError("PIL (Pillow) is required for image scaling")

    def get_optimal_resolution(self, model_name: str, original_size: Tuple[int, int],
                               model_capabilities: Optional[Dict[str, Any]] = None) -> Tuple[int, int]:
        """
        Get optimal resolution for a specific model.

        Args:
            model_name: Name of the model
            original_size: Original image size (width, height)
            model_capabilities: Model capability information

        Returns:
            Optimal target size (width, height)
        """
        if model_capabilities is None:
            model_capabilities = self._get_model_capabilities(model_name)

        max_resolution = model_capabilities.get("max_image_resolution", "variable")
        image_patch_size = model_capabilities.get("image_patch_size", 16)
        adaptive_windowing = model_capabilities.get("adaptive_windowing", False)

        # Handle different resolution strategies
        if max_resolution == "variable":
            return self._optimize_variable_resolution(original_size, image_patch_size)
        elif max_resolution == "896x896":
            return (896, 896)
        elif max_resolution == "3584x3584":
            return self._optimize_large_resolution(original_size, (3584, 3584), image_patch_size)
        elif "x" in str(max_resolution):
            # Parse specific resolution like "1568x1568"
            w, h = map(int, str(max_resolution).split("x"))
            return (w, h)
        else:
            # Default fallback
            return self._optimize_variable_resolution(original_size, image_patch_size)

    def _optimize_variable_resolution(self, original_size: Tuple[int, int],
                                      patch_size: int = 16) -> Tuple[int, int]:
        """Optimize for variable-resolution models like Qwen3-VL."""
        width, height = original_size

        # For variable resolution, aim for a reasonable size that's efficient
        # while maintaining good quality
        max_dimension = 1024  # Good balance for most use cases

        # Scale down if too large
        if max(width, height) > max_dimension:
            if width > height:
                new_width = max_dimension
                new_height = int(height * (max_dimension / width))
            else:
                new_height = max_dimension
                new_width = int(width * (max_dimension / height))
        else:
            new_width, new_height = width, height

        # Round up to the nearest patch-size multiple for efficiency
        new_width = ((new_width + patch_size - 1) // patch_size) * patch_size
        new_height = ((new_height + patch_size - 1) // patch_size) * patch_size

        return (new_width, new_height)

    def _optimize_large_resolution(self, original_size: Tuple[int, int],
                                   max_size: Tuple[int, int],
                                   patch_size: int = 14) -> Tuple[int, int]:
        """Optimize for large-resolution models like Qwen2.5-VL."""
        width, height = original_size
        max_width, max_height = max_size

        # Scale to fit within max size while maintaining aspect ratio
        scale = min(max_width / width, max_height / height)

        if scale < 1:  # Only scale down, never up
            new_width = int(width * scale)
            new_height = int(height * scale)
        else:
            new_width, new_height = width, height

        # Round up to the nearest patch-size multiple
        new_width = ((new_width + patch_size - 1) // patch_size) * patch_size
        new_height = ((new_height + patch_size - 1) // patch_size) * patch_size

        return (new_width, new_height)

    def scale_image(self, image: Image.Image, target_size: Tuple[int, int],
                    mode: ScalingMode = ScalingMode.FIT,
                    background_color: Tuple[int, int, int] = (255, 255, 255)) -> Image.Image:
        """
        Scale image to target size using specified mode.

        Args:
            image: PIL Image to scale
            target_size: Target size (width, height)
            mode: Scaling mode
            background_color: Background color for padding (RGB)

        Returns:
            Scaled PIL Image
        """
        target_width, target_height = target_size

        if mode == ScalingMode.FIT:
            # Scale to fit within target size, maintaining aspect ratio
            image.thumbnail((target_width, target_height), Image.Resampling.LANCZOS)
            return image

        elif mode == ScalingMode.FILL:
            # Scale to fill target size, may crop
            return ImageOps.fit(image, target_size, Image.Resampling.LANCZOS)

        elif mode == ScalingMode.STRETCH:
            # Stretch to exact target size
            return image.resize(target_size, Image.Resampling.LANCZOS)

        elif mode == ScalingMode.PAD:
            # Scale to fit and pad to exact size
            image.thumbnail((target_width, target_height), Image.Resampling.LANCZOS)

            # Create new image with background color
            new_image = Image.new('RGB', target_size, background_color)

            # Paste scaled image centered
            paste_x = (target_width - image.width) // 2
            paste_y = (target_height - image.height) // 2
            new_image.paste(image, (paste_x, paste_y))

            return new_image

        elif mode == ScalingMode.CROP_CENTER:
            # Scale to fill and crop from center
            return ImageOps.fit(image, target_size, Image.Resampling.LANCZOS, centering=(0.5, 0.5))

        else:
            raise MediaProcessingError(f"Unknown scaling mode: {mode}")

    def scale_for_model(self, image: Image.Image, model_name: str,
                        scaling_mode: ScalingMode = ScalingMode.FIT,
                        model_capabilities: Optional[Dict[str, Any]] = None) -> Image.Image:
        """
        Scale image optimally for a specific model.

        Args:
            image: PIL Image to scale
            model_name: Name of the target model
            scaling_mode: How to scale the image
            model_capabilities: Model capability information

        Returns:
            Optimally scaled PIL Image for the model
        """
        original_size = image.size
        target_size = self.get_optimal_resolution(model_name, original_size, model_capabilities)

        self.logger.debug(f"Scaling image for {model_name}: {original_size} -> {target_size}")

        # For fixed-resolution models, always use PAD mode to maintain exact size
        if model_capabilities and model_capabilities.get("max_image_resolution") == "896x896":
            scaling_mode = ScalingMode.PAD

        return self.scale_image(image, target_size, scaling_mode)

    def _get_model_capabilities(self, model_name: str) -> Dict[str, Any]:
        """
        Get model capabilities from the capabilities JSON.

        Args:
            model_name: Name of the model

        Returns:
            Model capabilities dictionary
        """
        try:
            from ..capabilities import get_media_capabilities
            return get_media_capabilities(model_name).__dict__
        except ImportError:
            # Fallback capability detection
            return self._fallback_model_capabilities(model_name)

    def _fallback_model_capabilities(self, model_name: str) -> Dict[str, Any]:
        """Fallback capability detection when capabilities module not available."""
        model_lower = model_name.lower()

        # Gemma models - fixed 896x896
        if any(gem in model_lower for gem in ["gemma3", "gemma-3n"]):
            return {
                "max_image_resolution": "896x896",
                "image_patch_size": 16,
                "adaptive_windowing": True
            }

        # Qwen2.5-VL models - up to 3584x3584
        elif "qwen2.5" in model_lower and "vl" in model_lower:
            return {
                "max_image_resolution": "3584x3584",
                "image_patch_size": 14,
                "pixel_grouping": "28x28"
            }

        # Qwen3-VL models - variable resolution
        elif "qwen3" in model_lower and "vl" in model_lower:
            return {
                "max_image_resolution": "variable",
                "image_patch_size": 16,
                "pixel_grouping": "32x32"
            }

        # Claude models - up to 1568x1568
        elif "claude" in model_lower:
            return {
                "max_image_resolution": "1568x1568",
                "image_patch_size": 14
            }

        # Default fallback
        else:
            return {
                "max_image_resolution": "variable",
                "image_patch_size": 16
            }


# Convenience functions for easy usage
_scaler_instance = None

def get_scaler() -> ModelOptimizedScaler:
    """Get shared scaler instance."""
    global _scaler_instance
    if _scaler_instance is None:
        _scaler_instance = ModelOptimizedScaler()
    return _scaler_instance

def scale_image_for_model(image: Union[Image.Image, str, Path],
                          model_name: str,
                          scaling_mode: ScalingMode = ScalingMode.FIT) -> Image.Image:
    """
    Convenience function to scale an image for a specific model.

    Args:
        image: PIL Image, or path to image file
        model_name: Name of the target model
        scaling_mode: How to scale the image

    Returns:
        Optimally scaled PIL Image
    """
    if isinstance(image, (str, Path)):
        image = Image.open(image)

    scaler = get_scaler()
    return scaler.scale_for_model(image, model_name, scaling_mode)

def get_optimal_size_for_model(model_name: str, original_size: Tuple[int, int]) -> Tuple[int, int]:
    """
    Get optimal image size for a specific model.

    Args:
        model_name: Name of the target model
        original_size: Original image size (width, height)

    Returns:
        Optimal target size (width, height)
    """
    scaler = get_scaler()
    return scaler.get_optimal_resolution(model_name, original_size)
abstractcore/utils/version.py
CHANGED
@@ -11,4 +11,4 @@ including when the package is installed from PyPI where pyproject.toml is not available
 
 # Package version - update this when releasing new versions
 # This must be manually synchronized with the version in pyproject.toml
-__version__ = "2.4.4"
+__version__ = "2.4.5"
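After installing the new wheel, the bump is observable at runtime (a trivial sketch; assumes abstractcore 2.4.5 is installed):

    from abstractcore.utils.version import __version__

    print(__version__)  # "2.4.5", matching the diff above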
{abstractcore-2.4.4.dist-info → abstractcore-2.4.5.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: abstractcore
-Version: 2.4.4
+Version: 2.4.5
 Summary: Unified interface to all LLM providers with essential infrastructure for tool calling, streaming, and model management
 Author-email: Laurent-Philippe Albou <contact@abstractcore.ai>
 Maintainer-email: Laurent-Philippe Albou <contact@abstractcore.ai>
{abstractcore-2.4.4.dist-info → abstractcore-2.4.5.dist-info}/RECORD
CHANGED
@@ -11,6 +11,9 @@ abstractcore/architectures/enums.py,sha256=9vIv2vDBEKhxwzwH9iaSAyf-iVj3p8y9loMeN
 abstractcore/assets/architecture_formats.json,sha256=CIf6SaR_IJs1D7Uvd1K3zWngIXJ_yq3DM_IE3wnpCHY,16076
 abstractcore/assets/model_capabilities.json,sha256=iUkDiljyZUZzPlpYCOFgStXyc6e7dvOfReYQ0HFrX9Q,49703
 abstractcore/assets/session_schema.json,sha256=b6HTAWxRVlVhAzA7FqaKpunK1yO6jilBOsD5sQkqJTo,10580
+abstractcore/cli/__init__.py,sha256=rUjLjZSK3wENSw4g_iN43Bc2i5cggcEmj4NPXBMohdc,241
+abstractcore/cli/main.py,sha256=QD38nnfrInavO452WbkXCI37SVsdIu9VhvjEOojXBGY,31834
+abstractcore/cli/vision_config.py,sha256=jJzO4zBexh8SqSKp6YKOXdMDSv4AL4Ztl5Xi-5c4KyY,17869
 abstractcore/core/__init__.py,sha256=2h-86U4QkCQ4gzZ4iRusSTMlkODiUS6tKjZHiEXz6rM,684
 abstractcore/core/enums.py,sha256=BhkVnHC-X1_377JDmqd-2mnem9GdBLqixWlYzlP_FJU,695
 abstractcore/core/factory.py,sha256=UdrNwQAvifvFS3LMjF5KO87m-2n1bJBryTs9pvesYcI,2804
@@ -29,6 +32,17 @@ abstractcore/media/base.py,sha256=vWdxscqTGTvd3oc4IzzsBTWhUrznWcqM7M_sFyq6-eE,15
 abstractcore/media/capabilities.py,sha256=qqKvXGkUT-FNnbFS-EYx8KCT9SZOovO2h4N7ucrHgBA,12844
 abstractcore/media/types.py,sha256=jG-g_2_gzl8eOgEalk9x3Ikhni9GoGfoRjkZWaBhV30,10165
 abstractcore/media/vision_fallback.py,sha256=XcEV5T9ekqd4DRBrhJvxgX5j_puxSlofvuUIfQc2vmg,10629
+abstractcore/media/handlers/__init__.py,sha256=HBqFo15JX1q7RM11076iFQUfPvInLlOizX-LGSznLuI,404
+abstractcore/media/handlers/anthropic_handler.py,sha256=iwcHKnHgHoQGpJKlJmwFJWBvrYg9lAzAnndybwsWZRA,12427
+abstractcore/media/handlers/local_handler.py,sha256=xfMV2Ztre3eUkDno4aSGob96oWUlgicZ3VChs-txjXU,23033
+abstractcore/media/handlers/openai_handler.py,sha256=o0H_WQ_NQt133my55xYQmq6_QFGafghF8sPTrqr1f0Q,9726
+abstractcore/media/processors/__init__.py,sha256=tExCZwVhD9Qzn3D99-zQcU-T1324YtiLkWjIfWLC708,442
+abstractcore/media/processors/image_processor.py,sha256=wj-f1W71ZCs4AZdmyTKZvnMee83GkiXKuZ6QvJwd3Lo,22577
+abstractcore/media/processors/office_processor.py,sha256=MqhLDWNtjHEpiMgpFaf7tbj8iDcTCf_zelWrHZkr7Z4,18580
+abstractcore/media/processors/pdf_processor.py,sha256=qniYt7cTYYPVRi_cS1IsXztOldeY0bqdn7sdbELBU9k,17157
+abstractcore/media/processors/text_processor.py,sha256=E28FtT2_jzsvMIDwZi6aVWuu_pSyAPSBa96fe4YYcU8,21092
+abstractcore/media/utils/__init__.py,sha256=30-CTif91iRKOXJ4njGiduWAt-xp31U7NafMBNvgdO0,460
+abstractcore/media/utils/image_scaler.py,sha256=QrYqoNQc8tzGu7I9Sf_E-Iv7ei2oLh714AGiX3yACNM,11338
 abstractcore/processing/__init__.py,sha256=t6hiakQjcZROT4pw9ZFt2q6fF3vf5VpdMKG2EWlsFd8,540
 abstractcore/processing/basic_extractor.py,sha256=3x-3BdIHgLvqLnLF6K1-P4qVaLIpAnNIIutaJi7lDQM,49832
 abstractcore/processing/basic_judge.py,sha256=tKWJrg_tY4vCHzWgXxz0ZjgLXBYYfpMcpG7vl03hJcM,32218
@@ -63,10 +77,10 @@ abstractcore/utils/message_preprocessor.py,sha256=GdHkm6tmrgjm3PwHRSCjIsq1XLkbhy
 abstractcore/utils/self_fixes.py,sha256=QEDwNTW80iQM4ftfEY3Ghz69F018oKwLM9yeRCYZOvw,5886
 abstractcore/utils/structured_logging.py,sha256=Vm-HviSa42G9DJCWmaEv4a0QG3NMsADD3ictLOs4En0,19952
 abstractcore/utils/token_utils.py,sha256=eLwFmJ68p9WMFD_MHLMmeJRW6Oqx_4hKELB8FNQ2Mnk,21097
-abstractcore/utils/version.py,sha256=…
-abstractcore-2.4.…
-abstractcore-2.4.…
-abstractcore-2.4.…
-abstractcore-2.4.…
-abstractcore-2.4.…
-abstractcore-2.4.…
+abstractcore/utils/version.py,sha256=WKRMOS_TRIxsNu-vJqfCYHA335-rya9xBBMzhx_S-Z8,605
+abstractcore-2.4.5.dist-info/licenses/LICENSE,sha256=PI2v_4HMvd6050uDD_4AY_8PzBnu2asa3RKbdDjowTA,1078
+abstractcore-2.4.5.dist-info/METADATA,sha256=9w6Q7NZXePxhRYnmloyJ6Nd2DFQ7YY4294OMF-6QYuY,27596
+abstractcore-2.4.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+abstractcore-2.4.5.dist-info/entry_points.txt,sha256=UdVmchBC_Lt3H4Vlkt5js-QDAkVlBbkCu1yCsswk-KE,454
+abstractcore-2.4.5.dist-info/top_level.txt,sha256=DiNHBI35SIawW3N9Z-z0y6cQYNbXd32pvBkW0RLfScs,13
+abstractcore-2.4.5.dist-info/RECORD,,
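Each RECORD row has the form path,sha256=<hash>,size, where the hash is an unpadded URL-safe base64 SHA-256 digest of the file, per the wheel spec. A small sketch for recomputing such a digest, e.g. to check an installed file against its RECORD entry (the path is a placeholder):

    import base64
    import hashlib

    def record_digest(path: str) -> str:
        # Unpadded URL-safe base64 of the file's SHA-256, as used in RECORD.
        with open(path, "rb") as f:
            digest = hashlib.sha256(f.read()).digest()
        return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

    print("sha256=" + record_digest("abstractcore/media/utils/image_scaler.py"))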
{abstractcore-2.4.4.dist-info → abstractcore-2.4.5.dist-info}/entry_points.txt
CHANGED
@@ -1,5 +1,7 @@
 [console_scripts]
 abstractcore = abstractcore.cli.main:main
+abstractcore-chat = abstractcore.utils.cli:main
+abstractcore-config = abstractcore.cli.main:main
 abstractcore-extractor = abstractcore.apps.extractor:main
 abstractcore-judge = abstractcore.apps.judge:main
 abstractcore-summarizer = abstractcore.apps.summarizer:main
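Once the wheel is installed, the two added console scripts become discoverable through the standard importlib.metadata API (a sketch assuming Python 3.10+ for the group= selector):

    from importlib.metadata import entry_points

    # Print abstractcore's console scripts, including the new
    # abstractcore-chat and abstractcore-config entries.
    for ep in entry_points(group="console_scripts"):
        if ep.name.startswith("abstractcore"):
            print(f"{ep.name} -> {ep.value}")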
{abstractcore-2.4.4.dist-info → abstractcore-2.4.5.dist-info}/WHEEL
File without changes
{abstractcore-2.4.4.dist-info → abstractcore-2.4.5.dist-info}/licenses/LICENSE
File without changes
{abstractcore-2.4.4.dist-info → abstractcore-2.4.5.dist-info}/top_level.txt
File without changes