@aj-archipelago/cortex 1.4.2 → 1.4.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -0
- package/config.js +1 -1
- package/helper-apps/cortex-autogen2/.dockerignore +1 -0
- package/helper-apps/cortex-autogen2/Dockerfile +6 -10
- package/helper-apps/cortex-autogen2/Dockerfile.worker +2 -0
- package/helper-apps/cortex-autogen2/agents.py +203 -2
- package/helper-apps/cortex-autogen2/main.py +1 -1
- package/helper-apps/cortex-autogen2/pyproject.toml +12 -0
- package/helper-apps/cortex-autogen2/requirements.txt +14 -0
- package/helper-apps/cortex-autogen2/services/redis_publisher.py +1 -1
- package/helper-apps/cortex-autogen2/services/run_analyzer.py +1 -1
- package/helper-apps/cortex-autogen2/task_processor.py +431 -229
- package/helper-apps/cortex-autogen2/test_entity_fetcher.py +305 -0
- package/helper-apps/cortex-autogen2/tests/README.md +240 -0
- package/helper-apps/cortex-autogen2/tests/TEST_REPORT.md +342 -0
- package/helper-apps/cortex-autogen2/tests/__init__.py +8 -0
- package/helper-apps/cortex-autogen2/tests/analysis/__init__.py +1 -0
- package/helper-apps/cortex-autogen2/tests/analysis/improvement_suggester.py +224 -0
- package/helper-apps/cortex-autogen2/tests/analysis/trend_analyzer.py +211 -0
- package/helper-apps/cortex-autogen2/tests/cli/__init__.py +1 -0
- package/helper-apps/cortex-autogen2/tests/cli/run_tests.py +296 -0
- package/helper-apps/cortex-autogen2/tests/collectors/__init__.py +1 -0
- package/helper-apps/cortex-autogen2/tests/collectors/log_collector.py +252 -0
- package/helper-apps/cortex-autogen2/tests/collectors/progress_collector.py +182 -0
- package/helper-apps/cortex-autogen2/tests/conftest.py +15 -0
- package/helper-apps/cortex-autogen2/tests/database/__init__.py +1 -0
- package/helper-apps/cortex-autogen2/tests/database/repository.py +501 -0
- package/helper-apps/cortex-autogen2/tests/database/schema.sql +108 -0
- package/helper-apps/cortex-autogen2/tests/evaluators/__init__.py +1 -0
- package/helper-apps/cortex-autogen2/tests/evaluators/llm_scorer.py +294 -0
- package/helper-apps/cortex-autogen2/tests/evaluators/prompts.py +250 -0
- package/helper-apps/cortex-autogen2/tests/evaluators/wordcloud_validator.py +168 -0
- package/helper-apps/cortex-autogen2/tests/metrics/__init__.py +1 -0
- package/helper-apps/cortex-autogen2/tests/metrics/collector.py +155 -0
- package/helper-apps/cortex-autogen2/tests/orchestrator.py +576 -0
- package/helper-apps/cortex-autogen2/tests/test_cases.yaml +279 -0
- package/helper-apps/cortex-autogen2/tests/test_data.db +0 -0
- package/helper-apps/cortex-autogen2/tests/utils/__init__.py +3 -0
- package/helper-apps/cortex-autogen2/tests/utils/connectivity.py +112 -0
- package/helper-apps/cortex-autogen2/tools/azure_blob_tools.py +74 -24
- package/helper-apps/cortex-autogen2/tools/entity_api_registry.json +38 -0
- package/helper-apps/cortex-autogen2/tools/file_tools.py +1 -1
- package/helper-apps/cortex-autogen2/tools/search_tools.py +436 -238
- package/helper-apps/cortex-file-handler/package-lock.json +2 -2
- package/helper-apps/cortex-file-handler/package.json +1 -1
- package/helper-apps/cortex-file-handler/scripts/setup-test-containers.js +4 -5
- package/helper-apps/cortex-file-handler/src/blobHandler.js +36 -144
- package/helper-apps/cortex-file-handler/src/services/FileConversionService.js +5 -3
- package/helper-apps/cortex-file-handler/src/services/storage/AzureStorageProvider.js +34 -1
- package/helper-apps/cortex-file-handler/src/services/storage/GCSStorageProvider.js +22 -0
- package/helper-apps/cortex-file-handler/src/services/storage/LocalStorageProvider.js +28 -1
- package/helper-apps/cortex-file-handler/src/services/storage/StorageFactory.js +29 -4
- package/helper-apps/cortex-file-handler/src/services/storage/StorageProvider.js +11 -0
- package/helper-apps/cortex-file-handler/src/services/storage/StorageService.js +1 -1
- package/helper-apps/cortex-file-handler/tests/blobHandler.test.js +3 -2
- package/helper-apps/cortex-file-handler/tests/checkHashShortLived.test.js +8 -1
- package/helper-apps/cortex-file-handler/tests/containerConversionFlow.test.js +5 -2
- package/helper-apps/cortex-file-handler/tests/containerNameParsing.test.js +14 -7
- package/helper-apps/cortex-file-handler/tests/containerParameterFlow.test.js +5 -2
- package/helper-apps/cortex-file-handler/tests/storage/StorageFactory.test.js +31 -19
- package/package.json +1 -1
- package/server/modelExecutor.js +4 -0
- package/server/plugins/claude4VertexPlugin.js +540 -0
- package/server/plugins/openAiWhisperPlugin.js +43 -2
- package/tests/integration/rest/vendors/claude_streaming.test.js +121 -0
- package/tests/unit/plugins/claude4VertexPlugin.test.js +462 -0
- package/tests/unit/plugins/claude4VertexToolConversion.test.js +413 -0
- package/helper-apps/cortex-autogen/.funcignore +0 -8
- package/helper-apps/cortex-autogen/Dockerfile +0 -10
- package/helper-apps/cortex-autogen/OAI_CONFIG_LIST +0 -6
- package/helper-apps/cortex-autogen/agents.py +0 -493
- package/helper-apps/cortex-autogen/agents_extra.py +0 -14
- package/helper-apps/cortex-autogen/config.py +0 -18
- package/helper-apps/cortex-autogen/data_operations.py +0 -29
- package/helper-apps/cortex-autogen/function_app.py +0 -44
- package/helper-apps/cortex-autogen/host.json +0 -15
- package/helper-apps/cortex-autogen/main.py +0 -38
- package/helper-apps/cortex-autogen/prompts.py +0 -196
- package/helper-apps/cortex-autogen/prompts_extra.py +0 -5
- package/helper-apps/cortex-autogen/requirements.txt +0 -9
- package/helper-apps/cortex-autogen/search.py +0 -85
- package/helper-apps/cortex-autogen/test.sh +0 -40
- package/helper-apps/cortex-autogen/tools/sasfileuploader.py +0 -66
- package/helper-apps/cortex-autogen/utils.py +0 -88
- package/helper-apps/cortex-autogen2/DigiCertGlobalRootCA.crt.pem +0 -22
- package/helper-apps/cortex-autogen2/poetry.lock +0 -3652
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
"""Wordcloud quality validation - checks for stop words and rendering issues."""
|
|
2
|
+
|
|
3
|
+
import csv
|
|
4
|
+
import os
|
|
5
|
+
from typing import List, Tuple
|
|
6
|
+
|
|
7
|
+
# Same stopword lists as in agent prompt
|
|
8
|
+
ENGLISH_STOPWORDS = {
|
|
9
|
+
# Articles, determiners
|
|
10
|
+
'a', 'an', 'the', 'this', 'that', 'these', 'those',
|
|
11
|
+
# Pronouns
|
|
12
|
+
'i', 'you', 'he', 'she', 'it', 'we', 'they', 'me', 'him', 'her', 'us', 'them',
|
|
13
|
+
'my', 'your', 'his', 'its', 'our', 'their', 'mine', 'yours', 'hers', 'ours', 'theirs',
|
|
14
|
+
'myself', 'yourself', 'himself', 'herself', 'itself', 'ourselves', 'themselves',
|
|
15
|
+
# Prepositions
|
|
16
|
+
'in', 'on', 'at', 'to', 'for', 'of', 'from', 'by', 'with', 'about', 'into', 'through',
|
|
17
|
+
'during', 'before', 'after', 'above', 'below', 'between', 'under', 'over', 'against',
|
|
18
|
+
# Conjunctions
|
|
19
|
+
'and', 'but', 'or', 'nor', 'so', 'yet', 'because', 'although', 'while', 'if', 'unless',
|
|
20
|
+
'until', 'when', 'where', 'whether', 'than', 'as', 'since',
|
|
21
|
+
# Verbs (common)
|
|
22
|
+
'is', 'am', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having',
|
|
23
|
+
'do', 'does', 'did', 'doing', 'will', 'would', 'should', 'could', 'may', 'might', 'must',
|
|
24
|
+
'can', 'shall',
|
|
25
|
+
# Adverbs
|
|
26
|
+
'not', 'no', 'yes', 'very', 'too', 'also', 'just', 'only', 'even', 'now', 'then',
|
|
27
|
+
'here', 'there', 'how', 'why', 'what', 'who', 'which', 'whom', 'whose',
|
|
28
|
+
# Other common words
|
|
29
|
+
'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such',
|
|
30
|
+
'own', 'same', 'out', 'up', 'down', 'off', 'again', 'further', 'once'
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
ARABIC_STOPWORDS = {
|
|
34
|
+
# Prepositions and particles
|
|
35
|
+
'في', 'من', 'إلى', 'على', 'عن', 'الى', 'الي', 'مع', 'ضد', 'حول', 'خلال',
|
|
36
|
+
'عند', 'لدى', 'منذ', 'حتى', 'ب', 'ل', 'ك',
|
|
37
|
+
# Conjunctions
|
|
38
|
+
'و', 'أو', 'لكن', 'لكن', 'بل', 'إذا', 'لو', 'ف',
|
|
39
|
+
# Pronouns
|
|
40
|
+
'هو', 'هي', 'هم', 'هن', 'أنت', 'أنتم', 'أنا', 'نحن', 'أنتن',
|
|
41
|
+
'ه', 'ها', 'هما', 'كم', 'كما', 'هذا', 'هذه', 'ذلك', 'تلك',
|
|
42
|
+
# Verbs (common auxiliaries)
|
|
43
|
+
'كان', 'يكون', 'تكون', 'كانت', 'ليس', 'ليست', 'كن',
|
|
44
|
+
# Question words
|
|
45
|
+
'ما', 'ماذا', 'من', 'متى', 'أين', 'كيف', 'لماذا', 'هل', 'أي',
|
|
46
|
+
# Determiners
|
|
47
|
+
'ال', 'كل', 'بعض', 'جميع', 'أحد', 'إحدى',
|
|
48
|
+
# Time/sequence
|
|
49
|
+
'قبل', 'بعد', 'ثم', 'الآن', 'أمس', 'اليوم', 'غدا',
|
|
50
|
+
# Common words
|
|
51
|
+
'أن', 'ان', 'إن', 'لا', 'لم', 'لن', 'قد', 'التي', 'الذي', 'اللذان',
|
|
52
|
+
'اللتان', 'الذين', 'اللاتي', 'اللواتي', 'عن', 'عند', 'غير', 'بعد',
|
|
53
|
+
'بين', 'ذات', 'صار', 'أصبح', 'أضحى', 'ظل', 'أمسى', 'بات', 'ما زال'
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def validate_wordcloud_quality(csv_file_path: str, language: str) -> Tuple[bool, List[str], int]:
|
|
58
|
+
"""
|
|
59
|
+
Validate wordcloud CSV for stop word contamination.
|
|
60
|
+
|
|
61
|
+
Args:
|
|
62
|
+
csv_file_path: Path to the CSV file with word frequencies
|
|
63
|
+
language: 'en' for English, 'ar' for Arabic
|
|
64
|
+
|
|
65
|
+
Returns:
|
|
66
|
+
(is_valid, stopwords_found, score_deduction)
|
|
67
|
+
"""
|
|
68
|
+
if not os.path.exists(csv_file_path):
|
|
69
|
+
return False, [f"CSV file not found: {csv_file_path}"], 50
|
|
70
|
+
|
|
71
|
+
stopwords_set = ENGLISH_STOPWORDS if language == 'en' else ARABIC_STOPWORDS
|
|
72
|
+
stopwords_found = []
|
|
73
|
+
|
|
74
|
+
try:
|
|
75
|
+
with open(csv_file_path, 'r', encoding='utf-8') as f:
|
|
76
|
+
reader = csv.DictReader(f)
|
|
77
|
+
for row in reader:
|
|
78
|
+
word = row.get('token', row.get('word', '')).strip()
|
|
79
|
+
if word.lower() in stopwords_set or word in stopwords_set:
|
|
80
|
+
count = row.get('count', row.get('frequency', '?'))
|
|
81
|
+
stopwords_found.append(f"{word} ({count} occurrences)")
|
|
82
|
+
except Exception as e:
|
|
83
|
+
return False, [f"Error reading CSV: {str(e)}"], 30
|
|
84
|
+
|
|
85
|
+
# Score deduction: -5 points per stop word found (max -50)
|
|
86
|
+
deduction = min(len(stopwords_found) * 5, 50)
|
|
87
|
+
is_valid = len(stopwords_found) == 0
|
|
88
|
+
|
|
89
|
+
return is_valid, stopwords_found, deduction
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def validate_arabic_rendering(image_path: str) -> Tuple[bool, str]:
    """
    Check if Arabic wordcloud image has proper text rendering (not boxes).

    This is a heuristic check - looks for:
    - Image variance (boxes = low variance, text = high variance)
    - Proper resolution

    Args:
        image_path: Path to the wordcloud PNG image

    Returns:
        (is_valid, message)
    """
    if not os.path.exists(image_path):
        return False, f"Image file not found: {image_path}"

    # Simplified check - in production, use OCR or visual analysis
    # For now, just verify file size is reasonable
    try:
        size_bytes = os.path.getsize(image_path)
    except Exception as e:
        return False, f"Error checking image: {str(e)}"

    if size_bytes < 50000:  # Less than 50KB suggests rendering failure
        return False, f"Image suspiciously small ({size_bytes} bytes) - possible rendering failure"

    return True, "OK"
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def get_wordcloud_validation_report(output_dir: str) -> dict:
    """
    Generate a comprehensive validation report for all wordcloud files in the output directory.

    Args:
        output_dir: Directory containing wordcloud files

    Returns:
        Dictionary with validation results for each file
    """
    report = {
        'aje_english': {'valid': True, 'issues': [], 'deduction': 0},
        'aja_arabic': {'valid': True, 'issues': [], 'deduction': 0},
        'total_deduction': 0
    }

    # CSV stop-word contamination checks: (report key, filename, language)
    csv_checks = (
        ('aje_english', 'aje_freq.csv', 'en'),
        ('aja_arabic', 'aja_freq.csv', 'ar'),
    )
    for key, filename, lang in csv_checks:
        csv_path = os.path.join(output_dir, filename)
        if not os.path.exists(csv_path):
            continue
        ok, stops_found, deduction = validate_wordcloud_quality(csv_path, lang)
        if ok:
            continue
        entry = report[key]
        entry['valid'] = False
        entry['issues'] = stops_found[:10]  # First 10
        entry['deduction'] = deduction
        report['total_deduction'] += deduction

    # Check Arabic rendering quality
    image_path = os.path.join(output_dir, 'aja_wordcloud.png')
    if os.path.exists(image_path):
        ok, msg = validate_arabic_rendering(image_path)
        if not ok:
            entry = report['aja_arabic']
            entry['valid'] = False
            entry['issues'].append(f"Rendering: {msg}")
            entry['deduction'] += 20
            report['total_deduction'] += 20

    return report
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Performance and quality metrics collectors."""
|
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Metrics collector for test performance analysis.
|
|
3
|
+
|
|
4
|
+
Calculates latency, frequency, and quality metrics from test run data.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
from datetime import datetime
|
|
9
|
+
from typing import List, Dict, Optional
|
|
10
|
+
|
|
11
|
+
logger = logging.getLogger(__name__)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class MetricsCollector:
|
|
15
|
+
"""Collects and calculates performance metrics from test data."""
|
|
16
|
+
|
|
17
|
+
@staticmethod
|
|
18
|
+
def calculate_metrics(
|
|
19
|
+
test_run_data: Dict,
|
|
20
|
+
progress_updates: List[Dict],
|
|
21
|
+
logs: List[Dict],
|
|
22
|
+
files_created: List[Dict]
|
|
23
|
+
) -> Dict:
|
|
24
|
+
"""
|
|
25
|
+
Calculate comprehensive metrics from test run data.
|
|
26
|
+
|
|
27
|
+
Args:
|
|
28
|
+
test_run_data: Test run information (started_at, completed_at, etc.)
|
|
29
|
+
progress_updates: List of progress updates
|
|
30
|
+
logs: List of log entries
|
|
31
|
+
files_created: List of files created
|
|
32
|
+
|
|
33
|
+
Returns:
|
|
34
|
+
Dictionary with calculated metrics
|
|
35
|
+
"""
|
|
36
|
+
logger.info("📈 Calculating metrics...")
|
|
37
|
+
|
|
38
|
+
metrics = {}
|
|
39
|
+
|
|
40
|
+
# Time metrics
|
|
41
|
+
metrics.update(MetricsCollector._calculate_time_metrics(test_run_data, progress_updates))
|
|
42
|
+
|
|
43
|
+
# Progress update metrics
|
|
44
|
+
metrics.update(MetricsCollector._calculate_progress_metrics(progress_updates))
|
|
45
|
+
|
|
46
|
+
# Log metrics
|
|
47
|
+
metrics.update(MetricsCollector._calculate_log_metrics(logs))
|
|
48
|
+
|
|
49
|
+
# File metrics
|
|
50
|
+
metrics.update(MetricsCollector._calculate_file_metrics(files_created))
|
|
51
|
+
|
|
52
|
+
logger.info(f" Time to completion: {metrics.get('time_to_completion', 0):.1f}s")
|
|
53
|
+
logger.info(f" Progress updates: {metrics.get('total_progress_updates', 0)}")
|
|
54
|
+
logger.info(f" Files created: {metrics.get('files_created', 0)}")
|
|
55
|
+
logger.info(f" Errors: {metrics.get('errors_count', 0)}")
|
|
56
|
+
|
|
57
|
+
return metrics
|
|
58
|
+
|
|
59
|
+
@staticmethod
|
|
60
|
+
def _calculate_time_metrics(test_run_data: Dict, progress_updates: List[Dict]) -> Dict:
|
|
61
|
+
"""Calculate timing-related metrics."""
|
|
62
|
+
started_at_str = test_run_data.get('started_at')
|
|
63
|
+
completed_at_str = test_run_data.get('completed_at')
|
|
64
|
+
|
|
65
|
+
if not started_at_str:
|
|
66
|
+
return {
|
|
67
|
+
'time_to_first_progress': 0,
|
|
68
|
+
'time_to_completion': 0
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
started_at = datetime.fromisoformat(started_at_str)
|
|
72
|
+
|
|
73
|
+
# Time to first progress update
|
|
74
|
+
time_to_first_progress = 0
|
|
75
|
+
if progress_updates:
|
|
76
|
+
first_update_time = datetime.fromisoformat(progress_updates[0]['timestamp'])
|
|
77
|
+
time_to_first_progress = (first_update_time - started_at).total_seconds()
|
|
78
|
+
|
|
79
|
+
# Time to completion
|
|
80
|
+
time_to_completion = 0
|
|
81
|
+
if completed_at_str:
|
|
82
|
+
completed_at = datetime.fromisoformat(completed_at_str)
|
|
83
|
+
time_to_completion = (completed_at - started_at).total_seconds()
|
|
84
|
+
elif progress_updates:
|
|
85
|
+
# Use last progress update time if no completion time
|
|
86
|
+
last_update_time = datetime.fromisoformat(progress_updates[-1]['timestamp'])
|
|
87
|
+
time_to_completion = (last_update_time - started_at).total_seconds()
|
|
88
|
+
|
|
89
|
+
return {
|
|
90
|
+
'time_to_first_progress': time_to_first_progress,
|
|
91
|
+
'time_to_completion': time_to_completion
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
@staticmethod
|
|
95
|
+
def _calculate_progress_metrics(progress_updates: List[Dict]) -> Dict:
|
|
96
|
+
"""Calculate progress update frequency metrics."""
|
|
97
|
+
if not progress_updates:
|
|
98
|
+
return {
|
|
99
|
+
'total_progress_updates': 0,
|
|
100
|
+
'avg_update_interval': 0,
|
|
101
|
+
'min_update_interval': 0,
|
|
102
|
+
'max_update_interval': 0
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
# Calculate intervals between updates
|
|
106
|
+
intervals = []
|
|
107
|
+
for i in range(1, len(progress_updates)):
|
|
108
|
+
prev_time = datetime.fromisoformat(progress_updates[i-1]['timestamp'])
|
|
109
|
+
curr_time = datetime.fromisoformat(progress_updates[i]['timestamp'])
|
|
110
|
+
interval = (curr_time - prev_time).total_seconds()
|
|
111
|
+
intervals.append(interval)
|
|
112
|
+
|
|
113
|
+
avg_interval = sum(intervals) / len(intervals) if intervals else 0
|
|
114
|
+
min_interval = min(intervals) if intervals else 0
|
|
115
|
+
max_interval = max(intervals) if intervals else 0
|
|
116
|
+
|
|
117
|
+
return {
|
|
118
|
+
'total_progress_updates': len(progress_updates),
|
|
119
|
+
'avg_update_interval': avg_interval,
|
|
120
|
+
'min_update_interval': min_interval,
|
|
121
|
+
'max_update_interval': max_interval
|
|
122
|
+
}
|
|
123
|
+
|
|
124
|
+
@staticmethod
|
|
125
|
+
def _calculate_log_metrics(logs: List[Dict]) -> Dict:
|
|
126
|
+
"""Calculate log-related metrics."""
|
|
127
|
+
if not logs:
|
|
128
|
+
return {
|
|
129
|
+
'errors_count': 0,
|
|
130
|
+
'warnings_count': 0
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
errors = sum(1 for log in logs if log.get('level') == 'ERROR')
|
|
134
|
+
warnings = sum(1 for log in logs if log.get('level') in ('WARNING', 'WARN'))
|
|
135
|
+
|
|
136
|
+
return {
|
|
137
|
+
'errors_count': errors,
|
|
138
|
+
'warnings_count': warnings
|
|
139
|
+
}
|
|
140
|
+
|
|
141
|
+
@staticmethod
|
|
142
|
+
def _calculate_file_metrics(files_created: List[Dict]) -> Dict:
|
|
143
|
+
"""Calculate file creation metrics."""
|
|
144
|
+
if not files_created:
|
|
145
|
+
return {
|
|
146
|
+
'files_created': 0,
|
|
147
|
+
'sas_urls_provided': 0
|
|
148
|
+
}
|
|
149
|
+
|
|
150
|
+
sas_urls = sum(1 for file in files_created if file.get('sas_url'))
|
|
151
|
+
|
|
152
|
+
return {
|
|
153
|
+
'files_created': len(files_created),
|
|
154
|
+
'sas_urls_provided': sas_urls
|
|
155
|
+
}
|