qmenta-client 2.0__py3-none-any.whl → 2.1.dev1508__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
qmenta/client/Project.py
CHANGED
@@ -231,7 +231,7 @@ class Project:
         name="",
         input_data_type="qmenta_medical_image_data:3.10",
         add_to_container_id=0,
-        chunk_size=2
+        chunk_size=2**9,
         split_data=False,
     ):
         """
@@ -262,14 +262,14 @@ class Project:
             a power of 2: 2**x. Default value of x is 9 (chunk_size = 512 kB)
         split_data : bool
             If True, the platform will try to split the uploaded file into
-            different sessions. It will be ignored when the ssid
+            different sessions. It will be ignored when the ssid or a
+            add_to_container_id are given.
 
         Returns
         -------
         bool
             True if correctly uploaded, False otherwise.
         """
-
         filename = os.path.split(file_path)[1]
         input_data_type = "offline_analysis:1.0" if result else input_data_type
 
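Note: a minimal usage sketch of the changed upload defaults, assuming `project` is an already-authenticated qmenta.client Project instance; the file path and session id are placeholders and only keyword names visible in these hunks are used, so the full upload_file signature may differ.

    # Illustrative only: chunk_size should stay a power of two (2**x);
    # the new default of 2**9 corresponds to 512 kB chunks per the docstring.
    project.upload_file(
        file_path="/tmp/session_data.zip",   # placeholder path
        ssid="1",                            # when set, split_data is ignored
        chunk_size=2**9,
        split_data=False,
    )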
@@ -280,6 +280,8 @@ class Project:
 
         total_bytes = os.path.getsize(file_path)
 
+        split_data = self.__assert_split_data(split_data, ssid, add_to_container_id)
+
         # making chunks of the file and sending one by one
         logger = logging.getLogger(logger_name)
         with open(file_path, "rb") as file_object:
@@ -296,10 +298,6 @@ class Project:
             response = None
             last_chunk = False
 
-            if ssid and split_data:
-                logger.warning("split-data argument will be ignored because" + " ssid has been specified")
-                split_data = False
-
             while True:
                 data = file_object.read(chunk_size)
                 if not data:
@@ -374,11 +372,33 @@ class Project:
                 logger.error(error)
                 return False
 
-        message = "Your data was successfully uploaded."
-        message += "The uploaded file will be soon processed !"
+        message = "Your data was successfully uploaded. The uploaded file will be soon processed !"
         logger.info(message)
         return True
 
+    def delete_file(self, container_id, filenames):
+        """
+        Delete a file or files from a container.
+        Can be an input or an output container
+
+        Parameters
+        ----------
+        container_id : int
+        filenames : str or list of str
+
+        """
+        if not isinstance(filenames, str):
+            if isinstance(filenames, list):
+                if not all([isinstance(f, str) for f in filenames]):
+                    raise TypeError("Elements of `filenames` must be str")
+                filenames = ";".join(filenames)
+            else:
+                raise TypeError("`filenames` must be str or list of str")
+
+        platform.post(
+            self._account.auth, "file_manager/delete_files", data={"container_id": container_id, "files": filenames}
+        )
+
     def upload_mri(self, file_path, subject_name):
         """
         Upload new MRI data to the subject.
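A short usage sketch of the new delete_file helper added above: `filenames` may be a single string or a list of strings, which the method joins with ";" before posting to file_manager/delete_files. The container id and file names below are placeholders.

    # Hypothetical container id and file names, for illustration only.
    project.delete_file(1234, "T1.nii.gz")                   # single file
    project.delete_file(1234, ["T1.nii.gz", "DTI.nii.gz"])   # sent as "T1.nii.gz;DTI.nii.gz"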
@@ -477,7 +497,7 @@ class Project:
             self._account.auth, "file_manager/download_file", data=params, stream=True
         ) as response, open(local_filename, "wb") as f:
 
-            for chunk in response.iter_content(chunk_size=2
+            for chunk in response.iter_content(chunk_size=2**9 * 1024):
                 f.write(chunk)
                 f.flush()
 
@@ -523,7 +543,7 @@ class Project:
             self._account.auth, "file_manager/download_file", data=params, stream=True
         ) as response, open(zip_name, "wb") as f:
 
-            for chunk in response.iter_content(chunk_size=2
+            for chunk in response.iter_content(chunk_size=2**9 * 1024):
                 f.write(chunk)
                 f.flush()
 
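Both download hunks above now stream in 2**9 * 1024 = 524,288-byte (512 KiB) chunks. A generic, self-contained sketch of the same requests pattern, with a placeholder URL and output name:

    import requests

    # Stream a response to disk in 512 KiB chunks (2**9 * 1024 bytes),
    # mirroring the download loops in the two hunks above.
    with requests.get("https://example.com/archive.zip", stream=True) as response, \
            open("archive.zip", "wb") as f:
        for chunk in response.iter_content(chunk_size=2**9 * 1024):
            f.write(chunk)
            f.flush()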
@@ -2045,7 +2065,7 @@ class Project:
         """
         Parse QC (Quality Control) text output into a structured dictionary format.
 
-        This function takes raw QC text output (
+        This function takes raw QC text output (from the Protocol Adherence analysis)
         and parses it into a structured format that separates passed and failed rules,
         along with their associated files and conditions.
 
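Based on the parsing code later in this diff, the returned dictionary groups rules under "passed" and "failed". An illustrative sketch of the shape; the rule and file names are hypothetical, not real QC output:

    # Keys mirror the __parse_pass_rules / __parse_fail_rules helpers below;
    # the values are made-up examples.
    result = {
        "passed": [
            {
                "rule": "T1 present",                # example rule name
                "sub_rule": None,
                "files": [{"file": "T1.nii.gz", "passed_conditions": 3}],
            }
        ],
        "failed": [
            {
                "rule": "DTI directions",            # example rule name
                "files": [
                    {
                        "file": "DTI.nii.gz",
                        "conditions": [{"status": "failed", "condition": "Number of directions >= 30"}],
                    }
                ],
                "failed_conditions": {"Number of directions >= 30": 1},
            }
        ],
    }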
@@ -2104,18 +2124,15 @@ class Project:
 
         _, text = self.get_qc_status_subject(patient_id=patient_id, subject_name=subject_name, ssid=ssid)
 
-        result = {
-            "passed": [],
-            "failed": []
-        }
+        result = {"passed": [], "failed": []}
 
         # Split into failed and passed sections
-        sections = re.split(r
+        sections = re.split(r"={10,}\n\n", text)
         if len(sections) == 3:
-            failed_section = sections[1].split(
+            failed_section = sections[1].split("=" * 10)[0].strip()
             passed_section = sections[2].strip()
         else:
-            section = sections[1].split(
+            section = sections[1].split("=" * 10)[0].strip()
             if "PASSED QC MESSAGES" in section:
                 passed_section = section
                 failed_section = ""
@@ -2123,85 +2140,13 @@ class Project:
                 failed_section = section
                 passed_section = ""
 
-
-        failed_rules = re.split(r
-
-            rule_name = rule_text.split(' ❌')[0].strip()
-            rule_data = {
-                "rule": rule_name,
-                "files": [],
-                "failed_conditions": {}
-            }
-
-            # Extract all file comparisons for this rule
-            file_comparisons = re.split(r'\t- Comparison with file:', rule_text)
-            for comp in file_comparisons[1:]:  # Skip first part
-                file_name = comp.split('\n')[0].strip()
-                conditions_match = re.search(
-                    r'Conditions:(.*?)(?=\n\t- Comparison|\n\n|$)',
-                    comp,
-                    re.DOTALL
-                )
-                if not conditions_match:
-                    continue
-
-                conditions_text = conditions_match.group(1).strip()
-                # Parse conditions
-                conditions = []
-                for line in conditions_text.split('\n'):
-                    line = line.strip()
-                    if line.startswith('·'):
-                        status = '✔' if '✔' in line else '🚫'
-                        condition = re.sub(r'^· [✔🚫]\s*', '', line)
-                        conditions.append({
-                            "status": "passed" if status == '✔' else "failed",
-                            "condition": condition
-                        })
-
-                # Add to failed conditions summary
-                for cond in conditions:
-                    if cond['status'] == 'failed':
-                        cond_text = cond['condition']
-                        if cond_text not in rule_data['failed_conditions']:
-                            rule_data['failed_conditions'][cond_text] = 0
-                        rule_data['failed_conditions'][cond_text] += 1
-
-                rule_data['files'].append({
-                    "file": file_name,
-                    "conditions": conditions
-                })
-
-            result['failed'].append(rule_data)
+        # Parse failed rules
+        failed_rules = re.split(r"\n ❌ ", failed_section)
+        result = self.__parse_fail_rules(failed_rules, result)
 
         # Parse passed rules
-        passed_rules = re.split(r
-
-            rule_name = rule_text.split(' ✅')[0].strip()
-            rule_data = {
-                "rule": rule_name,
-                "sub_rule": None,
-                "files": []
-            }
-
-            # Get sub-rule
-            sub_rule_match = re.search(r'Sub-rule: (.*?)\n', rule_text)
-            if sub_rule_match:
-                rule_data['sub_rule'] = sub_rule_match.group(1).strip()
-
-            # Get files passed
-            files_passed = re.search(r'List of files passed:(.*?)(?=\n\n|\Z)', rule_text, re.DOTALL)
-            if files_passed:
-                for line in files_passed.group(1).split('\n'):
-                    line = line.strip()
-                    if line.startswith('·'):
-                        file_match = re.match(r'· (.*?) \((\d+)/(\d+)\)', line)
-                        if file_match:
-                            rule_data['files'].append({
-                                "file": file_match.group(1).strip(),
-                                "passed_conditions": int(file_match.group(2)),
-                            })
-
-            result['passed'].append(rule_data)
+        passed_rules = re.split(r"\n ✅ ", passed_section)
+        result = self.__parse_pass_rules(passed_rules, result)
 
         return result
 
@@ -2272,19 +2217,15 @@ class Project:
 
         # Initialize statistics
        stats = {
-
-
+            "passed_rules": 0,
+            "failed_rules": 0,
             "subjects_passed": 0,
             "subjects_with_failed": 0,
-
-
-
-
-
-                'pass_percentage': 0.0
-            },
-            'condition_failure_rates': defaultdict(lambda: {'count': 0, 'percentage': 0.0}),
-            'rule_success_rates': defaultdict(lambda: {'passed': 0, 'failed': 0, 'success_rate': 0.0}),
+            "num_passed_files_distribution": defaultdict(int),
+            # How many rules have N passed files
+            "file_stats": {"total": 0, "passed": 0, "failed": 0, "pass_percentage": 0.0},
+            "condition_failure_rates": defaultdict(lambda: {"count": 0, "percentage": 0.0}),
+            "rule_success_rates": defaultdict(lambda: {"passed": 0, "failed": 0, "success_rate": 0.0}),
         }
 
         total_failures = 0
@@ -2294,56 +2235,56 @@ class Project:
         # sum subjects with some failed qc message
         stats["subjects_with_failed"] = sum([1 for rules in qc_results_list if rules["failed"]])
         # sum rules that have passed
-        stats["passed_rules"] = sum([len(rules[
+        stats["passed_rules"] = sum([len(rules["passed"]) for rules in qc_results_list if rules["failed"]])
         # sum rules that have failed
-        stats["failed_rules"] = sum([len(rules[
+        stats["failed_rules"] = sum([len(rules["failed"]) for rules in qc_results_list if rules["failed"]])
 
         for qc_results in qc_results_list:
 
             # Count passed files distribution
-            for rule in qc_results[
-                num_files = len(rule[
-                stats[
-                stats[
-                stats[
-                rule_name = rule[
-                stats[
-
-            for rule in qc_results[
-                stats[
-                stats[
-                for condition, count in rule[
+            for rule in qc_results["passed"]:
+                num_files = len(rule["files"])
+                stats["num_passed_files_distribution"][num_files] += 1
+                stats["file_stats"]["passed"] += len(rule["files"])
+                stats["file_stats"]["total"] += len(rule["files"])
+                rule_name = rule["rule"]
+                stats["rule_success_rates"][rule_name]["passed"] += 1
+
+            for rule in qc_results["failed"]:
+                stats["file_stats"]["total"] += len(rule["files"])
+                stats["file_stats"]["failed"] += len(rule["files"])
+                for condition, count in rule["failed_conditions"].items():
                     # Extract just the condition text without actual value
-                    clean_condition = re.sub(r
-                    stats[
+                    clean_condition = re.sub(r"\.\s*Actual value:.*$", "", condition)
+                    stats["condition_failure_rates"][clean_condition]["count"] += count
                     total_failures += count
-                rule_name = rule[
-                stats[
+                rule_name = rule["rule"]
+                stats["rule_success_rates"][rule_name]["failed"] += 1
 
-        if stats[
-            stats[
-                (stats[
+        if stats["file_stats"]["total"] > 0:
+            stats["file_stats"]["pass_percentage"] = round(
+                (stats["file_stats"]["passed"] / stats["file_stats"]["total"]) * 100, 2
             )
 
         # Calculate condition failure percentages
-        for condition in stats[
+        for condition in stats["condition_failure_rates"]:
             if total_failures > 0:
-                stats[
-                    (stats[
+                stats["condition_failure_rates"][condition]["percentage"] = round(
+                    (stats["condition_failure_rates"][condition]["count"] / total_failures) * 100, 2
                 )
 
         # Calculate rule success rates
-        for rule in stats[
-            total = stats[
+        for rule in stats["rule_success_rates"]:
+            total = stats["rule_success_rates"][rule]["passed"] + stats["rule_success_rates"][rule]["failed"]
             if total > 0:
-                stats[
-                    (stats[
+                stats["rule_success_rates"][rule]["success_rate"] = round(
+                    (stats["rule_success_rates"][rule]["passed"] / total) * 100, 2
                 )
 
         # Convert defaultdict to regular dict for cleaner JSON output
-        stats[
-        stats[
-        stats[
+        stats["num_passed_files_distribution"] = dict(stats["num_passed_files_distribution"])
+        stats["condition_failure_rates"] = dict(stats["condition_failure_rates"])
+        stats["rule_success_rates"] = dict(stats["rule_success_rates"])
 
         return stats
 
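The aggregated stats dictionary built above can be consumed directly; a small hedged sketch of reading the keys it exposes (the printed values are whatever the QC results produce):

    # Hypothetical read-out of the stats dict returned above.
    print(f"Rules: {stats['passed_rules']} passed, {stats['failed_rules']} failed")
    print(f"Subjects: {stats['subjects_passed']} passed, {stats['subjects_with_failed']} with failures")
    print(f"File pass rate: {stats['file_stats']['pass_percentage']}%")
    for rule, rates in stats["rule_success_rates"].items():
        print(f"  {rule}: {rates['success_rate']}% ({rates['passed']} passed / {rates['failed']} failed)")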
@@ -2673,3 +2614,107 @@ class Project:
                 value.replace(d_type + ";", "")
                 file_metadata[d_tag] = {"operation": "in-list", "value": value.replace(d_type + ";", "").split(";")}
         return modality, tags, file_metadata
+
+    def __assert_split_data(self, split_data, ssid, add_to_container_id):
+        """
+        Assert if the split_data parameter is possible to use in regards
+        to the ssid and add_to_container_id parameters during upload.
+        Changes its status to False if needed.
+
+        Parameters
+        ----------
+        split_data : Bool
+            split_data parameter from method 'upload_file'.
+        ssid : str
+            Session ID.
+        add_to_container_id : int or bool
+            Container ID or False
+
+        Returns
+        -------
+        split_data : Bool
+
+        """
+
+        logger = logging.getLogger(logger_name)
+        if ssid and split_data:
+            logger.warning("split-data argument will be ignored because ssid has been specified")
+            split_data = False
+
+        if add_to_container_id and split_data:
+            logger.warning("split-data argument will be ignored because add_to_container_id has been specified")
+            split_data = False
+
+        return split_data
+
+    def __parse_fail_rules(self, failed_rules, result):
+        """
+        Parse fail rules.
+        """
+
+        for rule_text in failed_rules[1:]:  # Skip first empty part
+            rule_name = rule_text.split(" ❌")[0].strip()
+            rule_data = {"rule": rule_name, "files": [], "failed_conditions": {}}
+
+            # Extract all file comparisons for this rule
+            file_comparisons = re.split(r"\t- Comparison with file:", rule_text)
+            for comp in file_comparisons[1:]:  # Skip first part
+                file_name = comp.split("\n")[0].strip()
+                conditions_match = re.search(r"Conditions:(.*?)(?=\n\t- Comparison|\n\n|$)", comp, re.DOTALL)
+                if not conditions_match:
+                    continue
+
+                conditions_text = conditions_match.group(1).strip()
+                # Parse conditions
+                conditions = []
+                for line in conditions_text.split("\n"):
+                    line = line.strip()
+                    if line.startswith("·"):
+                        status = "✔" if "✔" in line else "🚫"
+                        condition = re.sub(r"^· [✔🚫]\s*", "", line)
+                        conditions.append({"status": "passed" if status == "✔" else "failed", "condition": condition})
+
+                # Add to failed conditions summary
+                for cond in conditions:
+                    if cond["status"] == "failed":
+                        cond_text = cond["condition"]
+                        if cond_text not in rule_data["failed_conditions"]:
+                            rule_data["failed_conditions"][cond_text] = 0
+                        rule_data["failed_conditions"][cond_text] += 1
+
+                rule_data["files"].append({"file": file_name, "conditions": conditions})
+
+            result["failed"].append(rule_data)
+        return result
+
+    def __parse_pass_rules(self, passed_rules, result):
+        """
+        Parse pass rules.
+        """
+
+        for rule_text in passed_rules[1:]:  # Skip first empty part
+            rule_name = rule_text.split(" ✅")[0].strip()
+            rule_data = {"rule": rule_name, "sub_rule": None, "files": []}
+
+            # Get sub-rule
+            sub_rule_match = re.search(r"Sub-rule: (.*?)\n", rule_text)
+            if sub_rule_match:
+                rule_data["sub_rule"] = sub_rule_match.group(1).strip()
+
+            # Get files passed
+            files_passed = re.search(r"List of files passed:(.*?)(?=\n\n|\Z)", rule_text, re.DOTALL)
+            if files_passed:
+                for line in files_passed.group(1).split("\n"):
+                    line = line.strip()
+                    if line.startswith("·"):
+                        file_match = re.match(r"· (.*?) \((\d+)/(\d+)\)", line)
+                        if file_match:
+                            rule_data["files"].append(
+                                {
+                                    "file": file_match.group(1).strip(),
+                                    "passed_conditions": int(file_match.group(2)),
+                                }
+                            )
+
+            result["passed"].append(rule_data)
+        return result
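The new __assert_split_data guard simply downgrades split_data to False (with a warning) whenever ssid or add_to_container_id is set. A standalone rephrasing of that decision, for illustration only and independent of the Project class:

    import logging

    def resolve_split_data(split_data, ssid, add_to_container_id):
        # Illustrative stand-in for Project.__assert_split_data: split_data is
        # dropped whenever a session id or a target container is specified.
        logger = logging.getLogger(__name__)
        if split_data and (ssid or add_to_container_id):
            logger.warning("split-data will be ignored because ssid or add_to_container_id was specified")
            return False
        return split_data

    assert resolve_split_data(True, ssid="1", add_to_container_id=0) is False
    assert resolve_split_data(True, ssid="", add_to_container_id=0) is True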
qmenta_client-2.1.dev1508.dist-info/RECORD
CHANGED
@@ -1,10 +1,10 @@
 qmenta/__init__.py,sha256=ED6jHcYiuYpr_0vjGz0zx2lrrmJT9sDJCzIljoDfmlM,65
 qmenta/client/Account.py,sha256=7BOWHtRbHdfpBYQqv9v2m2Fag13pExZSxFsjDA7UsW0,9500
 qmenta/client/File.py,sha256=iCrzrd7rIfjjW2AgMgUoK-ZF2wf-95wCcPKxKw6PGyg,4816
-qmenta/client/Project.py,sha256=
+qmenta/client/Project.py,sha256=TP_CHoyKQsqlQ7AALnjx2kwcgQ-5os26lJGH5XW--5E,101305
 qmenta/client/Subject.py,sha256=b5sg9UFtn11bmPM-xFXP8aehOm_HGxnhgT7IPKbrZnE,8688
 qmenta/client/__init__.py,sha256=Mtqe4zf8n3wuwMXSALENQgp5atQY5VcsyXWs2hjBs28,133
 qmenta/client/utils.py,sha256=vWUAW0r9yDetdlwNo86sdzKn03FNGvwa7D9UtOA3TEc,2419
-qmenta_client-2.
-qmenta_client-2.
-qmenta_client-2.
+qmenta_client-2.1.dev1508.dist-info/METADATA,sha256=zV7x_6f673FhaUoFQ8f_5f42D4VGxojIx0mCwuQP8cg,672
+qmenta_client-2.1.dev1508.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+qmenta_client-2.1.dev1508.dist-info/RECORD,,