ara-cli 0.1.9.50__py3-none-any.whl → 0.1.9.51__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ara_cli/__main__.py +3 -1
- ara_cli/analyse_artefacts.py +133 -0
- ara_cli/ara_command_action.py +71 -50
- ara_cli/ara_command_parser.py +5 -0
- ara_cli/ara_config.py +65 -38
- ara_cli/artefact_lister.py +60 -49
- ara_cli/artefact_models/artefact_model.py +9 -7
- ara_cli/artefact_models/feature_artefact_model.py +4 -1
- ara_cli/artefact_reader.py +104 -57
- ara_cli/artefact_scan.py +46 -0
- ara_cli/file_classifier.py +21 -13
- ara_cli/prompt_extractor.py +10 -2
- ara_cli/tag_extractor.py +6 -16
- ara_cli/templates/specification_breakdown_files/template.concept.md +12 -14
- ara_cli/tests/test_ara_command_action.py +242 -108
- ara_cli/tests/test_artefact_lister.py +552 -183
- ara_cli/tests/test_artefact_reader.py +18 -46
- ara_cli/tests/test_artefact_scan.py +126 -0
- ara_cli/tests/test_file_classifier.py +68 -29
- ara_cli/tests/test_tag_extractor.py +42 -61
- ara_cli/version.py +1 -1
- {ara_cli-0.1.9.50.dist-info → ara_cli-0.1.9.51.dist-info}/METADATA +1 -1
- {ara_cli-0.1.9.50.dist-info → ara_cli-0.1.9.51.dist-info}/RECORD +26 -23
- {ara_cli-0.1.9.50.dist-info → ara_cli-0.1.9.51.dist-info}/WHEEL +0 -0
- {ara_cli-0.1.9.50.dist-info → ara_cli-0.1.9.51.dist-info}/entry_points.txt +0 -0
- {ara_cli-0.1.9.50.dist-info → ara_cli-0.1.9.51.dist-info}/top_level.txt +0 -0
ara_cli/artefact_lister.py
CHANGED

```diff
@@ -1,8 +1,7 @@
 from ara_cli.file_classifier import FileClassifier
-from ara_cli.template_manager import DirectoryNavigator
 from ara_cli.artefact_reader import ArtefactReader
 from ara_cli.file_lister import list_files_in_directory
-from ara_cli.
+from ara_cli.artefact_models.artefact_model import Artefact
 from ara_cli.list_filter import ListFilter, filter_list
 from ara_cli.artefact_fuzzy_search import suggest_close_name_matches
 import os
@@ -13,16 +12,26 @@ class ArtefactLister:
         self.file_system = file_system or os

     @staticmethod
-    def artefact_content_retrieval(artefact):
-
+    def artefact_content_retrieval(artefact: Artefact):
+        content = artefact.serialize()
+        return content

     @staticmethod
-    def artefact_path_retrieval(artefact):
+    def artefact_path_retrieval(artefact: Artefact):
         return artefact.file_path

     @staticmethod
-    def artefact_tags_retrieval(artefact):
-
+    def artefact_tags_retrieval(artefact: Artefact):
+        final_tags = []
+
+        if not artefact:
+            return []
+
+        final_tags.extend([f"user_{user}" for user in artefact.users])
+        final_tags.append(artefact.status)
+        final_tags.extend(artefact.tags)
+
+        return final_tags

     def filter_artefacts(self, classified_files: list, list_filter: ListFilter):
         filtered_list = filter_list(
@@ -40,17 +49,15 @@ class ArtefactLister:
         navigate_to_target=False,
         list_filter: ListFilter | None = None
     ):
-
-
-        if navigate_to_target:
-            navigator.navigate_to_target()
+        artefact_list = ArtefactReader.read_artefacts(tags=tags)
+        artefact_list = self.filter_artefacts(artefact_list, list_filter)

+        filtered_artefact_list = {
+            key: [artefact for artefact in value if artefact is not None]
+            for key, value in artefact_list.items()
+        }
         file_classifier = FileClassifier(self.file_system)
-
-
-        classified_files = self.filter_artefacts(classified_files, list_filter)
-
-        file_classifier.print_classified_files(classified_files)
+        file_classifier.print_classified_files(filtered_artefact_list)

     def list_branch(
         self,
@@ -59,21 +66,23 @@ class ArtefactLister:
         list_filter: ListFilter | None = None
     ):
         file_classifier = FileClassifier(os)
+        classified_artefacts = file_classifier.classify_files_new()
+        artefact_info = classified_artefacts.get(classifier, [])
+        matching_artefact_info = [p for p in artefact_info if p["title"] == artefact_name]

-
-
-
-
-
+        if not matching_artefact_info:
+            suggest_close_name_matches(
+                artefact_name,
+                [info["title"] for info in artefact_info]
+            )

         artefacts_by_classifier = {classifier: []}
         ArtefactReader.step_through_value_chain(
             artefact_name=artefact_name,
             classifier=classifier,
-            artefacts_by_classifier=artefacts_by_classifier
+            artefacts_by_classifier=artefacts_by_classifier,
         )
         artefacts_by_classifier = self.filter_artefacts(artefacts_by_classifier, list_filter)
-
         file_classifier.print_classified_files(artefacts_by_classifier)

     def list_children(
@@ -83,22 +92,24 @@ class ArtefactLister:
         list_filter: ListFilter | None = None
     ):
         file_classifier = FileClassifier(os)
+        classified_artefacts = file_classifier.classify_files_new()
+        artefact_info = classified_artefacts.get(classifier, [])
+        matching_artefact_info = [p for p in artefact_info if p["title"] == artefact_name]

-
-
-
-
-
+        if not matching_artefact_info:
+            suggest_close_name_matches(
+                artefact_name,
+                [info["title"] for info in artefact_info]
+            )

         child_artefacts = ArtefactReader.find_children(
-
-
+            artefact_name=artefact_name,
+            classifier=classifier
         )
+
         child_artefacts = self.filter_artefacts(child_artefacts, list_filter)

-        file_classifier.print_classified_files(
-            files_by_classifier=child_artefacts
-        )
+        file_classifier.print_classified_files(child_artefacts)

     def list_data(
         self,
@@ -107,21 +118,21 @@ class ArtefactLister:
         list_filter: ListFilter | None = None
     ):
         file_classifier = FileClassifier(os)
-
         classified_artefacts = file_classifier.classify_files()
-
-        if artefact_name
-
+        artefact_paths = classified_artefacts.get(classifier, [])
+        matching_paths = [p for p in artefact_paths if os.path.basename(p).startswith(artefact_name + '.')]
+
+        if not matching_paths:
+            suggest_close_name_matches(
+                artefact_name,
+                [os.path.basename(f).split('.')[0] for f in artefact_paths]
+            )
+        artefact_paths = classified_artefacts.get(classifier, [])
+        matching_paths = [p for p in artefact_paths if os.path.basename(p).startswith(artefact_name + '.')]
+
+        if len(matching_paths) == 0:
             return
-
-
-
-
-        )
-
-        artefact = Artefact.from_content(content)
-        file_path = next((classified_artefact.file_path for classified_artefact in classified_artefacts.get(classifier, []) if classified_artefact.name == artefact.name), artefact)
-
-        file_path = os.path.splitext(file_path)[0] + '.data'
-        if os.path.exists(file_path):
-            list_files_in_directory(file_path, list_filter)
+        artefact_path = matching_paths[0]
+        data_dir = os.path.splitext(artefact_path)[0] + '.data'
+        if os.path.exists(data_dir):
+            list_files_in_directory(data_dir, list_filter)
```
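The retrieval helpers above now take full Artefact objects instead of plain file paths. As a quick illustration of the new tag flattening, here is a minimal sketch; the SimpleNamespace stand-in and the example values are mine, not part of the package:

```python
from types import SimpleNamespace

from ara_cli.artefact_lister import ArtefactLister

# Hypothetical stand-in for a real Artefact (a pydantic model in
# ara_cli.artefact_models.artefact_model); only the fields the helper touches.
artefact = SimpleNamespace(users=["alice"], status="in_progress", tags=["billing"])

# Per the diff, artefact_tags_retrieval flattens users, status and tags into one list.
print(ArtefactLister.artefact_tags_retrieval(artefact))
# -> ['user_alice', 'in_progress', 'billing']
```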
ara_cli/artefact_models/artefact_model.py
CHANGED

```diff
@@ -90,7 +90,7 @@ class Contribution(BaseModel):
         rule = rule_text
         parent_text_list = parent_text.split(' ')
         classifier = parent_text_list[-1].lower()
-        artefact_name = '_'.join(parent_text_list[:-1])
+        artefact_name = '_'.join([s for s in parent_text_list if s][:-1])

         return cls(
             artefact_name=artefact_name,
@@ -165,6 +165,14 @@ class Artefact(BaseModel, ABC):
         default=None,
         description="Optional further description to understand the artefact. It is strongly recommended to add a description to every artefact."
     )
+    _file_path: Optional[str] = None
+
+    @property
+    def file_path(self) -> str:
+        if self._file_path is not None:
+            return self._file_path
+        sub_dir = Classifier.get_sub_directory(self.artefact_type)
+        return f"{sub_dir}/{self.title}.{self.artefact_type}"

     @field_validator('artefact_type')
     def validate_artefact_type(cls, v):
@@ -230,12 +238,6 @@ class Artefact(BaseModel, ABC):
     def _artefact_type(cls) -> ArtefactType:  # pragma: no cover
         pass

-    def file_location(self) -> str:
-        artefact_type = self.artefact_type
-        sub_directory = Classifier.get_sub_directory(artefact_type)
-        file_path = f"ara/{sub_directory}/{self.title}.{artefact_type}"
-        return file_path
-
     @classmethod
     def _deserialize_tags(cls, lines) -> (Dict[str, str], List[str]):
         assert len(lines) > 0, "Empty lines given, can't extract tags"
```
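The new file_path property replaces the removed file_location() helper: it prefers the _file_path that the readers now set when loading from disk, and only falls back to a constructed path without the old hard-coded "ara/" prefix. A standalone sketch of that fallback logic, with a hypothetical sub-directory mapping standing in for Classifier.get_sub_directory:

```python
from typing import Optional

# Stand-in for Classifier.get_sub_directory; the real mapping lives in
# ara_cli.classifier and is not part of this diff.
_SUB_DIRS = {"feature": "features", "task": "tasks"}


def resolve_file_path(stored_path: Optional[str], title: str, artefact_type: str) -> str:
    """Sketch of the new Artefact.file_path property."""
    if stored_path is not None:
        # Readers now stash the real on-disk path in artefact._file_path.
        return stored_path
    sub_dir = _SUB_DIRS.get(artefact_type, artefact_type)
    return f"{sub_dir}/{title}.{artefact_type}"


print(resolve_file_path(None, "checkout_flow", "feature"))
# -> features/checkout_flow.feature (the removed file_location() would have
#    prefixed this with "ara/")
```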
ara_cli/artefact_models/feature_artefact_model.py
CHANGED

```diff
@@ -85,7 +85,7 @@ class Example(BaseModel):
     def from_row(cls, headers: List[str], row: List[str]) -> 'Example':
         if len(row) != len(headers):
             raise ValueError(
-                f"Row has {len(row)} cells, but expected {len(headers)}")
+                f"Row has {len(row)} cells, but expected {len(headers)}.\nFound row: {row}")
         values = {header: value.strip() for header, value in zip(headers, row)}
         return cls(values=values)

@@ -175,6 +175,7 @@ class ScenarioOutline(BaseModel):
     @classmethod
     def from_lines(cls, lines: List[str], start_idx: int) -> Tuple['ScenarioOutline', int]:
         """Parse a ScenarioOutline from a list of lines starting at start_idx."""
+
         if not lines[start_idx].startswith('Scenario Outline:'):
             raise ValueError("Expected 'Scenario Outline:' at start index")
         title = lines[start_idx][len('Scenario Outline:'):].strip()
@@ -190,6 +191,8 @@ class ScenarioOutline(BaseModel):
             headers = [h.strip() for h in lines[idx].split('|') if h.strip()]
             idx += 1
             while idx < len(lines) and lines[idx].strip():
+                if lines[idx].strip().startswith("Scenario:") or lines[idx].strip().startswith("Scenario Outline:"):
+                    break
                 row = [cell.strip()
                        for cell in lines[idx].split('|') if cell.strip()]
                 example = Example.from_row(headers, row)
```
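The extra guard in the Examples-table loop stops parsing when the next "Scenario:" or "Scenario Outline:" heading follows the table without a blank separator line, instead of feeding that heading to Example.from_row and tripping the cell-count check. A standalone sketch of the loop's new exit condition (the helper below is illustrative, not the package's parser):

```python
def collect_example_rows(lines, idx):
    """Collect raw Examples-table rows, stopping at the next scenario heading (sketch)."""
    rows = []
    while idx < len(lines) and lines[idx].strip():
        stripped = lines[idx].strip()
        # New in 0.1.9.51: a following "Scenario:" / "Scenario Outline:" heading ends the table.
        if stripped.startswith("Scenario:") or stripped.startswith("Scenario Outline:"):
            break
        rows.append([cell.strip() for cell in stripped.split('|') if cell.strip()])
        idx += 1
    return rows, idx


text = [
    "| amount | result |",
    "| 10     | ok     |",
    "Scenario: next one starts immediately",
]
rows, _ = collect_example_rows(text, 0)
print(rows)  # [['amount', 'result'], ['10', 'ok']]
```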
ara_cli/artefact_reader.py
CHANGED

```diff
@@ -1,8 +1,9 @@
 from ara_cli.directory_navigator import DirectoryNavigator
 from ara_cli.classifier import Classifier
 from ara_cli.file_classifier import FileClassifier
-from ara_cli.
-from ara_cli.artefact_fuzzy_search import
+from ara_cli.artefact_models.artefact_model import Artefact
+from ara_cli.artefact_fuzzy_search import suggest_close_name_matches_for_parent, suggest_close_name_matches
+from typing import Dict, List
 import os
 import re

@@ -19,22 +20,25 @@ class ArtefactReader:
             os.chdir(original_directory)
             return None, None

-
-
+        file_classifier = FileClassifier(os)
+        classified_files = file_classifier.classify_files()
+        artefact_paths = classified_files.get(classifier, [])

-
+        for file_path in artefact_paths:
+            if os.path.basename(file_path).startswith(artefact_name + '.'):
+                with open(file_path, 'r') as file:
+                    content = file.read()
+                os.chdir(original_directory)
+                return content, file_path

-
-
-
-
-
-        with open(file_path, 'r') as file:
-            content = file.read()
+        print(f"No match found for artefact with name '{artefact_name}'")
+        suggest_close_name_matches(
+            artefact_name,
+            [os.path.basename(f).split('.')[0] for f in artefact_paths]
+        )

         os.chdir(original_directory)
-
-        return content, file_path
+        return None, None

     @staticmethod
     def extract_parent_tree(artefact_content):
@@ -53,41 +57,77 @@ class ArtefactReader:
         return parent_name, parent_type

     @staticmethod
-    def
+    def merge_dicts(dict1, dict2):
+        from collections import defaultdict

-
-
+        merged = defaultdict(list)
+        for d in [dict1, dict2]:
+            for key, artefacts in d.items():
+                merged[key].extend(artefacts)
+        return dict(merged)

-
-
-
-        for key, artefact_list in dict1.items():
-            merged_dict[key].extend(artefact_list)
+    @staticmethod
+    def read_artefacts(classified_artefacts=None, file_system=os, tags=None) -> Dict[str, List[Artefact]]:
+        from ara_cli.artefact_models.artefact_load import artefact_from_content

-
-
-
+        if classified_artefacts is None:
+            file_classifier = FileClassifier(file_system)
+            classified_artefacts = file_classifier.classify_files_new()
+
+        artefacts = {artefact_type: []
+                     for artefact_type in classified_artefacts.keys()}
+        for artefact_type, artefact_info_dicts in classified_artefacts.items():
+            for artefact_info in artefact_info_dicts:
+                try:
+                    with open(artefact_info["file_path"], 'r') as file:
+                        content = file.read()
+                    artefact = artefact_from_content(content)
+                    if not artefact:
+                        continue
+                    # Store the full file path in the artefact
+                    artefact._file_path = artefact_info["file_path"]
+                    artefacts[artefact_type].append(artefact)
+                    # else:
+                    #     Include file path if deserialization fails
+                    #     FIXME: LOOK INTO IT
+                    #     artefacts[artefact_type].append(file_path)
+                except Exception:
+                    # TODO: catch only specific exceptions
+                    # TODO: implament error message for deserialization or "ara scan" or "ara autofix"
+                    # print(f"Warning: Could not deserialize artefact at {artefact_info}: {e}")
+                    # artefacts[artefact_type].append(file_path)
+                    continue
+        return artefacts

-
+    @staticmethod
+    def find_children(artefact_name, classifier, artefacts_by_classifier={}, classified_artefacts=None):
+        artefacts_by_classifier = artefacts_by_classifier or {}
+        filtered_artefacts = {k: [] for k in artefacts_by_classifier.keys()}

         if classified_artefacts is None:
-
-            classified_artefacts = file_classifier.classify_files()
+            classified_artefacts = ArtefactReader.read_artefacts()

-
-
-
-
-            if artefact.parent and
-                artefact.parent.name == artefact_name and
-                artefact.parent.classifier == classifier
-        ]
-        if filtered_list:
-            filtered_artefacts[key] = filtered_list
+        for artefact_classifier, artefacts in classified_artefacts.items():
+            for artefact in artefacts:
+                if not isinstance(artefact, Artefact):
+                    continue

-
+                try:
+                    contribution = artefact.contribution
+                    if (contribution and
+                            contribution.artefact_name == artefact_name and
+                            contribution.classifier == classifier):

-
+                        file_classifier = artefact._file_path.split('.')[-1]
+
+                        if file_classifier not in filtered_artefacts:
+                            filtered_artefacts[file_classifier] = []
+                        filtered_artefacts[file_classifier].append(artefact)
+
+                except AttributeError as e:
+                    continue
+
+        return ArtefactReader.merge_dicts(artefacts_by_classifier, filtered_artefacts)

     @staticmethod
     def step_through_value_chain(
@@ -95,34 +135,41 @@ class ArtefactReader:
         classifier,
         artefacts_by_classifier={},
         classified_artefacts: dict[str, list[Artefact]] | None = None
-
-
-        file_classifier = FileClassifier(os)
-        classified_artefacts = file_classifier.classify_files()
-
-        content, file_path = ArtefactReader.read_artefact(artefact_name, classifier)
-
-        artefact = Artefact.from_content(content)
-        artefact_path = next((classified_artefact.file_path for classified_artefact in classified_artefacts.get(classifier, []) if classified_artefact.name == artefact.name), artefact.file_path)
-        artefact._file_path = artefact_path
+    ):
+        from ara_cli.artefact_models.artefact_load import artefact_from_content

+        if classified_artefacts is None:
+            classified_artefacts = ArtefactReader.read_artefacts()

         if classifier not in artefacts_by_classifier:
             artefacts_by_classifier[classifier] = []

-
-
+        artefact = next(filter(
+            lambda x: x.title == artefact_name, classified_artefacts[classifier]
+        ))
+
+        if not artefact:
+            return
+        if artefact in artefacts_by_classifier[classifier]:
             return

         artefacts_by_classifier[classifier].append(artefact)
-
-
-
+
+        parent = artefact.contribution
+        if parent and parent.artefact_name and parent.classifier:
+            parent_name = parent.artefact_name
             parent_classifier = parent.classifier

-
+            parent_classifier_artefacts = classified_artefacts[parent_classifier]
+            all_artefact_names = [x.title for x in parent_classifier_artefacts]
+
             if parent_name not in all_artefact_names:
-
+                if parent_name is not None:
+                    suggest_close_name_matches_for_parent(
+                        artefact_name,
+                        all_artefact_names,
+                        parent_name
+                    )
                 print()
                 return

```
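Most listing paths now go through ArtefactReader.read_artefacts(), which returns a dict keyed by classifier containing fully deserialized Artefact objects and silently skips files it cannot deserialize (see the TODO comments above). A minimal usage sketch, assuming it runs inside an ara workspace; the artefact name and classifier below are examples only:

```python
import os

from ara_cli.artefact_reader import ArtefactReader

# Run from inside an ara workspace so FileClassifier can find the artefact files.
artefacts_by_classifier = ArtefactReader.read_artefacts(file_system=os)

for classifier, artefacts in artefacts_by_classifier.items():
    for artefact in artefacts:
        # _file_path was set from the classified file info, so file_path is the
        # real on-disk location rather than the constructed fallback.
        print(classifier, artefact.title, artefact.file_path)

# Children are now resolved from the parsed contribution field; the artefact
# name below is purely an example.
children = ArtefactReader.find_children(artefact_name="checkout_flow", classifier="feature")
```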
ara_cli/artefact_scan.py
ADDED

```diff
@@ -0,0 +1,46 @@
+def check_file(file_path, artefact_class):
+    from pydantic import ValidationError
+    try:
+        with open(file_path, "r", encoding="utf-8") as f:
+            content = f.read()
+    except OSError as e:
+        return False, f"File error: {e}"
+    try:
+        artefact_class.deserialize(content)
+        return True, None
+    except (ValidationError, ValueError, AssertionError) as e:
+        return False, str(e)
+    except Exception as e:
+        return False, f"Unexpected error: {e!r}"
+
+
+def find_invalid_files(classified_artefact_info, classifier):
+    from ara_cli.artefact_models.artefact_mapping import artefact_type_mapping
+
+    artefact_class = artefact_type_mapping[classifier]
+    invalid_files = []
+    for artefact_info in classified_artefact_info[classifier]:
+        if "templates/" in artefact_info["file_path"]:
+            continue
+        is_valid, reason = check_file(artefact_info["file_path"], artefact_class)
+        if not is_valid:
+            invalid_files.append((artefact_info["file_path"], reason))
+    return invalid_files
+
+
+def show_results(invalid_artefacts):
+    has_issues = False
+    with open("incompatible_artefacts_report.md", "w") as report:
+        report.write("# Artefact Check Report\n\n")
+        for classifier, files in invalid_artefacts.items():
+            if files:
+                has_issues = True
+                print(f"\nIncompatible {classifier} Files:")
+                report.write(f"## {classifier}\n")
+                for file, reason in files:
+                    print(f"\t- {file}")
+                    report.write(f"- `{file}`: {reason}\n")
+                report.write("\n")
+        if not has_issues:
+            print("All files are good!")
+            report.write("No problems found.\n")
```
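The new module is a set of plain functions: check_file() validates one file against its pydantic artefact class, find_invalid_files() collects failures per classifier, and show_results() prints them and writes incompatible_artefacts_report.md. A plausible way to wire them together, sketched here because the actual CLI wiring is not shown in this diff:

```python
import os

from ara_cli.artefact_scan import find_invalid_files, show_results
from ara_cli.file_classifier import FileClassifier

# classify_files_new() returns {classifier: [{"file_path": ..., "title": ...}, ...]}
classified = FileClassifier(os).classify_files_new()

# Only scan classifiers that actually have files; classifiers missing from the
# artefact_type_mapping would need extra handling.
invalid = {
    classifier: find_invalid_files(classified, classifier)
    for classifier, infos in classified.items()
    if infos
}

show_results(invalid)  # prints offenders and writes incompatible_artefacts_report.md
```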
ara_cli/file_classifier.py
CHANGED

```diff
@@ -4,6 +4,7 @@ from ara_cli.artefact_fuzzy_search import find_closest_name_match
 from functools import lru_cache
 from typing import Optional
 import textwrap
+import os


 class FileClassifier:
@@ -12,7 +13,8 @@ class FileClassifier:

     def find_closest_artefact_name_match(self, name, classifier) -> Optional[str]:
         classified_artefacts = self.classify_files()
-        all_artefact_names = [
+        all_artefact_names = [
+            artefact_name for artefact_name in classified_artefacts.get(classifier, [])]
         if name in all_artefact_names:
             return name
         return find_closest_name_match(name, all_artefact_names)
@@ -62,9 +64,13 @@ class FileClassifier:
                 return classifier
         return None

-    def classify_files(self, tags=None, read_content=False) -> dict[str, list
-
+    def classify_files(self, tags=None, read_content=False) -> dict[str, list]:
+        from ara_cli.artefact_models.artefact_load import artefact_from_content
+
+        files_by_classifier = {classifier: []
+                               for classifier in Classifier.ordered_classifiers()}

+        # TODO: file location information inside the artefact.
         for root, _, files in self.file_system.walk("."):
             for file in files:
                 file_path = self.file_system.path.join(root, file)
@@ -73,23 +79,22 @@ class FileClassifier:
                     continue

                 if not read_content:
-
-                    artefact = Artefact(classifier=classifier, name=name, _file_path=file_path)
-                    files_by_classifier[classifier].append(artefact)
+                    files_by_classifier[classifier].append(file_path)
                     continue

                 with open(file_path, 'r') as f:
                     content = f.read()
                 try:
-                    artefact =
-                    artefact
-
+                    artefact = artefact_from_content(content)
+                    if artefact:
+                        artefact._file_path = file_path
+                        files_by_classifier[classifier].append(artefact)
                 except ValueError:
                     continue

         return files_by_classifier

-    def classify_files_new(self, tags=None) -> dict[str, list[
+    def classify_files_new(self, tags=None) -> dict[str, list[dict]]:
         files_by_classifier = {classifier: [] for classifier in Classifier.ordered_classifiers()}

         for root, _, files in self.file_system.walk("."):
@@ -97,18 +102,21 @@ class FileClassifier:
                 file_path = self.file_system.path.join(root, file)
                 classifier = self.classify_file(file_path, tags)
                 if not classifier:
+                    # no return
                     continue

-
+                file_info = {"file_path": file_path, "title": '.'.join(file.split('.')[:-1])}
+
+                files_by_classifier[classifier].append(file_info)
                 continue

         return files_by_classifier

     def print_artefact_list(self, artefacts: list[Artefact], print_content=False):
         for artefact in artefacts:
-            print(f" - {artefact.file_path}")
+            print(f" - ./{os.path.relpath(artefact.file_path, os.getcwd())}")
             if print_content:
-                indented_content = textwrap.indent(artefact.
+                indented_content = textwrap.indent(artefact.serialize(), prefix=" ")
                 print(f" Content:\n{indented_content}")

     def print_classified_files(self, files_by_classifier, print_content=False):
```
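classify_files() and classify_files_new() now return different shapes: paths or deserialized Artefact objects in the former, lightweight file-info dicts in the latter. A small consumption sketch, assuming an ara workspace; the "feature" key is only an example classifier:

```python
import os

from ara_cli.file_classifier import FileClassifier

fc = FileClassifier(os)

# New-style classification: {classifier: [{"file_path": "...", "title": "..."}, ...]}
info_by_classifier = fc.classify_files_new()
for info in info_by_classifier.get("feature", []):  # "feature" is just an example key
    print(info["title"], "->", info["file_path"])

# Old-style classification with read_content=True still yields Artefact objects,
# now carrying _file_path so printing shows the real relative location.
artefacts_by_classifier = fc.classify_files(read_content=True)
fc.print_classified_files(artefacts_by_classifier)
```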
ara_cli/prompt_extractor.py
CHANGED

```diff
@@ -1,5 +1,6 @@
 from ara_cli.prompt_handler import send_prompt, get_file_content
 from ara_cli.classifier import Classifier
+from ara_cli.directory_navigator import DirectoryNavigator
 from ara_cli.artefact_models.artefact_mapping import title_prefix_to_artefact_class
 import re
 import json
@@ -57,6 +58,7 @@ def extract_responses(document_path, relative_to_ara_root=False):
             # Update the markdown content
             updated_content = update_markdown(content, block, file_path)
         else:
+            # Extract artefact
             artefact_class = None
             for line in block_lines[:2]:
                 words = line.strip().split(' ')
@@ -71,13 +73,19 @@ def extract_responses(document_path, relative_to_ara_root=False):
                 continue
             artefact = artefact_class.deserialize('\n'.join(block_lines))
             serialized_artefact = artefact.serialize()
-
-
+
+            original_directory = os.getcwd()
+            directory_navigator = DirectoryNavigator()
+            directory_navigator.navigate_to_target()
+
+            artefact_path = artefact.file_path
             directory = os.path.dirname(artefact_path)
             os.makedirs(directory, exist_ok=True)
             with open(artefact_path, 'w') as file:
                 file.write(serialized_artefact)

+            os.chdir(original_directory)
+
             # TODO: make update_markdown work block by block instead of updating the whole document at once
             block_extraction_counter += 1
             updated_content = update_markdown(content, block, None)
```
ara_cli/tag_extractor.py
CHANGED

```diff
@@ -8,30 +8,20 @@ class TagExtractor:

     def extract_tags(self, navigate_to_target=False):
         from ara_cli.template_manager import DirectoryNavigator
-        from ara_cli.
+        from ara_cli.artefact_reader import ArtefactReader

         navigator = DirectoryNavigator()
         if navigate_to_target:
             navigator.navigate_to_target()

-
-        classified_files = file_classifier.classify_files_new()
+        artefacts = ArtefactReader.read_artefacts()

         unique_tags = set()

-        for
-            for artefact in
-
-
-                try:
-                    artefact_object = artefact_from_content(artefact_content)
-                except ValueError:
-                    continue
-                if not artefact_object:
-                    continue
-                status_list = ([artefact_object.status] if artefact_object.status is not None else [])
-                users_list = [f"user_{user}" for user in artefact_object.users]
-                tags = artefact_object.tags + users_list + status_list
+        for artefact_list in artefacts.values():
+            for artefact in artefact_list:
+                user_tags = [f"user_{tag}" for tag in artefact.users]
+                tags = [tag for tag in (artefact.tags + [artefact.status] + user_tags) if tag is not None]
                 unique_tags.update(tags)

         sorted_tags = sorted(unique_tags)
```