ara-cli 0.1.9.69__py3-none-any.whl → 0.1.9.71__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ara_cli/ara_command_action.py +16 -12
- ara_cli/ara_config.py +24 -10
- ara_cli/artefact_autofix.py +278 -23
- ara_cli/artefact_creator.py +3 -3
- ara_cli/artefact_fuzzy_search.py +9 -4
- ara_cli/artefact_link_updater.py +4 -4
- ara_cli/artefact_models/artefact_model.py +14 -7
- ara_cli/artefact_models/artefact_templates.py +1 -1
- ara_cli/artefact_models/feature_artefact_model.py +72 -18
- ara_cli/artefact_models/serialize_helper.py +1 -1
- ara_cli/artefact_reader.py +16 -38
- ara_cli/artefact_renamer.py +2 -2
- ara_cli/artefact_scan.py +28 -3
- ara_cli/chat.py +1 -1
- ara_cli/file_classifier.py +3 -3
- ara_cli/file_lister.py +1 -1
- ara_cli/list_filter.py +1 -1
- ara_cli/output_suppressor.py +1 -1
- ara_cli/prompt_extractor.py +3 -3
- ara_cli/prompt_handler.py +9 -10
- ara_cli/prompt_rag.py +2 -2
- ara_cli/template_manager.py +2 -2
- ara_cli/templates/prompt-modules/blueprints/complete_pytest_unittest.blueprint.md +1 -1
- ara_cli/update_config_prompt.py +2 -2
- ara_cli/version.py +1 -1
- {ara_cli-0.1.9.69.dist-info → ara_cli-0.1.9.71.dist-info}/METADATA +1 -1
- {ara_cli-0.1.9.69.dist-info → ara_cli-0.1.9.71.dist-info}/RECORD +39 -39
- tests/test_ara_command_action.py +7 -7
- tests/{test_ara_autofix.py → test_artefact_autofix.py} +163 -29
- tests/test_artefact_link_updater.py +3 -3
- tests/test_artefact_renamer.py +2 -2
- tests/test_artefact_scan.py +52 -19
- tests/test_file_classifier.py +1 -1
- tests/test_file_lister.py +1 -1
- tests/test_list_filter.py +2 -2
- tests/test_update_config_prompt.py +2 -2
- {ara_cli-0.1.9.69.dist-info → ara_cli-0.1.9.71.dist-info}/WHEEL +0 -0
- {ara_cli-0.1.9.69.dist-info → ara_cli-0.1.9.71.dist-info}/entry_points.txt +0 -0
- {ara_cli-0.1.9.69.dist-info → ara_cli-0.1.9.71.dist-info}/top_level.txt +0 -0
ara_cli/artefact_link_updater.py
CHANGED
@@ -34,7 +34,7 @@ class ArtefactLinkUpdater:
         # Check if it's a file and not a directory
         elif self.file_system.path.isfile(item_path) and Classifier.is_valid_classifier(extension):
             # Read the content of the file
-            with open(item_path, 'r') as file:
+            with open(item_path, 'r', encoding='utf-8') as file:
                 content = file.read()

             # Replace all occurrences of the old name with the new name using regular expressions
@@ -42,7 +42,7 @@ class ArtefactLinkUpdater:
             content = pattern.sub(replacement, content)

             # Write the updated content back to the file
-            with open(item_path, 'w') as file:
+            with open(item_path, 'w', encoding='utf-8') as file:
                 file.write(content)

     def remove_links_in_related_artefacts(self, artefact_name, dir_path="."):
@@ -65,12 +65,12 @@ class ArtefactLinkUpdater:

         # Check if it's a file and not a directory, and if extension is a valid artefact classifier
         elif self.file_system.path.isfile(item_path) and Classifier.is_valid_classifier(extension):
-            with open(item_path, 'r') as file:
+            with open(item_path, 'r', encoding='utf-8') as file:
                 content = file.read()

             # Remove the artefact name from 'Contributes to' and 'Illustrates' lines
             content = contribute_pattern.sub("Contributes to", content)
             content = illustrates_pattern.sub("Illustrates", content)

-            with open(item_path, 'w') as file:
+            with open(item_path, 'w', encoding='utf-8') as file:
                 file.write(content)
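The change repeated across most files in this release is an explicit `encoding='utf-8'` argument on `open()` calls. Without it, Python falls back to the platform's preferred locale encoding (for example cp1252 on many Windows setups), so artefact files containing non-ASCII characters may not round-trip. A minimal, self-contained sketch of the difference; the temporary file and sample text are illustrative only:

```python
import locale
import os
import tempfile

# Text with a non-ASCII character, as might appear in an artefact.
text = "Contributes to Geschäftsziel"
path = os.path.join(tempfile.mkdtemp(), "example.feature")

# Explicit encoding: identical behaviour on every platform.
with open(path, "w", encoding="utf-8") as f:
    f.write(text)
with open(path, "r", encoding="utf-8") as f:
    assert f.read() == text

# Omitting the argument uses the locale's preferred encoding,
# which is not guaranteed to be UTF-8.
print("platform default encoding:", locale.getpreferredencoding(False))
```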
ara_cli/artefact_models/artefact_model.py
CHANGED
@@ -197,6 +197,15 @@ class Artefact(BaseModel, ABC):
         self._file_path = file_path
         return self

+    @model_validator(mode="after")
+    def validate_contribution(self):
+        contribution = self.contribution
+        classifier = self.artefact_type.value
+        name = self.title
+        if not contribution:
+            warnings.warn(f"Contribution of {classifier} '{name}' is not set and will be empty")
+        return self
+
     @field_validator('artefact_type')
     def validate_artefact_type(cls, v):
         if not isinstance(v, ArtefactType):
@@ -230,7 +239,11 @@ class Artefact(BaseModel, ABC):
     def validate_title(cls, v):
         if not v.strip():
             raise ValueError("artefact_title must not be empty")
-        v = replace_space_with_underscore(v)
+        v = replace_space_with_underscore(v).strip()
+
+        whitelisted_placeholder = "<descriptive_title>"
+        if v == whitelisted_placeholder:
+            return v

         letters = list(string.ascii_letters)
         digits = list(string.digits)
@@ -243,12 +256,6 @@ class Artefact(BaseModel, ABC):

         return v

-    @field_validator('contribution')
-    def validate_contribution(cls, v):
-        if not v:
-            warnings.warn("Contribution is not set and will be empty")
-        return v
-
     @classmethod
     @abstractmethod
     def _title_prefix(cls) -> str:  # pragma: no cover
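The empty-contribution warning moves from a `@field_validator('contribution')` to a `@model_validator(mode="after")`. An after-model validator runs on the fully constructed instance, so the warning can name the artefact's type and title instead of being generic. A minimal sketch of the pattern with Pydantic v2; the model below is illustrative, not the actual `Artefact` class:

```python
import warnings
from typing import Optional
from pydantic import BaseModel, model_validator


class MiniArtefact(BaseModel):
    title: str
    artefact_type: str
    contribution: Optional[str] = None

    @model_validator(mode="after")
    def validate_contribution(self):
        # Runs after all fields are populated, so sibling fields are
        # available for a more specific warning message.
        if not self.contribution:
            warnings.warn(
                f"Contribution of {self.artefact_type} '{self.title}' "
                "is not set and will be empty"
            )
        return self


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    MiniArtefact(title="Example_title", artefact_type="feature")
    print(caught[0].message)
```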
ara_cli/artefact_models/artefact_templates.py
CHANGED
@@ -207,7 +207,7 @@ def _default_issue(title: str) -> IssueArtefact:
     )


-def template_artefact_of_type(artefact_type: ArtefactType, title: str) -> Artefact:
+def template_artefact_of_type(artefact_type: ArtefactType, title: str = "<descriptive_title>") -> Artefact:
     default_creation_functions = {
         ArtefactType.vision: _default_vision,
         ArtefactType.businessgoal: _default_businessgoal,
ara_cli/artefact_models/feature_artefact_model.py
CHANGED
@@ -363,27 +363,43 @@ class FeatureArtefact(Artefact):

     @classmethod
     def deserialize(cls, text: str) -> 'FeatureArtefact':
-
+        """
+        Deserializes the feature file using a robust extract-and-reinject strategy.
+        1. Hides all docstrings by replacing them with placeholders.
+        2. Parses the sanitized text using the original, simple parsing logic.
+        3. Re-injects the original docstring content back into the parsed objects.
+        This prevents the parser from ever being confused by content within docstrings.
+        """
+        # 1. Hide all docstrings from the entire file text first.
+        sanitized_text, docstrings = cls._hide_docstrings(text)
+
+        # 2. Perform the original parsing logic on the SANITIZED text.
+        # This part of the code is now "safe" because it will never see a docstring.
+        fields = super()._parse_common_fields(sanitized_text)
+        intent = FeatureIntent.deserialize(sanitized_text)
+        background = cls.deserialize_background(sanitized_text)
+        scenarios = cls.deserialize_scenarios(sanitized_text)

-        intent = FeatureIntent.deserialize(text)
-        background = cls.deserialize_background(text)
-        scenarios = cls.deserialize_scenarios(text)
-
-        fields['scenarios'] = scenarios
-        fields['background'] = background
         fields['intent'] = intent
+        fields['background'] = background
+        fields['scenarios'] = scenarios
+
+        # 3. Re-inject the docstrings back into the parsed scenarios.
+        if fields['scenarios'] and docstrings:
+            for scenario in fields['scenarios']:
+                if isinstance(scenario, (Scenario, ScenarioOutline)):
+                    scenario.steps = cls._reinject_docstrings_into_steps(scenario.steps, docstrings)

         return cls(**fields)

     @classmethod
     def deserialize_scenarios(cls, text):
-
-
-
+        if not text: return []
+        lines = [line.strip() for line in text.strip().splitlines() if line.strip()]
         scenarios = []
         idx = 0
         while idx < len(lines):
-           line = lines[idx]
+            line = lines[idx]
             if line.startswith('Scenario:'):
                 scenario, next_idx = Scenario.from_lines(lines, idx)
                 scenarios.append(scenario)
@@ -398,16 +414,54 @@ class FeatureArtefact(Artefact):

     @classmethod
     def deserialize_background(cls, text):
-
-
-
+        if not text: return None
+        lines = [line.strip() for line in text.strip().splitlines() if line.strip()]
         background = None
         idx = 0
         while idx < len(lines):
-           line = lines[idx]
+            line = lines[idx]
             if line.startswith('Background:'):
-                background,
+                background, _ = Background.from_lines(lines, idx)
                 break
-
-           idx += 1
+            idx += 1
         return background
+
+
+    @staticmethod
+    def _hide_docstrings(text: str) -> Tuple[str, Dict[str, str]]:
+        """
+        Finds all docstring blocks ('''...''') in the text,
+        replaces them with a unique placeholder, and returns the sanitized
+        text and a dictionary mapping placeholders to the original docstrings.
+        """
+        docstrings = {}
+        placeholder_template = "__DOCSTRING_PLACEHOLDER_{}__"
+
+        def replacer(match):
+            # This function is called for each found docstring.
+            key = placeholder_template.format(len(docstrings))
+            docstrings[key] = match.group(0)  # Store the full matched docstring
+            return key
+
+        # The regex finds ''' followed by any character (including newlines)
+        # in a non-greedy way (.*?) until the next '''.
+        sanitized_text = re.sub(r'"""[\s\S]*?"""', replacer, text)
+
+        return sanitized_text, docstrings
+
+    @staticmethod
+    def _reinject_docstrings_into_steps(steps: List[str], docstrings: Dict[str, str]) -> List[str]:
+        """
+        Iterates through a list of steps, finds any placeholders,
+        and replaces them with their original docstring content.
+        """
+        rehydrated_steps = []
+        for step in steps:
+            for key, value in docstrings.items():
+                if key in step:
+                    # Replace the placeholder with the original, full docstring block.
+                    # This handles cases where the step is just the placeholder,
+                    # or the placeholder is at the end of a line (e.g., "Then I see... __PLACEHOLDER__").
+                    step = step.replace(key, value)
+            rehydrated_steps.append(step)
+        return rehydrated_steps
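The feature deserializer now strips Gherkin docstrings out before the line-based scenario parsing runs, then puts them back afterwards, so keywords such as `Scenario:` inside a step's docstring can no longer derail the parser. A standalone sketch of that extract-and-reinject idea on a toy feature snippet; the helper names mirror the diff but the surrounding parsing is deliberately simplified:

```python
import re

FEATURE = '''Scenario: Upload a file
  When I post the payload
  """
  Scenario: this text must not be parsed as a new scenario
  """
  Then the upload succeeds
'''

def hide_docstrings(text):
    docstrings = {}
    def replacer(match):
        key = f"__DOCSTRING_PLACEHOLDER_{len(docstrings)}__"
        docstrings[key] = match.group(0)
        return key
    # Non-greedy match across newlines between triple double quotes.
    return re.sub(r'"""[\s\S]*?"""', replacer, text), docstrings

def reinject(steps, docstrings):
    restored = []
    for step in steps:
        for key, value in docstrings.items():
            if key in step:
                step = step.replace(key, value)
        restored.append(step)
    return restored

sanitized, docstrings = hide_docstrings(FEATURE)
steps = [line.strip() for line in sanitized.splitlines() if line.strip()]
# The naive line scan now sees exactly one "Scenario:" line.
assert sum(line.startswith("Scenario:") for line in steps) == 1
print("\n".join(reinject(steps, docstrings)))
```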
ara_cli/artefact_reader.py
CHANGED
@@ -10,20 +10,21 @@ import re

 class ArtefactReader:
     @staticmethod
-    def
+    def read_artefact_data(artefact_name, classifier, classified_file_info = None) -> tuple[str, dict[str, str]]:
         if not Classifier.is_valid_classifier(classifier):
             print("Invalid classifier provided. Please provide a valid classifier.")
             return None, None

-
-
+        if not classified_file_info:
+            file_classifier = FileClassifier(os)
+            classified_file_info = file_classifier.classify_files()
         artefact_info_of_classifier = classified_file_info.get(classifier, [])

         for artefact_info in artefact_info_of_classifier:
             file_path = artefact_info["file_path"]
             artefact_title = artefact_info["title"]
             if artefact_title == artefact_name:
-                with open(file_path, 'r') as file:
+                with open(file_path, 'r', encoding='utf-8') as file:
                     content = file.read()
                 return content, artefact_info

@@ -36,25 +37,14 @@ class ArtefactReader:
         return None, None

     @staticmethod
-    def
-
-
+    def read_artefact(artefact_name, classifier, classified_file_info=None) -> Artefact:
+        content, artefact_info = ArtefactReader.read_artefact_data(artefact_name, classifier, classified_file_info)
+        if not content or not artefact_info:
             return None
-
-
-
-
-
-        for artefact_info in artefact_info_of_classifier:
-            file_path = artefact_info["file_path"]
-            artefact_title = artefact_info["title"]
-            if artefact_title == artefact_name:
-                with open(file_path, 'r') as file:
-                    content = file.read()
-                artefact = artefact_from_content(content)
-                artefact._file_path = file_path
-                return artefact
-        return None
+        file_path = artefact_info["file_path"]
+        artefact = artefact_from_content(content)
+        artefact._file_path = file_path
+        return artefact

     @staticmethod
     def extract_parent_tree(artefact_content):
@@ -94,24 +84,12 @@ class ArtefactReader:
                      for artefact_type in classified_artefacts.keys()}
         for artefact_type, artefact_info_dicts in classified_artefacts.items():
             for artefact_info in artefact_info_dicts:
+                title = artefact_info["title"]
                 try:
-
-                    content = file.read()
-                    artefact = artefact_from_content(content)
-                    if not artefact:
-                        continue
-                    # Store the full file path in the artefact
-                    artefact._file_path = artefact_info["file_path"]
+                    artefact = ArtefactReader.read_artefact(title, artefact_type, classified_artefacts)
                     artefacts[artefact_type].append(artefact)
-
-
-                    # FIXME: LOOK INTO IT
-                    # artefacts[artefact_type].append(file_path)
-                except Exception:
-                    # TODO: catch only specific exceptions
-                    # TODO: implament error message for deserialization or "ara scan" or "ara autofix"
-                    # print(f"Warning: Could not deserialize artefact at {artefact_info}: {e}")
-                    # artefacts[artefact_type].append(file_path)
+                except Exception as e:
+                    print(f"Failed to read {artefact_type} '{title}' with an error: ", e)
                     continue
         return artefacts

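`read_artefact` is now a thin wrapper around the new `read_artefact_data`, and both take an optional pre-computed `classified_file_info`, so callers that have already run the file classifier (such as the bulk reader later in the same module) avoid re-walking the project tree for every artefact. A hedged usage sketch of the intended call pattern; the artefact name is illustrative and the classifier output shape is inferred from the diff:

```python
import os

from ara_cli.artefact_reader import ArtefactReader
from ara_cli.file_classifier import FileClassifier

# Classify once, then reuse the result for many lookups.
classified = FileClassifier(os).classify_files()

feature = ArtefactReader.read_artefact(
    artefact_name="my_feature",          # illustrative name
    classifier="feature",
    classified_file_info=classified,     # skips the internal classify_files() call
)

# Passing nothing still works; the reader classifies on demand.
same_feature = ArtefactReader.read_artefact("my_feature", "feature")
```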
ara_cli/artefact_renamer.py
CHANGED
@@ -75,7 +75,7 @@ class ArtefactRenamer:
             raise ValueError(f"Invalid classifier: {classifier}")

         # Read the file content
-        with open(artefact_path, 'r') as file:
+        with open(artefact_path, 'r', encoding='utf-8') as file:
             content = file.read()

         # Find the old title line
@@ -89,5 +89,5 @@ class ArtefactRenamer:
         new_content = content.replace(old_title_line, new_title_line, 1)

         # Write the updated content back to the file
-        with open(artefact_path, 'w') as file:
+        with open(artefact_path, 'w', encoding='utf-8') as file:
             file.write(new_content)
ara_cli/artefact_scan.py
CHANGED
@@ -2,13 +2,21 @@ from textwrap import indent
 import os


-def check_file(file_path, artefact_class):
+def check_file(file_path, artefact_class, classified_artefact_info=None):
     from pydantic import ValidationError
+    from ara_cli.artefact_fuzzy_search import extract_artefact_names_of_classifier
+    from ara_cli.file_classifier import FileClassifier
+
     try:
         with open(file_path, "r", encoding="utf-8") as f:
             content = f.read()
     except OSError as e:
         return False, f"File error: {e}"
+
+    if not classified_artefact_info:
+        file_classifier = FileClassifier(os)
+        classified_artefact_info = file_classifier.classify_files()
+
     try:
         artefact_instance = artefact_class.deserialize(content)

@@ -19,6 +27,23 @@ def check_file(file_path, artefact_class):
             reason = (f"Filename-Title Mismatch: The file name '{file_name_without_ext}' "
                       f"does not match the artefact title '{artefact_instance.title}'.")
             return False, reason
+
+        # Check contribution reference validity
+        contribution = artefact_instance.contribution
+        if contribution and contribution.artefact_name and contribution.classifier:
+
+            # Find all artefact names of the referenced classifier
+            all_artefact_names = extract_artefact_names_of_classifier(
+                classified_files=classified_artefact_info,
+                classifier=contribution.classifier
+            )
+
+            # Check if the referenced artefact exists
+            if contribution.artefact_name not in all_artefact_names:
+                reason = (f"Invalid Contribution Reference: The contribution references "
+                          f"'{contribution.classifier}' artefact '{contribution.artefact_name}' "
+                          f"which does not exist.")
+                return False, reason

         return True, None
     except (ValidationError, ValueError, AssertionError) as e:
@@ -37,7 +62,7 @@ def find_invalid_files(classified_artefact_info, classifier):
             continue
         if ".data" in artefact_info["file_path"]:
             continue
-        is_valid, reason = check_file(artefact_info["file_path"], artefact_class)
+        is_valid, reason = check_file(artefact_info["file_path"], artefact_class, classified_artefact_info)
         if not is_valid:
             invalid_files.append((artefact_info["file_path"], reason))
     return invalid_files
@@ -45,7 +70,7 @@ def find_invalid_files(classified_artefact_info, classifier):

 def show_results(invalid_artefacts):
     has_issues = False
-    with open("incompatible_artefacts_report.md", "w") as report:
+    with open("incompatible_artefacts_report.md", "w", encoding="utf-8") as report:
         report.write("# Artefact Check Report\n\n")
         for classifier, files in invalid_artefacts.items():
             if files:
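`check_file` now also rejects artefacts whose `Contributes to` reference points at an artefact that does not exist, reusing the same classified-file index that `find_invalid_files` already holds so the project is only walked once. A hedged sketch of how the new third argument is threaded through; the path and the choice of artefact class are illustrative:

```python
import os

from ara_cli.artefact_models.feature_artefact_model import FeatureArtefact
from ara_cli.artefact_scan import check_file
from ara_cli.file_classifier import FileClassifier

# Classify once so repeated check_file calls share the same index.
classified_info = FileClassifier(os).classify_files()

is_valid, reason = check_file(
    "ara/features/my_feature.feature",   # illustrative path
    FeatureArtefact,
    classified_artefact_info=classified_info,
)
if not is_valid:
    # e.g. "Invalid Contribution Reference: ... which does not exist."
    print(reason)
```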
ara_cli/chat.py
CHANGED
@@ -154,7 +154,7 @@ Start chatting (type 'HELP'/'h' for available commands, 'QUIT'/'q' to exit chat
         return None

     def start_non_interactive(self):
-        with open(self.chat_name, 'r') as file:
+        with open(self.chat_name, 'r', encoding='utf-8') as file:
             content = file.read()
         print(content)

ara_cli/file_classifier.py
CHANGED
@@ -1,6 +1,6 @@
 from ara_cli.classifier import Classifier
 from ara_cli.artefact_models.artefact_model import Artefact
-from ara_cli.artefact_fuzzy_search import
+from ara_cli.artefact_fuzzy_search import find_closest_name_matches
 from functools import lru_cache
 from typing import Optional
 import textwrap
@@ -17,7 +17,7 @@ class FileClassifier:
             info["title"] for info in classified_artefacts.get(classifier, [])]
         if name in all_artefact_names:
             return name
-        return
+        return find_closest_name_matches(name, all_artefact_names)

     @lru_cache(maxsize=None)
     def read_file_content(self, file_path):
@@ -68,7 +68,7 @@ class FileClassifier:
         files_by_classifier = {classifier: [] for classifier in Classifier.ordered_classifiers()}

         for root, _, files in self.file_system.walk("."):
-            if root.endswith(".data"):
+            if root.endswith(".data") or root.endswith("templates"):
                 continue
             for file in files:
                 file_path = self.file_system.path.join(root, file)
ara_cli/file_lister.py
CHANGED
@@ -28,7 +28,7 @@ def generate_markdown_listing(directories, file_types_to_be_listed, output_file_
             if any(fnmatch.fnmatch(file, pattern) for pattern in file_types_to_be_listed):
                 markdown_lines.append(f"{indent} - [] {file}")

-    with open(output_file_path, "w") as md_file:
+    with open(output_file_path, "w", encoding="utf-8") as md_file:
         md_file.write('\n'.join(markdown_lines))


ara_cli/list_filter.py
CHANGED
@@ -35,7 +35,7 @@ class ListFilterMonad:
         def default_content_retrieval(file):
             # Default strategy assumes file is a path and attempts to read it
             try:
-                with open(file, 'r') as f:
+                with open(file, 'r', encoding='utf-8') as f:
                     return f.read()
             except Exception as e:
                 print(f"Error reading file {file}: {e}")
ara_cli/output_suppressor.py
CHANGED
@@ -6,7 +6,7 @@ from contextlib import contextmanager
 @contextmanager
 def suppress_stdout(suppress=False):
     if suppress:
-        with open(os.devnull, "w") as devnull:
+        with open(os.devnull, "w", encoding="utf-8") as devnull:
             old_stdout = sys.stdout
             sys.stdout = devnull
             try:
ara_cli/prompt_extractor.py
CHANGED
@@ -21,7 +21,7 @@ def extract_responses(document_path, relative_to_ara_root=False):
     print(f"Debug: Starting extraction from {document_path}")
     block_extraction_counter = 0

-    with open(document_path, 'r') as file:
+    with open(document_path, 'r', encoding='utf-8') as file:
         content = file.read()

     cwd = os.getcwd()
@@ -81,7 +81,7 @@ def extract_responses(document_path, relative_to_ara_root=False):
             artefact_path = artefact.file_path
             directory = os.path.dirname(artefact_path)
             os.makedirs(directory, exist_ok=True)
-            with open(artefact_path, 'w') as file:
+            with open(artefact_path, 'w', encoding='utf-8') as file:
                 file.write(serialized_artefact)

             os.chdir(original_directory)
@@ -92,7 +92,7 @@ def extract_responses(document_path, relative_to_ara_root=False):

     os.chdir(cwd)
     # Save the updated markdown content
-    with open(document_path, 'w') as file:
+    with open(document_path, 'w', encoding='utf-8') as file:
         file.write(updated_content)

     print(f"End of extraction. Found {block_extraction_counter} blocks.")
ara_cli/prompt_handler.py
CHANGED
@@ -3,7 +3,7 @@ import litellm
 from ara_cli.classifier import Classifier
 from ara_cli.artefact_creator import ArtefactCreator
 from ara_cli.template_manager import TemplatePathManager
-from ara_cli.ara_config import ConfigManager
+from ara_cli.ara_config import ConfigManager, LLMConfigItem
 from ara_cli.file_lister import generate_markdown_listing
 from os.path import exists, join
 import os
@@ -20,15 +20,15 @@ class LLMSingleton:

     def __init__(self, model_id):
         config = ConfigManager().get_config()
-
-        selected_config = llm_config[str(model_id)]
+        selected_config = config.llm_config.get(str(model_id))

         if not selected_config:
             raise ValueError(f"No configuration found for the model: {model_id}")

         LLMSingleton._model = model_id
-
-
+
+        # Typesafe for None values inside the config.
+        self.config_parameters = selected_config.model_dump(exclude_none=True)

         LLMSingleton._instance = self

@@ -74,15 +74,14 @@ def read_string_from_file(path):
 def send_prompt(prompt):
     chat = LLMSingleton.get_instance()

-    # remove provider from config parameters
     config_parameters = chat.config_parameters.copy()
-
+    if "provider" in config_parameters:
+        del config_parameters["provider"]

     completion = litellm.completion(
         **config_parameters,
         messages=prompt,
-        stream=True,
-        max_tokens=32768
+        stream=True
     )
     for chunk in completion:
         yield chunk
@@ -436,4 +435,4 @@ def generate_config_prompt_givens_file(prompt_data_path, config_prompt_givens_na

     # Write the updated listing back to the file
     with open(config_prompt_givens_path, 'w', encoding='utf-8') as file:
-        file.write("".join(updated_listing))
+        file.write("".join(updated_listing))
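`LLMSingleton` now looks the model up with `config.llm_config.get(...)` and flattens the selected entry via Pydantic's `model_dump(exclude_none=True)`, so optional settings that were never configured disappear instead of being forwarded as `None`; `send_prompt` then strips the `provider` key before handing the rest to `litellm.completion`. A minimal sketch of that dump-and-strip step; the config model and model name below are illustrative, not the real `LLMConfigItem`:

```python
from typing import Optional
from pydantic import BaseModel


class DemoLLMConfigItem(BaseModel):
    model: str
    provider: Optional[str] = None
    api_base: Optional[str] = None
    temperature: Optional[float] = None


item = DemoLLMConfigItem(model="gpt-4o", provider="openai")

# exclude_none=True drops optional fields that were never set.
config_parameters = item.model_dump(exclude_none=True)
# -> {'model': 'gpt-4o', 'provider': 'openai'}

# The diff removes 'provider' before forwarding the rest to litellm.completion.
config_parameters.pop("provider", None)
print(config_parameters)  # {'model': 'gpt-4o'}
```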
ara_cli/prompt_rag.py
CHANGED
@@ -16,7 +16,7 @@ def find_files_in_prompt_config_givens(search_file, prompt_givens_file_path):
     header_stack = []
     modified_lines = []  # To store the modified file content

-    with open(prompt_givens_file_path, 'r') as file:
+    with open(prompt_givens_file_path, 'r', encoding='utf-8') as file:
         for line in file:
             if line.strip().startswith('#'):
                 level = line.count('#')
@@ -37,7 +37,7 @@ def find_files_in_prompt_config_givens(search_file, prompt_givens_file_path):

     if file_found:
         # Rewrite the file with the modified content if any line was changed
-        with open(prompt_givens_file_path, 'w') as file:
+        with open(prompt_givens_file_path, 'w', encoding='utf-8') as file:
             file.writelines(modified_lines)

     return file_found
ara_cli/template_manager.py
CHANGED
@@ -34,7 +34,7 @@ class TemplatePathManager:

         template_path = (base_path / f"template.{classifier}")

-        with template_path.open('r') as file:
+        with template_path.open('r', encoding='utf-8') as file:
             content = file.read()

         return content
@@ -113,7 +113,7 @@ class ArtefactFileManager:
     def save_behave_steps_to_file(self, artefact_name, behave_steps):
         self.navigator.navigate_to_target()
         file_path = f"features/steps/{artefact_name}_steps.py"
-        with open(file_path, 'w') as file:
+        with open(file_path, 'w', encoding='utf-8') as file:
             file.write(behave_steps)


ara_cli/templates/prompt-modules/blueprints/complete_pytest_unittest.blueprint.md
CHANGED
@@ -24,4 +24,4 @@ Modify and/or create unit tests so this is fully covered:
 <snippet you want to cover in the next step>
 ```

-Give me only what is relevant to testing this snippet. Use parametrization where applicable. Mock all dependencies of tested code.
+Give me only what is relevant to testing this snippet. Use parametrization where applicable. Split into multiple tests instead of using if-else blocks. Mock all dependencies of tested code.
ara_cli/update_config_prompt.py
CHANGED
@@ -5,13 +5,13 @@ from ara_cli.prompt_handler import generate_config_prompt_template_file, generat

 def read_file(filepath):
     """Read and return the content of a file."""
-    with open(filepath, 'r') as file:
+    with open(filepath, 'r', encoding='utf-8') as file:
         return file.read()


 def write_file(filepath, content):
     """Write content to a file."""
-    with open(filepath, 'w') as file:
+    with open(filepath, 'w', encoding='utf-8') as file:
         file.write(content)


ara_cli/version.py
CHANGED
@@ -1,2 +1,2 @@
 # version.py
-__version__ = "0.1.9.69"
+__version__ = "0.1.9.71"  # fith parameter like .0 for local install test purposes only. official numbers should be 4 digit numbers