ara-cli 0.1.9.95__py3-none-any.whl → 0.1.10.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ara_cli/__init__.py +5 -2
- ara_cli/__main__.py +61 -13
- ara_cli/ara_command_action.py +85 -20
- ara_cli/ara_command_parser.py +42 -2
- ara_cli/ara_config.py +118 -94
- ara_cli/artefact_autofix.py +131 -2
- ara_cli/artefact_creator.py +2 -7
- ara_cli/artefact_deleter.py +2 -4
- ara_cli/artefact_fuzzy_search.py +13 -6
- ara_cli/artefact_models/artefact_templates.py +3 -3
- ara_cli/artefact_models/feature_artefact_model.py +25 -0
- ara_cli/artefact_reader.py +4 -5
- ara_cli/chat.py +210 -150
- ara_cli/commands/extract_command.py +4 -11
- ara_cli/error_handler.py +134 -0
- ara_cli/file_classifier.py +3 -2
- ara_cli/prompt_extractor.py +1 -1
- ara_cli/prompt_handler.py +268 -127
- ara_cli/template_loader.py +245 -0
- ara_cli/version.py +1 -1
- {ara_cli-0.1.9.95.dist-info → ara_cli-0.1.10.0.dist-info}/METADATA +2 -1
- {ara_cli-0.1.9.95.dist-info → ara_cli-0.1.10.0.dist-info}/RECORD +32 -29
- tests/test_ara_command_action.py +66 -52
- tests/test_artefact_autofix.py +361 -5
- tests/test_chat.py +1894 -546
- tests/test_file_classifier.py +23 -0
- tests/test_file_creator.py +3 -5
- tests/test_prompt_handler.py +40 -4
- tests/test_template_loader.py +192 -0
- {ara_cli-0.1.9.95.dist-info → ara_cli-0.1.10.0.dist-info}/WHEEL +0 -0
- {ara_cli-0.1.9.95.dist-info → ara_cli-0.1.10.0.dist-info}/entry_points.txt +0 -0
- {ara_cli-0.1.9.95.dist-info → ara_cli-0.1.10.0.dist-info}/top_level.txt +0 -0
ara_cli/ara_config.py
CHANGED
@@ -10,6 +10,7 @@ import warnings
 
 DEFAULT_CONFIG_LOCATION = "./ara/.araconfig/ara_config.json"
 
+
 class LLMConfigItem(BaseModel):
     provider: str
     model: str
@@ -17,6 +18,7 @@ class LLMConfigItem(BaseModel):
     max_tokens: Optional[int] = None
     max_completion_tokens: Optional[int] = None
 
+
 class ARAconfig(BaseModel):
     ext_code_dirs: List[Dict[str, str]] = Field(default_factory=lambda: [
         {"source_dir": "./src"},
@@ -28,98 +30,104 @@ class ARAconfig(BaseModel):
     local_prompt_templates_dir: str = "./ara/.araconfig"
     custom_prompt_templates_subdir: Optional[str] = "custom-prompt-modules"
     local_ara_templates_dir: str = "./ara/.araconfig/templates/"
-    ara_prompt_given_list_includes: List[str] = Field(
-    …
+    ara_prompt_given_list_includes: List[str] = Field(
+        default_factory=lambda: [
+            "*.businessgoal",
+            "*.vision",
+            "*.capability",
+            "*.keyfeature",
+            "*.epic",
+            "*.userstory",
+            "*.example",
+            "*.feature",
+            "*.task",
+            "*.py",
+            "*.md",
+            "*.png",
+            "*.jpg",
+            "*.jpeg",
+        ]
+    )
+    llm_config: Dict[str, LLMConfigItem] = Field(
+        default_factory=lambda: {
+            "gpt-5": LLMConfigItem(
+                provider="openai",
+                model="openai/gpt-5",
+                temperature=1,
+                max_completion_tokens=16000,
+            ),
+            "gpt-5-mini": LLMConfigItem(
+                provider="openai", model="openai/gpt-5-mini-2025-08-07", temperature=1
+            ),
+            "gpt-4o": LLMConfigItem(
+                provider="openai",
+                model="openai/gpt-4o",
+                temperature=0.8,
+                max_tokens=16000,
+            ),
+            "gpt-4.1": LLMConfigItem(
+                provider="openai",
+                model="openai/gpt-4.1",
+                temperature=0.8,
+                max_tokens=16000,
+            ),
+            "o3-mini": LLMConfigItem(
+                provider="openai",
+                model="openai/o3-mini",
+                temperature=1.0,
+                max_tokens=8000,
+            ),
+            "opus-4": LLMConfigItem(
+                provider="anthropic",
+                model="anthropic/claude-opus-4-20250514",
+                temperature=0.5,
+                max_tokens=32000,
+            ),
+            "sonnet-4": LLMConfigItem(
+                provider="anthropic",
+                model="anthropic/claude-sonnet-4-20250514",
+                temperature=0.5,
+                max_tokens=32000,
+            ),
+            "together-ai-llama-2": LLMConfigItem(
+                provider="together_ai",
+                model="together_ai/togethercomputer/llama-2-70b",
+                temperature=0.8,
+                max_tokens=4000,
+            ),
+            "groq-llama-3": LLMConfigItem(
+                provider="groq",
+                model="groq/llama3-70b-8192",
+                temperature=0.8,
+                max_tokens=4000,
+            ),
+        }
+    )
     default_llm: Optional[str] = None
     extraction_llm: Optional[str] = None
 
-    @model_validator(mode=
-    def check_critical_fields(self) ->
+    @model_validator(mode="after")
+    def check_critical_fields(self) -> "ARAconfig":
         """Check for empty critical fields and validate default_llm and extraction_llm."""
         critical_fields = {
-            …
+            "ext_code_dirs": [{"source_dir": "./src"}, {"source_dir": "./tests"}],
+            "local_ara_templates_dir": "./ara/.araconfig/templates/",
+            "local_prompt_templates_dir": "./ara/.araconfig",
+            "glossary_dir": "./glossary",
         }
 
         for field, default_value in critical_fields.items():
            current_value = getattr(self, field)
            if not current_value:
-                print(
+                print(
+                    f"Warning: Value for '{field}' is missing or empty. Using default."
+                )
                 setattr(self, field, default_value)
-
+
         if not self.llm_config:
-            print(
+            print(
+                "Warning: 'llm_config' is empty. 'default_llm' and 'extraction_llm' cannot be set."
+            )
             self.default_llm = None
             self.extraction_llm = None
             return self
@@ -127,23 +135,34 @@ class ARAconfig(BaseModel):
         first_available_llm = next(iter(self.llm_config))
 
         if not self.default_llm:
-            print(
+            print(
+                f"Warning: 'default_llm' is not set. Defaulting to the first available model: '{first_available_llm}'."
+            )
             self.default_llm = first_available_llm
         elif self.default_llm not in self.llm_config:
-            print(
-
+            print(
+                f"Warning: The configured 'default_llm' ('{self.default_llm}') does not exist in 'llm_config'."
+            )
+            print(
+                f"-> Reverting to the first available model: '{first_available_llm}'."
+            )
             self.default_llm = first_available_llm
 
         if not self.extraction_llm:
-            print(
+            print(
+                f"Warning: 'extraction_llm' is not set. Setting it to the same as 'default_llm': '{self.default_llm}'."
+            )
             self.extraction_llm = self.default_llm
         elif self.extraction_llm not in self.llm_config:
-            print(
+            print(
+                f"Warning: The configured 'extraction_llm' ('{self.extraction_llm}') does not exist in 'llm_config'."
+            )
             print(f"-> Reverting to the 'default_llm' value: '{self.default_llm}'.")
             self.extraction_llm = self.default_llm
-
+
         return self
 
+
 # Function to ensure the necessary directories exist
 @lru_cache(maxsize=None)
 def ensure_directory_exists(directory: str):
@@ -153,6 +172,7 @@ def ensure_directory_exists(directory: str):
     print(f"New directory created at {directory}")
     return directory
 
+
 def handle_unrecognized_keys(data: dict) -> dict:
     """Removes unrecognized keys from the data and warns the user."""
     known_fields = set(ARAconfig.model_fields.keys())
@@ -211,13 +231,15 @@ def read_data(filepath: str) -> ARAconfig:
         return config
     except ValidationError as e:
         print("--- Configuration Error Detected ---")
-        print(
-
+        print(
+            "Some settings in your configuration file are invalid. Attempting to fix them."
+        )
+
         corrected_data = data.copy()
         defaults = ARAconfig().model_dump()
-
-        error_fields = {err[
-
+
+        error_fields = {err["loc"][0] for err in e.errors() if err["loc"]}
+
         for field_name in error_fields:
             print(f"-> Field '{field_name}' is invalid and will be reverted to its default value.")
             if field_name in corrected_data:
@@ -228,15 +250,17 @@ def read_data(filepath: str) -> ARAconfig:
         final_config = ARAconfig(**corrected_data)
         save_data(filepath, final_config)
         print(f"Configuration has been corrected and saved to '{filepath}'.")
-
+
         return final_config
 
+
 # Function to save the modified configuration back to the JSON file
 def save_data(filepath: str, config: ARAconfig):
     """Saves the Pydantic config model to a JSON file."""
     with open(filepath, "w", encoding="utf-8") as file:
         json.dump(config.model_dump(), file, indent=4)
 
+
 # Singleton for configuration management
 class ConfigManager:
     _config_instance = None
@@ -246,9 +270,9 @@ class ConfigManager:
         if cls._config_instance is None:
             cls._config_instance = read_data(filepath)
         return cls._config_instance
-
+
     @classmethod
     def reset(cls):
         """Reset the configuration instance (useful for testing)."""
         cls._config_instance = None
-        read_data.cache_clear()
+        read_data.cache_clear()
ara_cli/artefact_autofix.py
CHANGED
@@ -1,3 +1,4 @@
+from ara_cli.error_handler import AraError
 from ara_cli.artefact_scan import check_file
 from ara_cli.artefact_fuzzy_search import (
     find_closest_name_matches,
@@ -10,6 +11,7 @@ from ara_cli.artefact_models.artefact_model import Artefact
 from typing import Optional, Dict, List, Tuple
 import difflib
 import os
+import re
 
 
 def populate_classified_artefact_info(
@@ -104,8 +106,9 @@ def determine_artefact_type_and_class(classifier):
 
     artefact_class = artefact_type_mapping.get(artefact_type)
     if not artefact_class:
-
-
+        raise AraError(f"No artefact class found for {artefact_type}")
+        # print(f"No artefact class found for {artefact_type}")
+        # return None, None
 
     return artefact_type, artefact_class
 
@@ -372,6 +375,131 @@ def set_closest_contribution(
     return artefact, True
 
 
+def fix_scenario_placeholder_mismatch(
+    file_path: str, artefact_text: str, artefact_class, **kwargs
+) -> str:
+    """
+    Converts a regular Scenario with placeholders to a Scenario Outline.
+    This is a deterministic fix that detects placeholders and converts the format.
+    """
+    lines = artefact_text.splitlines()
+    new_lines = []
+    i = 0
+
+    while i < len(lines):
+        line = lines[i]
+        stripped_line = line.strip()
+
+        if stripped_line.startswith('Scenario:'):
+            scenario_lines, next_index = _extract_scenario_block(lines, i)
+            processed_lines = _process_scenario_block(scenario_lines)
+            new_lines.extend(processed_lines)
+            i = next_index
+        else:
+            new_lines.append(line)
+            i += 1
+
+    return "\n".join(new_lines)
+
+
+def _extract_scenario_block(lines: list, start_index: int) -> tuple[list, int]:
+    """Extract all lines belonging to a scenario block."""
+    scenario_lines = [lines[start_index]]
+    j = start_index + 1
+
+    while j < len(lines):
+        next_line = lines[j].strip()
+        if _is_scenario_boundary(next_line):
+            break
+        scenario_lines.append(lines[j])
+        j += 1
+
+    return scenario_lines, j
+
+
+def _is_scenario_boundary(line: str) -> bool:
+    """Check if a line marks the boundary of a scenario block."""
+    boundaries = ['Scenario:', 'Scenario Outline:', 'Background:', 'Feature:']
+    return any(line.startswith(boundary) for boundary in boundaries)
+
+
+def _process_scenario_block(scenario_lines: list) -> list:
+    """Process a scenario block and convert to outline if placeholders are found."""
+    if not scenario_lines:
+        return scenario_lines
+
+    first_line = scenario_lines[0]
+    indentation = _get_line_indentation(first_line)
+    placeholders = _extract_placeholders_from_scenario(scenario_lines[1:])
+
+    if not placeholders:
+        return scenario_lines
+
+    return _convert_to_scenario_outline(scenario_lines, placeholders, indentation)
+
+
+def _get_line_indentation(line: str) -> str:
+    """Get the indentation of a line."""
+    return line[:len(line) - len(line.lstrip())]
+
+
+def _extract_placeholders_from_scenario(step_lines: list) -> set:
+    """Extract placeholders from scenario step lines, ignoring docstrings."""
+    placeholders = set()
+    in_docstring = False
+
+    for line in step_lines:
+        step_line = line.strip()
+        if not step_line:
+            continue
+
+        in_docstring = _update_docstring_state(step_line, in_docstring)
+
+        if not in_docstring and '"""' not in step_line:
+            found = re.findall(r'<([^>]+)>', step_line)
+            placeholders.update(found)
+
+    return placeholders
+
+
+def _update_docstring_state(line: str, current_state: bool) -> bool:
+    """Update the docstring state based on the current line."""
+    if '"""' in line:
+        return not current_state
+    return current_state
+
+
+def _convert_to_scenario_outline(scenario_lines: list, placeholders: set, indentation: str) -> list:
+    """Convert scenario lines to scenario outline format with examples table."""
+    first_line = scenario_lines[0]
+    title = first_line.strip()[len('Scenario:'):].strip()
+
+    new_lines = [f"{indentation}Scenario Outline: {title}"]
+    new_lines.extend(scenario_lines[1:])
+    new_lines.append("")
+
+    examples_lines = _create_examples_table(placeholders, indentation)
+    new_lines.extend(examples_lines)
+
+    return new_lines
+
+
+def _create_examples_table(placeholders: set, base_indentation: str) -> list:
+    """Create the Examples table for the scenario outline."""
+    examples_indentation = base_indentation + " "
+    table_indentation = examples_indentation + " "
+
+    sorted_placeholders = sorted(placeholders)
+    header = "| " + " | ".join(sorted_placeholders) + " |"
+    sample_row = "| " + " | ".join(f"<{p}_value>" for p in sorted_placeholders) + " |"
+
+    return [
+        f"{examples_indentation}Examples:",
+        f"{table_indentation}{header}",
+        f"{table_indentation}{sample_row}"
+    ]
+
+
 def fix_title_mismatch(
     file_path: str, artefact_text: str, artefact_class, **kwargs
 ) -> str:
@@ -584,6 +712,7 @@ def apply_autofix(
         "Filename-Title Mismatch": fix_title_mismatch,
         "Invalid Contribution Reference": fix_contribution,
         "Rule Mismatch": fix_rule,
+        "Scenario Contains Placeholders": fix_scenario_placeholder_mismatch,
     }
 
     artefact_type, artefact_class = determine_artefact_type_and_class(classifier)
ara_cli/artefact_creator.py
CHANGED
@@ -38,11 +38,9 @@ class ArtefactCreator:
         destination = Path(dir_path) / dest_name
 
         if not source.exists():
-            print("[ERROR] Source file does not exist!")
             raise FileNotFoundError(f"Source file {source} not found!")
 
         if not destination.parent.exists():
-            print("[ERROR] Destination directory does not exist!")
             raise NotADirectoryError(f"Destination directory {destination.parent} does not exist!")
 
         copyfile(source, destination)
@@ -70,9 +68,7 @@ class ArtefactCreator:
     def validate_template(self, template_path, classifier):
         template_name = f"template.{classifier}"
         if not self.template_exists(template_path, template_name):
-
-            return False
-        return True
+            raise FileNotFoundError(f"Template file '{template_name}' not found in the specified template path.")
 
     def set_artefact_parent(self, artefact, parent_classifier, parent_file_name) -> Artefact:
         classified_artefacts = ArtefactReader.read_artefacts()
@@ -94,8 +90,7 @@ class ArtefactCreator:
             navigator.navigate_to_target()
 
         if not Classifier.is_valid_classifier(classifier):
-
-            return
+            raise ValueError("Invalid classifier provided. Please provide a valid classifier.")
 
         sub_directory = Classifier.get_sub_directory(classifier)
         file_path = self.file_system.path.join(sub_directory, f"{filename}.{classifier}")
ara_cli/artefact_deleter.py
CHANGED
@@ -20,16 +20,14 @@ class ArtefactDeleter:
         self.navigate_to_target()
 
         if not Classifier.is_valid_classifier(classifier):
-
-            return
+            raise ValueError("Invalid classifier provided. Please provide a valid classifier.")
 
         sub_directory = Classifier.get_sub_directory(classifier)
         file_path = self.file_system.path.join(sub_directory, f"{filename}.{classifier}")
         dir_path = self.file_system.path.join(sub_directory, f"{filename}.data")
 
         if not self.file_system.path.exists(file_path):
-
-            return
+            raise FileNotFoundError(f"Artefact {file_path} not found.")
         if not force:
             user_choice = input(f"Are you sure you want to delete the file {filename} and data directory if existing? (y/N): ")
 
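
ArtefactCreator and ArtefactDeleter now signal failures by raising (`FileNotFoundError`, `NotADirectoryError`, `ValueError`) instead of printing and returning, so callers, ultimately the CLI together with the new `ara_cli.error_handler` module, have to catch these. A rough sketch of the pattern; the constructor and method call below are assumptions for illustration, not the documented API:

```python
# Sketch only: constructor arguments and the delete() call are assumed, not documented.
from ara_cli.artefact_deleter import ArtefactDeleter

deleter = ArtefactDeleter()  # assumption: default construction
try:
    deleter.delete("missing_artefact", "feature", force=True)  # assumption: method name/signature
except (FileNotFoundError, ValueError) as exc:
    print(f"ara error: {exc}")
```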
ara_cli/artefact_fuzzy_search.py
CHANGED
@@ -1,11 +1,16 @@
 import difflib
 from textwrap import indent
 from typing import Optional
+from . import error_handler
+from ara_cli.error_handler import AraError
 
 
-def suggest_close_names(artefact_name: str, all_artefact_names: list[str], message: str, cutoff=0.5):
+def suggest_close_names(artefact_name: str, all_artefact_names: list[str], message: str, cutoff=0.5, report_as_error: bool = False):
     closest_matches = difflib.get_close_matches(artefact_name, all_artefact_names, cutoff=cutoff)
-
+    if report_as_error:
+        error_handler.report_error(AraError(message))
+    else:
+        print(message)
     if not closest_matches:
         return
     print("Closest matches:")
@@ -13,23 +18,25 @@ def suggest_close_names(artefact_name: str, all_artefact_names: list[str], message: str, cutoff=0.5):
         print(f" - {match}")
 
 
-def suggest_close_name_matches(artefact_name: str, all_artefact_names: list[str]):
+def suggest_close_name_matches(artefact_name: str, all_artefact_names: list[str], report_as_error: bool = False):
     message = f"No match found for artefact with name '{artefact_name}'"
 
     suggest_close_names(
         artefact_name=artefact_name,
         all_artefact_names=all_artefact_names,
-        message=message
+        message=message,
+        report_as_error=report_as_error
     )
 
 
-def suggest_close_name_matches_for_parent(artefact_name: str, all_artefact_names: list[str], parent_name: str):
+def suggest_close_name_matches_for_parent(artefact_name: str, all_artefact_names: list[str], parent_name: str, report_as_error: bool = False):
     message = f"No match found for parent of '{artefact_name}' with name '{parent_name}'"
 
     suggest_close_names(
         artefact_name=parent_name,
         all_artefact_names=all_artefact_names,
-        message=message
+        message=message,
+        report_as_error=report_as_error
     )
 
 
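
The default behaviour of the fuzzy-search helpers is unchanged (the message is printed), so existing callers keep working; the new flag routes the message through the central error handler instead. A short sketch (how `report_error` surfaces the `AraError` depends on the new `error_handler` module):

```python
# Sketch: report_as_error=True hands the "no match" message to ara_cli.error_handler
# as an AraError instead of printing it directly.
from ara_cli.artefact_fuzzy_search import suggest_close_name_matches

known_names = ["user login", "user logout", "password reset"]
suggest_close_name_matches("user-login", known_names)                        # prints message + closest matches
suggest_close_name_matches("user-login", known_names, report_as_error=True)  # routed through the error handler
```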
ara_cli/artefact_models/artefact_templates.py
CHANGED
@@ -148,9 +148,9 @@ def _default_feature(title: str, use_default_contribution: bool) -> FeatureArtefact:
         Scenario(
             title="<descriptive_scenario_title>",
             steps=[
-                "Given
-                "When
-                "Then
+                "Given [precondition]",
+                "When [action]",
+                "Then [expected result]"
             ],
         ),
         ScenarioOutline(
ara_cli/artefact_models/feature_artefact_model.py
CHANGED
@@ -148,6 +148,30 @@ class Scenario(BaseModel):
             raise ValueError("steps list must not be empty")
         return steps
 
+    @model_validator(mode='after')
+    def check_no_placeholders(cls, values: 'Scenario') -> 'Scenario':
+        """Ensure regular scenarios don't contain placeholders that should be in scenario outlines."""
+        placeholders = set()
+        for step in values.steps:
+            # Skip validation if step contains docstring placeholders (during parsing)
+            if '__DOCSTRING_PLACEHOLDER_' in step:
+                continue
+
+            # Skip validation if step contains docstring markers (after reinjection)
+            if '"""' in step:
+                continue
+
+            found = re.findall(r'<([^>]+)>', step)
+            placeholders.update(found)
+
+        if placeholders:
+            placeholder_list = ', '.join(f"<{p}>" for p in sorted(placeholders))
+            raise ValueError(
+                f"Scenario Contains Placeholders ({placeholder_list}) but is not a Scenario Outline. "
+                f"Use 'Scenario Outline:' instead of 'Scenario:' and provide an Examples table."
+            )
+        return values
+
     @classmethod
     def from_lines(cls, lines: List[str], start_idx: int) -> Tuple['Scenario', int]:
         """Parse a Scenario from a list of lines starting at start_idx."""
@@ -277,6 +301,7 @@ class FeatureArtefact(Artefact):
                 f"FeatureArtefact must have artefact_type of '{ArtefactType.feature}', not '{v}'")
         return v
 
+
     @classmethod
     def _title_prefix(cls) -> str:
         return "Feature:"
ara_cli/artefact_reader.py
CHANGED
@@ -1,3 +1,4 @@
+from . import error_handler
 from ara_cli.classifier import Classifier
 from ara_cli.file_classifier import FileClassifier
 from ara_cli.artefact_models.artefact_model import Artefact
@@ -12,8 +13,7 @@ class ArtefactReader:
     @staticmethod
     def read_artefact_data(artefact_name, classifier, classified_file_info = None) -> tuple[str, dict[str, str]]:
         if not Classifier.is_valid_classifier(classifier):
-
-            return None, None
+            raise ValueError("Invalid classifier provided. Please provide a valid classifier.")
 
         if not classified_file_info:
             file_classifier = FileClassifier(os)
@@ -74,7 +74,6 @@ class ArtefactReader:
 
     @staticmethod
     def read_artefacts(classified_artefacts=None, file_system=os, tags=None) -> Dict[str, List[Artefact]]:
-        from ara_cli.artefact_models.artefact_load import artefact_from_content
 
         if classified_artefacts is None:
             file_classifier = FileClassifier(file_system)
@@ -89,7 +88,7 @@ class ArtefactReader:
                 artefact = ArtefactReader.read_artefact(title, artefact_type, classified_artefacts)
                 artefacts[artefact_type].append(artefact)
             except Exception as e:
-
+                error_handler.report_error(e, f"reading {artefact_type} '{title}'")
                 continue
         return artefacts
 
@@ -143,7 +142,7 @@ class ArtefactReader:
         ArtefactReader._ensure_classifier_key(classifier, artefacts_by_classifier)
 
         artefact = ArtefactReader._find_artefact_by_name(
-            artefact_name,
+            artefact_name,
             classified_artefacts.get(classifier, [])
         )
 
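
With error reporting delegated to the new `ara_cli.error_handler` module, artefacts that fail to parse during `read_artefacts` are reported with context (`reading <type> '<title>'`) and then skipped, rather than aborting the whole read. A usage sketch; it only produces meaningful output inside an initialised ara workspace:

```python
# Sketch: read_artefacts still returns a dict of artefact lists per artefact type;
# per-artefact parse errors are passed to error_handler.report_error with context.
from ara_cli.artefact_reader import ArtefactReader

artefacts = ArtefactReader.read_artefacts()
for artefact_type, items in artefacts.items():
    print(artefact_type, len(items))
```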