ara-cli 0.1.9.74__py3-none-any.whl → 0.1.9.75__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ara-cli might be problematic; see the associated advisory for more details.

ara_cli/ara_config.py CHANGED
@@ -1,33 +1,43 @@
1
- from typing import List, Dict, Optional
2
- from pydantic import BaseModel
1
+ from typing import List, Dict, Optional, Any
2
+ from pydantic import BaseModel, ValidationError, Field, field_validator, model_validator
3
3
  import json
4
4
  import os
5
5
  from os.path import exists, dirname
6
6
  from os import makedirs
7
7
  from functools import lru_cache
8
-
8
+ import sys
9
9
 
10
10
  DEFAULT_CONFIG_LOCATION = "./ara/.araconfig/ara_config.json"
11
11
 
12
-
13
12
  class LLMConfigItem(BaseModel):
14
13
  provider: str
15
14
  model: str
16
- temperature: float
15
+ temperature: float = Field(ge=0.0, le=1.0)
17
16
  max_tokens: Optional[int] = None
17
+
18
+ @field_validator('temperature')
19
+ @classmethod
20
+ def validate_temperature(cls, v: float, info) -> float:
21
+ if not 0.0 <= v <= 1.0:
22
+ print(f"Warning: Temperature is outside the 0.0 to 1.0 range")
23
+ # Return a valid default
24
+ return 0.8
25
+ return v
18
26
 
27
+ class ExtCodeDirItem(BaseModel):
28
+ source_dir: str
19
29
 
20
30
  class ARAconfig(BaseModel):
21
- ext_code_dirs: List[Dict[str, str]] = [
22
- {"source_dir_1": "./src"},
23
- {"source_dir_2": "./tests"},
24
- ]
31
+ ext_code_dirs: List[ExtCodeDirItem] = Field(default_factory=lambda: [
32
+ ExtCodeDirItem(source_dir="./src"),
33
+ ExtCodeDirItem(source_dir="./tests")
34
+ ])
25
35
  glossary_dir: str = "./glossary"
26
36
  doc_dir: str = "./docs"
27
37
  local_prompt_templates_dir: str = "./ara/.araconfig"
28
38
  custom_prompt_templates_subdir: Optional[str] = "custom-prompt-modules"
29
39
  local_ara_templates_dir: str = "./ara/.araconfig/templates/"
30
- ara_prompt_given_list_includes: List[str] = [
40
+ ara_prompt_given_list_includes: List[str] = Field(default_factory=lambda: [
31
41
  "*.businessgoal",
32
42
  "*.vision",
33
43
  "*.capability",
@@ -42,53 +52,76 @@ class ARAconfig(BaseModel):
42
52
  "*.png",
43
53
  "*.jpg",
44
54
  "*.jpeg",
45
- ]
46
- llm_config: Dict[str, LLMConfigItem] = {
47
- "gpt-4o": {
48
- "provider": "openai",
49
- "model": "openai/gpt-4o",
50
- "temperature": 0.8,
51
- "max_tokens": 16384
52
- },
53
- "gpt-4.1": {
54
- "provider": "openai",
55
- "model": "openai/gpt-4.1",
56
- "temperature": 0.8,
57
- "max_tokens": 1024
58
- },
59
- "o3-mini": {
60
- "provider": "openai",
61
- "model": "openai/o3-mini",
62
- "temperature": 1.0,
63
- "max_tokens": 1024
64
- },
65
- "opus-4": {
66
- "provider": "anthropic",
67
- "model": "anthropic/claude-opus-4-20250514",
68
- "temperature": 0.8,
69
- "max_tokens": 32000
70
- },
71
- "sonnet-4": {
72
- "provider": "anthropic",
73
- "model": "anthropic/claude-sonnet-4-20250514",
74
- "temperature": 0.8,
75
- "max_tokens": 1024
76
- },
77
- "together-ai-llama-2": {
78
- "provider": "together_ai",
79
- "model": "together_ai/togethercomputer/llama-2-70b",
80
- "temperature": 0.8,
81
- "max_tokens": 1024
82
- },
83
- "groq-llama-3": {
84
- "provider": "groq",
85
- "model": "groq/llama3-70b-8192",
86
- "temperature": 0.8,
87
- "max_tokens": 1024
88
- }
89
- }
55
+ ])
56
+ llm_config: Dict[str, LLMConfigItem] = Field(default_factory=lambda: {
57
+ "gpt-4o": LLMConfigItem(
58
+ provider="openai",
59
+ model="openai/gpt-4o",
60
+ temperature=0.8,
61
+ max_tokens=16384
62
+ ),
63
+ "gpt-4.1": LLMConfigItem(
64
+ provider="openai",
65
+ model="openai/gpt-4.1",
66
+ temperature=0.8,
67
+ max_tokens=1024
68
+ ),
69
+ "o3-mini": LLMConfigItem(
70
+ provider="openai",
71
+ model="openai/o3-mini",
72
+ temperature=1.0,
73
+ max_tokens=1024
74
+ ),
75
+ "opus-4": LLMConfigItem(
76
+ provider="anthropic",
77
+ model="anthropic/claude-opus-4-20250514",
78
+ temperature=0.8,
79
+ max_tokens=32000
80
+ ),
81
+ "sonnet-4": LLMConfigItem(
82
+ provider="anthropic",
83
+ model="anthropic/claude-sonnet-4-20250514",
84
+ temperature=0.8,
85
+ max_tokens=1024
86
+ ),
87
+ "together-ai-llama-2": LLMConfigItem(
88
+ provider="together_ai",
89
+ model="together_ai/togethercomputer/llama-2-70b",
90
+ temperature=0.8,
91
+ max_tokens=1024
92
+ ),
93
+ "groq-llama-3": LLMConfigItem(
94
+ provider="groq",
95
+ model="groq/llama3-70b-8192",
96
+ temperature=0.8,
97
+ max_tokens=1024
98
+ )
99
+ })
90
100
  default_llm: Optional[str] = "gpt-4o"
101
+
102
+ model_config = {
103
+ "extra": "forbid" # This will help identify unrecognized keys
104
+ }
91
105
 
106
+ @model_validator(mode='after')
107
+ def check_critical_fields(self) -> 'ARAconfig':
108
+ """Check for empty critical fields and use defaults if needed"""
109
+ critical_fields = {
110
+ 'ext_code_dirs': [ExtCodeDirItem(source_dir="./src"), ExtCodeDirItem(source_dir="./tests")],
111
+ 'local_ara_templates_dir': "./ara/.araconfig/templates/",
112
+ 'local_prompt_templates_dir': "./ara/.araconfig",
113
+ 'glossary_dir': "./glossary"
114
+ }
115
+
116
+ for field, default_value in critical_fields.items():
117
+ current_value = getattr(self, field)
118
+ if (not current_value or
119
+ (isinstance(current_value, list) and len(current_value) == 0) or
120
+ (isinstance(current_value, str) and current_value.strip() == "")):
121
+ print(f"Warning: Value for '{field}' is missing or empty.")
122
+ setattr(self, field, default_value)
123
+
124
+ return self
92
125
 
93
126
  # Function to ensure the necessary directories exist
94
127
  @lru_cache(maxsize=None)
@@ -98,37 +131,106 @@ def ensure_directory_exists(directory: str):
98
131
  print(f"New directory created at {directory}")
99
132
  return directory
100
133
 
101
-
102
- def validate_config_data(filepath: str):
103
- with open(filepath, "r", encoding="utf-8") as file:
104
- data = json.load(file)
134
+ def handle_unrecognized_keys(data: dict, known_fields: set) -> dict:
135
+ """Remove unrecognized keys and warn the user"""
136
+ cleaned_data = {}
137
+ for key, value in data.items():
138
+ if key not in known_fields:
139
+ print(f"Warning: {key} is not recognized as a valid configuration option.")
140
+ else:
141
+ cleaned_data[key] = value
142
+ return cleaned_data
143
+
144
+ def fix_llm_temperatures(data: dict) -> dict:
145
+ """Fix invalid temperatures in LLM configurations"""
146
+ if 'llm_config' in data:
147
+ for model_key, model_config in data['llm_config'].items():
148
+ if isinstance(model_config, dict) and 'temperature' in model_config:
149
+ temp = model_config['temperature']
150
+ if not 0.0 <= temp <= 1.0:
151
+ print(f"Warning: Temperature for model '{model_key}' is outside the 0.0 to 1.0 range")
152
+ model_config['temperature'] = 0.8
105
153
  return data
106
154
 
155
+ def validate_and_fix_config_data(filepath: str) -> dict:
156
+ """Load, validate, and fix configuration data"""
157
+ try:
158
+ with open(filepath, "r", encoding="utf-8") as file:
159
+ data = json.load(file)
160
+
161
+ # Get known fields from the ARAconfig model
162
+ known_fields = set(ARAconfig.model_fields.keys())
163
+
164
+ # Handle unrecognized keys
165
+ data = handle_unrecognized_keys(data, known_fields)
166
+
167
+ # Fix LLM temperatures before validation
168
+ data = fix_llm_temperatures(data)
169
+
170
+ return data
171
+ except json.JSONDecodeError as e:
172
+ print(f"Error: Invalid JSON in configuration file: {e}")
173
+ print("Creating new configuration with defaults...")
174
+ return {}
175
+ except Exception as e:
176
+ print(f"Error reading configuration file: {e}")
177
+ return {}
107
178
 
108
179
  # Function to read the JSON file and return an ARAconfig model
109
180
  @lru_cache(maxsize=1)
110
181
  def read_data(filepath: str) -> ARAconfig:
182
+ # Ensure the directory for the config file exists
183
+ config_dir = dirname(filepath)
184
+ ensure_directory_exists(config_dir)
185
+
111
186
  if not exists(filepath):
112
- # If file does not exist, create it with default values
187
+ # If the file does not exist, create it with default values
113
188
  default_config = ARAconfig()
114
-
115
- with open(filepath, "w", encoding="utf-8") as file:
116
- json.dump(default_config.model_dump(mode='json'), file, indent=4)
117
-
189
+ save_data(filepath, default_config)
118
190
  print(
119
- f"ara-cli configuration file '{filepath}' created with default configuration. Please modify it as needed and re-run your command"
191
+ f"ara-cli configuration file '{filepath}' created with default configuration."
192
+ f" Please modify it as needed and re-run your command"
120
193
  )
121
- exit() # Exit the application
122
-
123
- data = validate_config_data(filepath)
124
- return ARAconfig(**data)
125
-
194
+ sys.exit(0) # Exit the application
195
+
196
+ # Validate and load the existing configuration
197
+ data = validate_and_fix_config_data(filepath)
198
+
199
+ try:
200
+ # Try to create the config with the loaded data
201
+ config = ARAconfig(**data)
202
+
203
+ # Save the potentially fixed configuration back
204
+ save_data(filepath, config)
205
+
206
+ return config
207
+ except ValidationError as e:
208
+ print(f"ValidationError: {e}")
209
+ print("Correcting configuration with default values...")
210
+
211
+ # Create a default config
212
+ default_config = ARAconfig()
213
+
214
+ # Try to preserve valid fields from the original data
215
+ for field_name, field_value in data.items():
216
+ if field_name in ARAconfig.model_fields:
217
+ try:
218
+ # Attempt to set the field value
219
+ setattr(default_config, field_name, field_value)
220
+ except:
221
+ # If it fails, keep the default
222
+ pass
223
+
224
+ # Save the corrected configuration
225
+ save_data(filepath, default_config)
226
+ print("Fixed configuration saved to file.")
227
+
228
+ return default_config
126
229
 
127
230
  # Function to save the modified configuration back to the JSON file
128
231
  def save_data(filepath: str, config: ARAconfig):
129
232
  with open(filepath, "w", encoding="utf-8") as file:
130
- json.dump(config.model_dump(mode='json'), file, indent=4)
131
-
233
+ json.dump(config.model_dump(), file, indent=4)
132
234
 
133
235
  # Singleton for configuration management
134
236
  class ConfigManager:
@@ -143,4 +245,10 @@ class ConfigManager:
143
245
  makedirs(config_dir)
144
246
 
145
247
  cls._config_instance = read_data(filepath)
146
- return cls._config_instance
248
+ return cls._config_instance
249
+
250
+ @classmethod
251
+ def reset(cls):
252
+ """Reset the configuration instance (useful for testing)"""
253
+ cls._config_instance = None
254
+ read_data.cache_clear()
@@ -30,36 +30,45 @@ def parse_report(content: str) -> Dict[str, List[Tuple[str, str]]]:
30
30
  Parses the incompatible artefacts report and returns structured data.
31
31
  Returns a dictionary where keys are artefact classifiers, and values are lists of (file_path, reason) tuples.
32
32
  """
33
+ def is_valid_report(lines: List[str]) -> bool:
34
+ return bool(lines) and lines[0] == "# Artefact Check Report"
35
+
36
+ def has_no_problems(lines: List[str]) -> bool:
37
+ return len(lines) >= 3 and lines[2] == "No problems found."
38
+
39
+ def parse_classifier(line: str) -> Optional[str]:
40
+ if line.startswith("## "):
41
+ return line[3:].strip()
42
+ return None
43
+
44
+ def parse_issue(line: str) -> Optional[Tuple[str, str]]:
45
+ if not line.startswith("- "):
46
+ return None
47
+ parts = line.split("`", 2)
48
+ if len(parts) < 3:
49
+ return None
50
+ file_path = parts[1]
51
+ reason = parts[2].split(":", 1)[1].strip() if ":" in parts[2] else ""
52
+ return file_path, reason
53
+
33
54
  lines = content.splitlines()
55
+ if not is_valid_report(lines) or has_no_problems(lines):
56
+ return {}
57
+
34
58
  issues = {}
35
59
  current_classifier = None
36
60
 
37
- if not lines or lines[0] != "# Artefact Check Report":
38
- return issues
39
- return issues
40
-
41
- if len(lines) >= 3 and lines[2] == "No problems found.":
42
- return issues
43
- return issues
44
-
45
- for line in lines[1:]:
46
- line = line.strip()
61
+ for line in map(str.strip, lines[1:]):
47
62
  if not line:
48
63
  continue
49
-
50
- if line.startswith("## "):
51
- current_classifier = line[3:].strip()
64
+ classifier = parse_classifier(line)
65
+ if classifier is not None:
66
+ current_classifier = classifier
52
67
  issues[current_classifier] = []
53
-
54
- elif line.startswith("- ") and current_classifier is not None:
55
- parts = line.split("`", 2)
56
- if len(parts) < 3:
57
- continue
58
-
59
- file_path = parts[1]
60
- reason = parts[2].split(":", 1)[1].strip() if ":" in parts[2] else ""
61
- issues[current_classifier].append((file_path, reason))
62
-
68
+ continue
69
+ issue = parse_issue(line)
70
+ if issue and current_classifier is not None:
71
+ issues[current_classifier].append(issue)
63
72
  return issues
64
73
 
65
74
 
@@ -391,19 +400,69 @@ def apply_autofix(
391
400
  Applies fixes to a single artefact file iteratively until it is valid
392
401
  or a fix cannot be applied. If single_pass is True, it runs for only one attempt.
393
402
  """
403
+ deterministic_markers_to_functions = {
404
+ "Filename-Title Mismatch": fix_title_mismatch,
405
+ "Invalid Contribution Reference": fix_contribution,
406
+ }
407
+
408
+ def populate_classified_artefact_info(force: bool = False):
409
+ nonlocal classified_artefact_info
410
+ if force or classified_artefact_info is None:
411
+ file_classifier = FileClassifier(os)
412
+ classified_artefact_info = file_classifier.classify_files()
413
+
414
+ def determine_attempt_count() -> int:
415
+ nonlocal single_pass, file_path
416
+ if single_pass:
417
+ print(f"Single-pass mode enabled for {file_path}. Running for 1 attempt.")
418
+ return 1
419
+ return 3
420
+
421
+ def apply_deterministic_fix() -> str:
422
+ nonlocal deterministic, deterministic_issue, corrected_text, file_path, artefact_text, artefact_class, classified_artefact_info
423
+ if deterministic and deterministic_issue:
424
+ print(f"Applying deterministic fix for '{deterministic_issue}'...")
425
+ fix_function = deterministic_markers_to_functions[deterministic_issue]
426
+ return fix_function(
427
+ file_path=file_path,
428
+ artefact_text=artefact_text,
429
+ artefact_class=artefact_class,
430
+ classified_artefact_info=classified_artefact_info,
431
+ )
432
+ return corrected_text
433
+
434
+ def apply_non_deterministic_fix() -> Optional[str]:
435
+ """
436
+ Applies LLM fix. Return None in case of an exception
437
+ """
438
+ nonlocal non_deterministic, deterministic_issue, corrected_text, artefact_type, current_reason, file_path, artefact_text
439
+ if non_deterministic and not deterministic_issue:
440
+ print("Applying non-deterministic (LLM) fix...")
441
+ prompt = construct_prompt(artefact_type, current_reason, file_path, artefact_text)
442
+ try:
443
+ corrected_artefact = run_agent(prompt, artefact_class)
444
+ corrected_text = corrected_artefact.serialize()
445
+ except Exception as e:
446
+ print(f" ❌ LLM agent failed to fix artefact at {file_path}: {e}")
447
+ return None
448
+ return corrected_text
449
+
450
+ def should_skip() -> bool:
451
+ nonlocal deterministic_issue, deterministic, non_deterministic
452
+ if not non_deterministic and not deterministic_issue:
453
+ print(f"Skipping non-deterministic fix for {file_path} as per request.")
454
+ return True
455
+ if not deterministic and deterministic_issue:
456
+ print(f"Skipping fix for {file_path} as per request flags.")
457
+ return True
458
+ return False
459
+
394
460
  artefact_type, artefact_class = determine_artefact_type_and_class(classifier)
395
461
  if artefact_type is None or artefact_class is None:
396
462
  return False
397
463
 
398
- if classified_artefact_info is None:
399
- file_classifier = FileClassifier(os)
400
- classified_artefact_info = file_classifier.classify_files()
401
-
402
- if single_pass:
403
- max_attempts = 1
404
- print(f"Single-pass mode enabled for {file_path}. Running for 1 attempt.")
405
- else:
406
- max_attempts = 3
464
+ populate_classified_artefact_info()
465
+ max_attempts = determine_attempt_count()
407
466
 
408
467
  for attempt in range(max_attempts):
409
468
  is_valid, current_reason = check_file(file_path, artefact_class, classified_artefact_info)
@@ -411,7 +470,7 @@ def apply_autofix(
411
470
  if is_valid:
412
471
  print(f"✅ Artefact at {file_path} is now valid.")
413
472
  return True
414
-
473
+
415
474
  print(f"Attempting to fix {file_path} (Attempt {attempt + 1}/{max_attempts})...")
416
475
  print(f" Reason: {current_reason}")
417
476
 
@@ -419,11 +478,6 @@ def apply_autofix(
419
478
  if artefact_text is None:
420
479
  return False
421
480
 
422
- deterministic_markers_to_functions = {
423
- "Filename-Title Mismatch": fix_title_mismatch,
424
- "Invalid Contribution Reference": fix_contribution,
425
- }
426
-
427
481
  deterministic_issue = next(
428
482
  (
429
483
  marker
@@ -433,45 +487,22 @@ def apply_autofix(
433
487
  None,
434
488
  )
435
489
 
436
- corrected_text = None
437
-
438
- if deterministic and deterministic_issue:
439
- print(f"Applying deterministic fix for '{deterministic_issue}'...")
440
- fix_function = deterministic_markers_to_functions[deterministic_issue]
441
- corrected_text = fix_function(
442
- file_path=file_path,
443
- artefact_text=artefact_text,
444
- artefact_class=artefact_class,
445
- classified_artefact_info=classified_artefact_info,
446
- )
447
-
448
- elif non_deterministic and not deterministic_issue:
449
- print("Applying non-deterministic (LLM) fix...")
450
- prompt = construct_prompt(artefact_type, current_reason, file_path, artefact_text)
451
- try:
452
- corrected_artefact = run_agent(prompt, artefact_class)
453
- corrected_text = corrected_artefact.serialize()
454
- except Exception as e:
455
- print(f" ❌ LLM agent failed to fix artefact at {file_path}: {e}")
456
- return False
457
-
458
- else:
459
- if not non_deterministic and not deterministic_issue:
460
- print(f"Skipping non-deterministic fix for {file_path} as per request.")
461
- else:
462
- print(f"Skipping fix for {file_path} as per request flags.")
490
+ if should_skip():
463
491
  return False
464
492
 
465
- if corrected_text is not None and corrected_text.strip() != artefact_text.strip():
466
- write_corrected_artefact(file_path, corrected_text)
493
+ corrected_text = None
467
494
 
468
- print(" File modified. Re-classifying artefact information for next check...")
469
- file_classifier = FileClassifier(os)
470
- classified_artefact_info = file_classifier.classify_files()
495
+ corrected_text = apply_deterministic_fix()
496
+ corrected_text = apply_non_deterministic_fix()
471
497
 
472
- else:
498
+ if corrected_text is None or corrected_text.strip() == artefact_text.strip():
473
499
  print(" Fixing attempt did not alter the file. Stopping to prevent infinite loop.")
474
500
  return False
475
501
 
502
+ write_corrected_artefact(file_path, corrected_text)
503
+
504
+ print(" File modified. Re-classifying artefact information for next check...")
505
+ populate_classified_artefact_info(force=True)
506
+
476
507
  print(f"❌ Failed to fix {file_path} after {max_attempts} attempts.")
477
- return False
508
+ return False
@@ -38,7 +38,7 @@ class BusinessgoalIntent(Intent):
38
38
  lines = []
39
39
 
40
40
  as_a_line = as_a_serializer(self.as_a)
41
-
41
+
42
42
  lines.append(f"In order to {self.in_order_to}")
43
43
  lines.append(as_a_line)
44
44
  lines.append(f"I want {self.i_want}")
@@ -47,39 +47,37 @@ class BusinessgoalIntent(Intent):
47
47
 
48
48
  @classmethod
49
49
  def deserialize_from_lines(cls, lines: List[str], start_index: int = 0) -> 'BusinessgoalIntent':
50
- in_order_to = None
51
- as_a = None
52
- i_want = None
53
-
54
- in_order_to_prefix = "In order to "
55
- as_a_prefix = "As a "
56
- as_a_prefix_alt = "As an "
57
- i_want_prefix = "I want "
50
+ prefixes = [
51
+ ("In order to ", "in_order_to"),
52
+ ("As a ", "as_a"),
53
+ ("As an ", "as_a"),
54
+ ("I want ", "i_want"),
55
+ ]
56
+ found = {"in_order_to": None, "as_a": None, "i_want": None}
57
+
58
+ def match_and_store(line):
59
+ for prefix, field in prefixes:
60
+ if line.startswith(prefix) and found[field] is None:
61
+ found[field] = line[len(prefix):].strip()
62
+ return True
63
+ return False
58
64
 
59
65
  index = start_index
60
- while index < len(lines) and (not in_order_to or not as_a or not i_want):
61
- line = lines[index]
62
- if line.startswith(in_order_to_prefix) and not in_order_to:
63
- in_order_to = line[len(in_order_to_prefix):].strip()
64
- elif line.startswith(as_a_prefix) and not as_a:
65
- as_a = line[len(as_a_prefix):].strip()
66
- elif line.startswith(as_a_prefix_alt) and not as_a:
67
- as_a = line[len(as_a_prefix_alt):].strip()
68
- elif line.startswith(i_want_prefix) and not i_want:
69
- i_want = line[len(i_want_prefix):].strip()
66
+ while index < len(lines) and any(v is None for v in found.values()):
67
+ match_and_store(lines[index])
70
68
  index += 1
71
69
 
72
- if not in_order_to:
70
+ if not found["in_order_to"]:
73
71
  raise ValueError("Could not find 'In order to' line")
74
- if not as_a:
72
+ if not found["as_a"]:
75
73
  raise ValueError("Could not find 'As a' line")
76
- if not i_want:
74
+ if not found["i_want"]:
77
75
  raise ValueError("Could not find 'I want' line")
78
76
 
79
77
  return cls(
80
- in_order_to=in_order_to,
81
- as_a=as_a,
82
- i_want=i_want
78
+ in_order_to=found["in_order_to"],
79
+ as_a=found["as_a"],
80
+ i_want=found["i_want"]
83
81
  )
84
82
 
85
83
 
@@ -48,39 +48,38 @@ class EpicIntent(Intent):
48
48
 
49
49
  @classmethod
50
50
  def deserialize_from_lines(cls, lines: List[str], start_index: int = 0) -> 'EpicIntent':
51
- in_order_to = None
52
- as_a = None
53
- i_want = None
54
-
55
- in_order_to_prefix = "In order to "
56
- as_a_prefix = "As a "
57
- as_a_prefix_alt = "As an "
58
- i_want_prefix = "I want "
51
+ prefixes = [
52
+ ("In order to ", "in_order_to"),
53
+ ("As a ", "as_a"),
54
+ ("As an ", "as_a"),
55
+ ("I want ", "i_want"),
56
+ ]
57
+
58
+ found = {"in_order_to": None, "as_a": None, "i_want": None}
59
+
60
+ def match_and_store(line):
61
+ for prefix, field in prefixes:
62
+ if line.startswith(prefix) and found[field] is None:
63
+ found[field] = line[len(prefix):].strip()
64
+ return True
65
+ return False
59
66
 
60
67
  index = start_index
61
- while index < len(lines) and (not in_order_to or not as_a or not i_want):
62
- line = lines[index]
63
- if line.startswith(in_order_to_prefix) and not in_order_to:
64
- in_order_to = line[len(in_order_to_prefix):].strip()
65
- elif line.startswith(as_a_prefix) and not as_a:
66
- as_a = line[len(as_a_prefix):].strip()
67
- elif line.startswith(as_a_prefix_alt) and not as_a:
68
- as_a = line[len(as_a_prefix_alt):].strip()
69
- elif line.startswith(i_want_prefix) and not i_want:
70
- i_want = line[len(i_want_prefix):].strip()
68
+ while index < len(lines) and any(v is None for v in found.values()):
69
+ match_and_store(lines[index])
71
70
  index += 1
72
71
 
73
- if not in_order_to:
72
+ if not found["in_order_to"]:
74
73
  raise ValueError("Could not find 'In order to' line")
75
- if not as_a:
74
+ if not found["as_a"]:
76
75
  raise ValueError("Could not find 'As a' line")
77
- if not i_want:
76
+ if not found["i_want"]:
78
77
  raise ValueError("Could not find 'I want' line")
79
78
 
80
79
  return cls(
81
- in_order_to=in_order_to,
82
- as_a=as_a,
83
- i_want=i_want
80
+ in_order_to=found["in_order_to"],
81
+ as_a=found["as_a"],
82
+ i_want=found["i_want"]
84
83
  )
85
84
 
86
85