scopemate 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- scopemate/__init__.py +16 -0
- scopemate/__main__.py +10 -0
- scopemate/breakdown.py +466 -0
- scopemate/cli.py +174 -0
- scopemate/core.py +23 -0
- scopemate/engine.py +226 -0
- scopemate/interaction.py +292 -0
- scopemate/llm.py +343 -0
- scopemate/models.py +157 -0
- scopemate/storage.py +106 -0
- scopemate/task_analysis.py +357 -0
- scopemate-0.1.0.dist-info/METADATA +410 -0
- scopemate-0.1.0.dist-info/RECORD +17 -0
- scopemate-0.1.0.dist-info/WHEEL +5 -0
- scopemate-0.1.0.dist-info/entry_points.txt +2 -0
- scopemate-0.1.0.dist-info/licenses/LICENSE +21 -0
- scopemate-0.1.0.dist-info/top_level.txt +1 -0
scopemate/__init__.py
ADDED
@@ -0,0 +1,16 @@
|
|
1
|
+
"""🪜 scopemate - A CLI tool for Purpose/Scope/Outcome planning
|
2
|
+
|
3
|
+
This package provides tools for breaking down complex tasks using
|
4
|
+
the Purpose/Scope/Outcome planning approach.
|
5
|
+
"""
|
6
|
+
|
7
|
+
__version__ = "0.1.1"
|
8
|
+
|
9
|
+
# Public API
|
10
|
+
from .models import (
|
11
|
+
ScopeMateTask, Purpose, Scope, Outcome, Meta, get_utc_now
|
12
|
+
)
|
13
|
+
from .engine import TaskEngine, interactive_builder
|
14
|
+
from .storage import save_plan, load_plan
|
15
|
+
from .llm import estimate_scope
|
16
|
+
from .breakdown import suggest_breakdown
|
scopemate/__main__.py
ADDED
scopemate/breakdown.py
ADDED
@@ -0,0 +1,466 @@
|
|
1
|
+
#!/usr/bin/env python3
|
2
|
+
"""
|
3
|
+
scopemate Breakdown - Functions for breaking down tasks into subtasks
|
4
|
+
|
5
|
+
This module provides task breakdown functionality, including subtask generation,
|
6
|
+
alternative approach suggestion, and interactive refinement.
|
7
|
+
"""
|
8
|
+
import uuid
|
9
|
+
from typing import List, Dict, Any, Optional
|
10
|
+
from pydantic import ValidationError
|
11
|
+
|
12
|
+
from .models import (
|
13
|
+
ScopeMateTask, Purpose, Scope, Outcome, Meta,
|
14
|
+
SIZE_COMPLEXITY, TIME_COMPLEXITY, get_utc_now
|
15
|
+
)
|
16
|
+
from .llm import call_llm, suggest_alternative_approaches, update_parent_with_child_context
|
17
|
+
from .interaction import prompt_user, build_custom_subtask, generate_concise_title
|
18
|
+
|
19
|
+
|
20
|
+
def suggest_breakdown(task: ScopeMateTask) -> List[ScopeMateTask]:
    """
    Use LLM to suggest a breakdown of a task into smaller subtasks.

    Builds a structured prompt (with extra guidance when the breakdown is
    driven purely by a long time estimate), asks the LLM for a JSON
    ``subtasks`` array, converts each raw entry into a validated
    ``ScopeMateTask``, and finally hands the candidates to the user for
    interactive acceptance/modification.

    Args:
        task: The ScopeMateTask to break down

    Returns:
        List of ScopeMateTask objects representing subtasks, as chosen by the
        user in ``interactive_breakdown`` (never empty — a default subtask is
        substituted if everything is skipped).
    """
    # Check if we're breaking down due to size complexity or time estimate.
    # A task that is long but not complex gets time-oriented instructions.
    is_complex = task.scope.size in ["complex", "uncertain", "pioneering"]
    is_long_duration = task.scope.time_estimate in ["sprint", "multi-sprint"]
    is_time_based_breakdown = is_long_duration and not is_complex

    # Add specialized instructions for time-based breakdown
    time_breakdown_context = ""
    if is_time_based_breakdown:
        time_breakdown_context = (
            f"This task has a time estimate of '{task.scope.time_estimate}' which is longer than ideal. "
            f"Break this down into smaller time units (week or less) even though it's not technically complex. "
            f"Focus on sequential stages or parallel workstreams that can be completed independently. "
            f"Ensure the subtasks represent concrete deliverables that can be completed in a week or less."
        )

    # Build the prompt for LLM. The embedded JSON skeleton pins the exact
    # field names/values expected by _process_raw_subtask and the models.
    prompt = (
        f"You are a product manager breaking down a task into smaller, SIMPLER subtasks.\n\n"
        f"Break the following task into 2-5 smaller subtasks. Each subtask MUST be simpler than the parent task.\n\n"
        f"IMPORTANT CONSTRAINTS:\n"
        f"1. Each subtask MUST be smaller in scope than the parent task\n"
        f"2. Subtask titles should be CONCISE (max 60 chars) and should NOT repeat the entire parent title\n"
        f"3. If parent task is 'complex' or larger, children should be at most 'straightforward'\n"
        f"4. If parent task has time_estimate 'sprint' or larger, children should use smaller estimates (week or less)\n"
        f"5. Each subtask should represent a clear, focused piece of work\n"
        f"6. CRITICAL: The set of subtasks TOGETHER must cover ALL key aspects needed to accomplish the parent task\n"
        f"7. It's acceptable if a subtask still needs further breakdown in a future iteration - focus on completeness now\n\n"
        f"{time_breakdown_context}\n\n"
        f"For each subtask in the array, follow this EXACT format for field names and values:\n\n"
        f"```\n"
        f"{{\n"
        f" \"subtasks\": [\n"
        f" {{\n"
        f" \"id\": \"TASK-abc123\",\n"
        f" \"title\": \"Short focused subtask title\",\n"
        f" \"purpose\": {{\n"
        f" \"detailed_description\": \"Detailed multi-paragraph description of the purpose of this subtask\",\n"
        f" \"alignment\": [\"Strategic goal 1\", \"Strategic goal 2\"],\n"
        f" \"urgency\": \"strategic\"\n"
        f" }},\n"
        f" \"scope\": {{\n"
        f" \"size\": \"straightforward\",\n"
        f" \"time_estimate\": \"week\",\n"
        f" \"dependencies\": [\"Dependency 1\", \"Dependency 2\"],\n"
        f" \"risks\": [\"Risk 1\", \"Risk 2\"]\n"
        f" }},\n"
        f" \"outcome\": {{\n"
        f" \"type\": \"customer-facing\",\n"
        f" \"detailed_outcome_definition\": \"Detailed multi-paragraph description of the outcome for this subtask\",\n"
        f" \"acceptance_criteria\": [\"Criterion 1\", \"Criterion 2\"],\n"
        f" \"metric\": \"Success measurement\",\n"
        f" \"validation_method\": \"How to validate\"\n"
        f" }},\n"
        f" \"meta\": {{\n"
        f" \"status\": \"backlog\",\n"
        f" \"confidence\": \"medium\",\n"
        f" \"team\": \"Frontend\"\n"
        f" }}\n"
        f" }},\n"
        f" // Additional subtasks follow the same format\n"
        f" ]\n"
        f"}}\n"
        f"```\n\n"
        f"For the team field, use one of: Product, Design, Frontend, Backend, ML, Infra, Testing, Other. Choose the most appropriate team for each subtask.\n\n"
        f"Return your response as a JSON object with a 'subtasks' array of subtask objects.\n\n"
        f"Here is the task to break down:\n{task.model_dump_json(indent=2)}"
    )

    # Get LLM response and pull out the raw subtask dicts (with fallbacks
    # for malformed responses — see _extract_subtasks_from_response).
    response = call_llm(prompt)
    raw_list = _extract_subtasks_from_response(response)

    # Process raw subtasks into ScopeMateTask objects.
    # Defaults of 3 / 4 match a mid-range size and a 'sprint'-class estimate
    # when the parent holds an unknown value — TODO confirm against
    # SIZE_COMPLEXITY / TIME_COMPLEXITY in models.py.
    parent_size_complexity = SIZE_COMPLEXITY.get(task.scope.size, 3)
    parent_time_complexity = TIME_COMPLEXITY.get(task.scope.time_estimate, 4)

    subtasks = []
    for raw in raw_list:
        try:
            # Process each subtask with constraints based on parent
            subtask = _process_raw_subtask(raw, task, parent_size_complexity, parent_time_complexity)
            subtasks.append(subtask)
        except ValidationError as e:
            # Skip entries the models reject rather than aborting the breakdown.
            print(f"[Warning] Skipping invalid subtask: {e}")

    # Get interactive user-driven breakdown instead of just returning the processed tasks
    return interactive_breakdown(task, subtasks)
|
117
|
+
|
118
|
+
|
119
|
+
def _extract_subtasks_from_response(response: Dict[str, Any]) -> List[Dict[str, Any]]:
|
120
|
+
"""
|
121
|
+
Extract subtasks from LLM response.
|
122
|
+
|
123
|
+
Args:
|
124
|
+
response: LLM response dictionary
|
125
|
+
|
126
|
+
Returns:
|
127
|
+
List of raw subtask dictionaries
|
128
|
+
"""
|
129
|
+
if not isinstance(response, dict):
|
130
|
+
print(f"[Warning] LLM response is not a dictionary: {type(response)}")
|
131
|
+
return []
|
132
|
+
|
133
|
+
# Try to extract subtasks array
|
134
|
+
subtasks = response.get("subtasks", [])
|
135
|
+
if not isinstance(subtasks, list):
|
136
|
+
print(f"[Warning] 'subtasks' field is not an array: {type(subtasks)}")
|
137
|
+
|
138
|
+
# Fallback: try to find any list in the response
|
139
|
+
for k, v in response.items():
|
140
|
+
if isinstance(v, list) and len(v) > 0 and isinstance(v[0], dict):
|
141
|
+
print(f"[Warning] Using list found in field '{k}' instead of 'subtasks'")
|
142
|
+
return v
|
143
|
+
|
144
|
+
# Last resort: treat the entire response as a single subtask if it looks like one
|
145
|
+
if "title" in response or "id" in response:
|
146
|
+
print("[Warning] No subtasks array found. Creating a single subtask from the entire response.")
|
147
|
+
return [response]
|
148
|
+
|
149
|
+
print(f"[Error] Could not extract subtasks from response: {response}")
|
150
|
+
return []
|
151
|
+
|
152
|
+
# Filter out non-dict items
|
153
|
+
valid_subtasks = [item for item in subtasks if isinstance(item, dict)]
|
154
|
+
|
155
|
+
# Debug output
|
156
|
+
print(f"[Info] Extracted {len(valid_subtasks)} valid subtasks from response")
|
157
|
+
|
158
|
+
# Validate essential fields in each subtask
|
159
|
+
for i, subtask in enumerate(valid_subtasks):
|
160
|
+
# Check for required purpose and outcome structures
|
161
|
+
if "purpose" not in subtask:
|
162
|
+
print(f"[Warning] Subtask {i} missing 'purpose' field, adding empty structure")
|
163
|
+
subtask["purpose"] = {}
|
164
|
+
elif not isinstance(subtask["purpose"], dict):
|
165
|
+
print(f"[Warning] Subtask {i} has non-dict 'purpose' field, replacing with empty dict")
|
166
|
+
subtask["purpose"] = {}
|
167
|
+
|
168
|
+
if "outcome" not in subtask:
|
169
|
+
print(f"[Warning] Subtask {i} missing 'outcome' field, adding empty structure")
|
170
|
+
subtask["outcome"] = {}
|
171
|
+
elif not isinstance(subtask["outcome"], dict):
|
172
|
+
print(f"[Warning] Subtask {i} has non-dict 'outcome' field, replacing with empty dict")
|
173
|
+
subtask["outcome"] = {}
|
174
|
+
|
175
|
+
return valid_subtasks
|
176
|
+
|
177
|
+
|
178
|
+
def _process_raw_subtask(
    raw: Dict[str, Any],
    parent_task: ScopeMateTask,
    parent_size_complexity: int,
    parent_time_complexity: int
) -> ScopeMateTask:
    """
    Process a raw subtask dictionary into a validated ScopeMateTask.

    Starts from safe defaults (inheriting alignment, urgency, outcome type
    and team from the parent) and overrides them with any valid values
    supplied by the LLM.

    Fix: ``parent_size_complexity`` and ``parent_time_complexity`` were
    previously accepted but never used, so the LLM could return a child
    *larger* than its parent despite the documented breakdown constraint.
    They now gate the scope overrides: a raw size/time value is only
    accepted when its complexity does not exceed the parent's.

    Args:
        raw: Raw subtask dictionary
        parent_task: Parent ScopeMateTask
        parent_size_complexity: Complexity value for parent size
        parent_time_complexity: Complexity value for parent time estimate

    Returns:
        A validated ScopeMateTask

    Raises:
        ValidationError: If the assembled fields fail model validation.
    """
    # Start with basic defaults; generate an id when the LLM omitted one.
    task_id = raw.get("id", f"TASK-{uuid.uuid4().hex[:8]}")
    now = get_utc_now()

    # Process the title, making it concise (60 chars mirrors the prompt rule).
    raw_title = raw.get("title", "Untitled subtask")
    title = generate_concise_title(parent_task.title, raw_title)[:60]

    # Ensure the raw dictionaries exist to avoid attribute access errors.
    raw_purpose = raw.get("purpose", {})
    if not isinstance(raw_purpose, dict):
        raw_purpose = {}

    raw_outcome = raw.get("outcome", {})
    if not isinstance(raw_outcome, dict):
        raw_outcome = {}

    # Create default purpose, scope, outcome, and meta,
    # inheriting from parent where appropriate.
    purpose_data = {
        "detailed_description": raw_purpose.get("detailed_description", f"Subtask for: {parent_task.title}"),
        "alignment": parent_task.purpose.alignment.copy(),
        "urgency": parent_task.purpose.urgency
    }

    # Default scope is deliberately simple/short for a breakdown child.
    scope_data = {
        "size": "straightforward",
        "time_estimate": "days",
        "dependencies": [],
        "risks": []
    }

    outcome_data = {
        "type": parent_task.outcome.type,
        "detailed_outcome_definition": raw_outcome.get("detailed_outcome_definition", f"Delivers part of: {parent_task.title}"),
        "acceptance_criteria": raw_outcome.get("acceptance_criteria", []),
        "metric": raw_outcome.get("metric"),
        "validation_method": raw_outcome.get("validation_method")
    }

    meta_data = {
        "status": "backlog",
        "priority": None,
        "created": now,
        "updated": now,
        "due_date": None,
        "confidence": "medium",
        "team": parent_task.meta.team
    }

    # Override with provided data if available, valid, AND no more complex
    # than the parent — a breakdown child must not grow in scope.
    raw_scope = raw.get("scope", {})
    if isinstance(raw_scope, dict):
        size = raw_scope.get("size")
        if (size in ["trivial", "straightforward", "complex", "uncertain", "pioneering"]
                and SIZE_COMPLEXITY.get(size, 3) <= parent_size_complexity):
            scope_data["size"] = size

        time_estimate = raw_scope.get("time_estimate")
        if (time_estimate in ["hours", "days", "week", "sprint", "multi-sprint"]
                and TIME_COMPLEXITY.get(time_estimate, 4) <= parent_time_complexity):
            scope_data["time_estimate"] = time_estimate

        if isinstance(raw_scope.get("dependencies"), list):
            scope_data["dependencies"] = raw_scope["dependencies"]

        if isinstance(raw_scope.get("risks"), list):
            scope_data["risks"] = raw_scope["risks"]

    # Create the subtask; pydantic validation happens here.
    subtask = ScopeMateTask(
        id=task_id,
        title=title,
        purpose=Purpose(**purpose_data),
        scope=Scope(**scope_data),
        outcome=Outcome(**outcome_data),
        meta=Meta(**meta_data),
        parent_id=parent_task.id
    )

    return subtask
|
275
|
+
|
276
|
+
|
277
|
+
def interactive_breakdown(task: ScopeMateTask, suggested_subtasks: List[ScopeMateTask]) -> List[ScopeMateTask]:
    """
    Handle interactive breakdown of a task with user input on alternatives.

    Two phases: (1) optionally let the user pick an alternative implementation
    approach and fold it back into the parent task's description/scope;
    (2) walk through each suggested subtask letting the user accept, modify,
    replace with a custom subtask, or skip it.

    Args:
        task: The parent ScopeMateTask to break down
        suggested_subtasks: List of LLM-suggested subtasks

    Returns:
        List of final ScopeMateTask objects to use as subtasks (never empty:
        a default subtask is created when the user skips everything)
    """
    print(f"\n=== Interactive Breakdown for: {task.title} ===")

    # First, check if there are alternative implementation approaches
    alternatives = suggest_alternative_approaches(task)
    alt_list = alternatives.get("alternatives", [])

    # If we have meaningful alternatives, present them to the user
    if alt_list:
        print("\n=== Alternative Implementation Approaches ===")
        print("The following alternative approaches have been identified for this task:")

        for i, alt in enumerate(alt_list):
            # Display the alternative with scope and time estimate
            print(f"\n{i+1}. {alt['name']}")

            # Add scope and time estimate info (defaults for missing keys)
            scope = alt.get('scope', 'uncertain')
            time_estimate = alt.get('time_estimate', 'sprint')
            print(f" Scope: {scope} | Est: {time_estimate}")

            print(f" {alt['description']}")

        # Ask user which approach they want to use
        choice = prompt_user(
            "\nWhich approach would you like to use? Enter a number or 'n' for none",
            default="n",
            choices=[str(i+1) for i in range(len(alt_list))] + ["n"]
        )

        # If user selected an alternative, update the task description to reflect their choice
        if choice.lower() != "n":
            try:
                selected_idx = int(choice) - 1
                if 0 <= selected_idx < len(alt_list):
                    selected_alt = alt_list[selected_idx]
                    print(f"\n✅ Selected: {selected_alt['name']}")

                    # Ask if they want to update the parent task to reflect this choice
                    update_parent = prompt_user(
                        "Update parent task description to reflect this choice?",
                        default="y",
                        choices=["y","n"]
                    )

                    if update_parent.lower() == "y":
                        # Update the parent task
                        update_text = f"Using approach: {selected_alt['name']} - {selected_alt['description']}"

                        # Also update scope and time estimate if available
                        if 'scope' in selected_alt and 'time_estimate' in selected_alt:
                            # Consider updating parent task's scope and time estimate based on selection
                            update_time_scope = prompt_user(
                                "Also update task scope and time estimate to match selected approach?",
                                default="y",
                                choices=["y", "n"]
                            )

                            if update_time_scope.lower() == "y":
                                task.scope.size = selected_alt['scope']
                                task.scope.time_estimate = selected_alt['time_estimate']
                                print(f"✅ Updated scope to {selected_alt['scope']} and time estimate to {selected_alt['time_estimate']}")

                        task.purpose.detailed_description = f"{task.purpose.detailed_description}\n\n{update_text}"
                        task.meta.updated = get_utc_now()
                        print("✅ Updated parent task with chosen approach")
            except ValueError:
                # Non-numeric input other than 'n': silently treat as "no selection".
                pass

    # Process each suggested subtask with user input
    final_subtasks = []
    # Guard so at most one custom child propagates its context to the parent.
    parent_updated = False

    print("\n=== Suggested Subtasks ===")
    print("The following subtasks have been suggested:")

    for i, subtask in enumerate(suggested_subtasks):
        print(f"\n{i+1}. {subtask.title}")
        print(f" Size: {subtask.scope.size} | Est: {subtask.scope.time_estimate}")

        # Ask user what to do with this subtask
        choice = prompt_user(
            f"For subtask {i+1}, do you want to: (a)ccept, (m)odify, (c)ustom, or (s)kip?",
            default="a",
            choices=["a", "m", "c", "s"]
        )

        if choice.lower() == "a":
            # Accept as-is
            final_subtasks.append(subtask)
            print(f"✅ Added: {subtask.title}")

        elif choice.lower() == "m":
            # Modify title only (simplified modification flow)
            new_title = prompt_user("New title", default=subtask.title)

            # Update the subtask in place
            subtask.title = new_title
            subtask.meta.updated = get_utc_now()

            final_subtasks.append(subtask)
            print(f"✅ Added modified: {subtask.title}")

        elif choice.lower() == "c":
            # Create a totally custom subtask
            custom_subtask = build_custom_subtask(task)
            final_subtasks.append(custom_subtask)
            print(f"✅ Added custom: {custom_subtask.title}")

            # Ask if user wants to update parent task with this custom child task
            update_choice = prompt_user(
                "Update parent task details with this custom child context?",
                default="y",
                choices=["y", "n"]
            )

            if update_choice.lower() == "y" and not parent_updated:
                # NOTE(review): this rebinds the local `task` only. The change
                # is visible to the caller only if the helper mutates the task
                # in place; if it returns a fresh object, the caller's copy is
                # left unchanged — confirm against llm.update_parent_with_child_context.
                task = update_parent_with_child_context(task, custom_subtask)
                parent_updated = True
                print("✅ Updated parent task with custom child context")

        else:  # Skip
            print(f"⏭️ Skipped: {subtask.title}")

    # Handle case where no subtasks were selected
    if not final_subtasks:
        # Create at least one default subtask so the breakdown is never empty
        default_subtask = _create_default_subtask(task)
        final_subtasks.append(default_subtask)
        print(f"✅ Added default subtask: {default_subtask.title}")

    return final_subtasks
|
419
|
+
|
420
|
+
|
421
|
+
def _create_default_subtask(parent_task: ScopeMateTask) -> ScopeMateTask:
    """
    Create a default subtask for a parent task when automatic breakdown is required.

    Used as a fallback when the user rejects every suggested subtask, so a
    breakdown always yields at least one child. The child is a generic
    "first stage" deliverable inheriting alignment, urgency, outcome type,
    confidence and team from its parent.

    Fix: the generated id now uses an 8-hex-char suffix (`hex[:8]`) for
    consistency with every other id generator in the package (this function
    alone used `hex[:6]`). Also calls ``get_utc_now()`` once instead of twice
    so created/updated are guaranteed identical.

    Args:
        parent_task: The parent task that needs breakdown

    Returns:
        A new ScopeMateTask as a simpler, shorter subtask
    """
    now = get_utc_now()

    # Create a generic "first stage" subtask
    default_subtask = ScopeMateTask(
        id=f"TASK-{uuid.uuid4().hex[:8]}",
        title=f"First stage of {parent_task.title}",
        purpose=Purpose(
            detailed_description=f"Initial phase of work for {parent_task.title}",
            alignment=parent_task.purpose.alignment.copy(),
            urgency=parent_task.purpose.urgency
        ),
        scope=Scope(
            # Ensure simpler and shorter than parent
            size="straightforward" if parent_task.scope.size != "trivial" else "trivial",
            time_estimate="days",
            dependencies=[],
            risks=[]
        ),
        outcome=Outcome(
            type=parent_task.outcome.type,
            detailed_outcome_definition=f"First deliverable for {parent_task.title}",
            acceptance_criteria=[],
            metric=None,
            validation_method=None
        ),
        meta=Meta(
            status="backlog",
            priority=None,
            created=now,
            updated=now,
            due_date=None,
            confidence=parent_task.meta.confidence or "medium",
            team=parent_task.meta.team
        ),
        parent_id=parent_task.id
    )

    return default_subtask
|
scopemate/cli.py
ADDED
@@ -0,0 +1,174 @@
|
|
1
|
+
#!/usr/bin/env python3
|
2
|
+
"""
|
3
|
+
🪜 scopemate CLI - Command-line interface for scopemate
|
4
|
+
|
5
|
+
Provides command-line interface for scopemate with options for setting purpose,
|
6
|
+
outcome, and output file.
|
7
|
+
"""
|
8
|
+
|
9
|
+
import sys
|
10
|
+
import argparse
|
11
|
+
import uuid
|
12
|
+
from typing import List
|
13
|
+
|
14
|
+
from .models import (
|
15
|
+
ScopeMateTask, Purpose, Scope, Outcome, Meta, get_utc_now
|
16
|
+
)
|
17
|
+
from .storage import save_plan
|
18
|
+
from .llm import estimate_scope, generate_title_from_purpose_outcome
|
19
|
+
from .breakdown import suggest_breakdown
|
20
|
+
from .task_analysis import check_and_update_parent_estimates
|
21
|
+
from .engine import TaskEngine
|
22
|
+
|
23
|
+
|
24
|
+
def create_task_from_args(purpose: str, outcome: str) -> ScopeMateTask:
    """
    Create a ScopeMateTask from command line arguments.

    Builds a skeleton task (uncertain size, sprint-length estimate), asks the
    LLM for a concise title, then replaces the placeholder scope with an
    LLM-generated estimate.

    Args:
        purpose: The purpose description from arguments
        outcome: The outcome description from arguments

    Returns:
        A ScopeMateTask object with values from arguments
    """
    # Fresh identity and timestamps for the new task.
    new_id = f"TASK-{uuid.uuid4().hex[:8]}"
    created_at = get_utc_now()

    # Generate a concise title from purpose and outcome
    title = generate_title_from_purpose_outcome(purpose, outcome)

    # Assemble the task with placeholder scope values.
    task = ScopeMateTask(
        id=new_id,
        title=title,
        purpose=Purpose(
            detailed_description=purpose,
            alignment=[],
            urgency="strategic",
        ),
        scope=Scope(
            size="uncertain",
            time_estimate="sprint",
            dependencies=[],
            risks=[],
        ),
        outcome=Outcome(
            type="customer-facing",
            detailed_outcome_definition=outcome,
            acceptance_criteria=[],
            metric=None,
            validation_method=None,
        ),
        meta=Meta(
            status="backlog",
            priority=None,
            created=created_at,
            updated=created_at,
            due_date=None,
            confidence="medium",
        ),
        parent_id=None,
    )

    # Replace the placeholder scope with an LLM-based estimate.
    task.scope = estimate_scope(task)

    return task
|
79
|
+
|
80
|
+
|
81
|
+
def process_task_with_breakdown(task: ScopeMateTask) -> List[ScopeMateTask]:
    """
    Process a task by generating subtasks and checking estimates.

    Args:
        task: The ScopeMateTask to process

    Returns:
        List of ScopeMateTask objects including the parent and any subtasks,
        with parent estimates reconciled against the children.
    """
    # Break the task down, then reconcile parent estimates over the full set.
    children = suggest_breakdown(task)
    return check_and_update_parent_estimates([task] + children)
|
101
|
+
|
102
|
+
|
103
|
+
def command_line() -> None:
    """
    Parse command line arguments and execute the appropriate action.

    Interactive mode hands control to the guided TaskEngine workflow;
    otherwise both --purpose and --outcome are required and a one-shot
    breakdown is generated and saved to --output. Exits with status 1 on
    missing required arguments.
    """
    parser = argparse.ArgumentParser(
        description="🪜 scopemate v.0.1.0 - Break down complex projects with LLMs",
        epilog=(
            "Purpose: why it matters\n"
            "Outcome: what will change once it's done\n"
            "Scope: how will be delivered (this is where LLM can help)"
        ),
        formatter_class=argparse.RawTextHelpFormatter,
    )

    parser.add_argument(
        "--interactive",
        action="store_true",
        help="Launch guided workflow to define task, generate LLM-powered breakdowns, and estimate scope",
    )

    parser.add_argument(
        "--outcome",
        help="🎯 Outcome: Clearly define what will be delivered and how success will be measured (asks: What will change once this is done?)",
    )

    parser.add_argument(
        "--purpose",
        help="🧭 Purpose: Clearly define why this project matters strategically (asks: Why does this matter now?)",
    )

    parser.add_argument(
        "--output",
        default="scopemate_plan.json",
        help="JSON file to save the task breakdown and scope estimates (default: scopemate_plan.json)",
    )

    args = parser.parse_args()

    # Interactive mode: delegate everything to the guided engine workflow.
    if args.interactive:
        TaskEngine().run()
        return

    # Non-interactive mode needs both anchors of the plan up front.
    if not (args.purpose and args.outcome):
        parser.print_help()
        print("\nError: Both --purpose and --outcome are required in non-interactive mode.")
        sys.exit(1)

    # One-shot flow: build the task, break it down, persist the plan.
    print("Creating task from command line arguments...")
    task = create_task_from_args(args.purpose, args.outcome)

    print("Generating subtasks...")
    all_tasks = process_task_with_breakdown(task)

    save_plan(all_tasks, args.output)
|
159
|
+
|
160
|
+
|
161
|
+
def main():
    """
    Main entry point for the scopemate command-line tool.

    Wraps command_line() so that Ctrl-C and unexpected errors both print a
    short message and exit with status 1 instead of dumping a traceback.
    """
    failure = None
    try:
        command_line()
        return
    except KeyboardInterrupt:
        failure = "\nOperation cancelled."
    except Exception as exc:
        failure = f"Error: {str(exc)}"
    print(failure)
    sys.exit(1)


if __name__ == "__main__":
    main()
|
scopemate/core.py
ADDED
@@ -0,0 +1,23 @@
|
|
1
|
+
#!/usr/bin/env python3
|
2
|
+
"""scopemate - CLI Tool for Purpose/Scope/Outcome planning (Legacy module)
|
3
|
+
|
4
|
+
This is a legacy module maintained for backward compatibility.
|
5
|
+
All functionality has been moved to more specialized modules.
|
6
|
+
"""
|
7
|
+
import sys
|
8
|
+
|
9
|
+
def interactive_builder():
    """
    Legacy function that redirects to the new implementation.

    This is kept for backward compatibility. New code should use TaskEngine directly.
    """
    # Imported lazily so merely importing this legacy module stays cheap
    # and avoids a circular import with the engine package.
    from .engine import interactive_builder as _engine_builder
    _engine_builder()
|
17
|
+
|
18
|
+
if __name__ == "__main__":
|
19
|
+
try:
|
20
|
+
interactive_builder()
|
21
|
+
except KeyboardInterrupt:
|
22
|
+
print("\nOperation cancelled. Progress saved in checkpoint.")
|
23
|
+
sys.exit(1)
|