scopemate-0.1.0-py3-none-any.whl → scopemate-0.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- scopemate/__init__.py +1 -1
- scopemate/breakdown.py +30 -2
- scopemate/cli.py +57 -11
- scopemate/engine.py +188 -10
- scopemate/interaction.py +34 -4
- scopemate/llm.py +257 -38
- scopemate/models.py +63 -1
- scopemate/storage.py +218 -4
- scopemate/task_analysis.py +47 -6
- {scopemate-0.1.0.dist-info → scopemate-0.2.0.dist-info}/METADATA +143 -12
- scopemate-0.2.0.dist-info/RECORD +17 -0
- {scopemate-0.1.0.dist-info → scopemate-0.2.0.dist-info}/WHEEL +1 -1
- scopemate-0.1.0.dist-info/RECORD +0 -17
- {scopemate-0.1.0.dist-info → scopemate-0.2.0.dist-info}/entry_points.txt +0 -0
- {scopemate-0.1.0.dist-info → scopemate-0.2.0.dist-info}/licenses/LICENSE +0 -0
- {scopemate-0.1.0.dist-info → scopemate-0.2.0.dist-info}/top_level.txt +0 -0
scopemate/llm.py
CHANGED
@@ -6,8 +6,14 @@ This module provides functions for interacting with LLMs for task estimation,
 breakdown, and optimization.
 """
 import json
+import os
 from typing import Dict, Any, List, Optional
+from enum import Enum, auto
+
+# Import LLM providers
 from openai import OpenAI
+import google.generativeai as genai
+from anthropic import Anthropic
 
 from .models import (
     ScopeMateTask, Scope, TIME_COMPLEXITY, SIZE_COMPLEXITY,
@@ -17,44 +23,258 @@ from .models import (
 # -------------------------------
 # Configuration
 # -------------------------------
-
+class LLMProvider(Enum):
+    """Supported LLM providers"""
+    OPENAI = auto()
+    GEMINI = auto()
+    CLAUDE = auto()
+
+# Default configuration
+DEFAULT_PROVIDER = LLMProvider.OPENAI
+DEFAULT_OPENAI_MODEL = "o4-mini"
+DEFAULT_GEMINI_MODEL = "gemini-2.0-flash"
+DEFAULT_CLAUDE_MODEL = "claude-3-7-sonnet-20250219"
+
+# Provider-specific model mapping
+DEFAULT_MODELS = {
+    LLMProvider.OPENAI: DEFAULT_OPENAI_MODEL,
+    LLMProvider.GEMINI: DEFAULT_GEMINI_MODEL,
+    LLMProvider.CLAUDE: DEFAULT_CLAUDE_MODEL
+}
+
+# Get provider from environment variable or use default
+def get_llm_provider() -> LLMProvider:
+    """Get the LLM provider from environment variable or use default"""
+    provider_str = os.environ.get("SCOPEMATE_LLM_PROVIDER", "").upper()
+    if provider_str == "OPENAI":
+        return LLMProvider.OPENAI
+    elif provider_str == "GEMINI":
+        return LLMProvider.GEMINI
+    elif provider_str == "CLAUDE":
+        return LLMProvider.CLAUDE
+    return DEFAULT_PROVIDER
+
+# Get model for the provider from environment variable or use default
+def get_llm_model(provider: LLMProvider = None) -> str:
+    """Get the LLM model for the provider from environment variable or use default"""
+    if provider is None:
+        provider = get_llm_provider()
+
+    if provider == LLMProvider.OPENAI:
+        return os.environ.get("SCOPEMATE_OPENAI_MODEL", DEFAULT_OPENAI_MODEL)
+    elif provider == LLMProvider.GEMINI:
+        return os.environ.get("SCOPEMATE_GEMINI_MODEL", DEFAULT_GEMINI_MODEL)
+    elif provider == LLMProvider.CLAUDE:
+        return os.environ.get("SCOPEMATE_CLAUDE_MODEL", DEFAULT_CLAUDE_MODEL)
+
+    return DEFAULT_MODELS[DEFAULT_PROVIDER]
 
 # -------------------------------
 # LLM Interaction
 # -------------------------------
-def call_llm(prompt: str, model: str =
+def call_llm(prompt: str, system_prompt: str = None, model: str = None, provider: LLMProvider = None) -> dict:
     """
     Invoke LLM to get a structured JSON response.
 
+    This function is the core LLM integration point for scopemate, handling all
+    communication with the supported LLM APIs. It's designed to always return structured
+    JSON data that can be easily processed by the application.
+
+    The function:
+    1. Creates a client for the selected provider (OpenAI, Gemini, or Claude)
+    2. Configures a system prompt that instructs the model to return valid JSON
+    3. Sends the user's prompt with the task-specific instructions
+    4. Parses and returns the JSON response
+
+    Error handling is built in to gracefully handle JSON parsing failures by
+    printing diagnostic information and returning an empty dictionary rather
+    than crashing.
+
     Args:
-        prompt: The prompt to send to the LLM
-
+        prompt (str): The prompt to send to the LLM, containing full instructions
+            and any task data needed for context
+        model (str, optional): The model identifier to use (defaults to provider's default model)
+        provider (LLMProvider, optional): The LLM provider to use (defaults to configured provider)
 
     Returns:
-        A dictionary containing the parsed JSON response
+        dict: A dictionary containing the parsed JSON response from the LLM.
+            Returns an empty dict {} if parsing fails.
     """
-
-
-
-
-
-
-
-                "Respond only with valid JSON. Follow the exact requested format in the user's prompt, "
-                "using the exact field names and adhering to all constraints on field values."
-            },
-            {"role": "user", "content": prompt}
-        ],
-        response_format={"type": "json_object"}
-    )
+    # Determine which provider to use
+    if provider is None:
+        provider = get_llm_provider()
+
+    # Determine which model to use
+    if model is None:
+        model = get_llm_model(provider)
 
+    # System prompt is common across providers
+    if system_prompt is None:
+        system_prompt = (
+            "You are a JSON assistant specialized in structured data for product management tasks. "
+            "Respond only with valid JSON. Follow the exact requested format in the user's prompt, "
+            "using the exact field names and adhering to all constraints on field values."
+        )
+
+    # Call the appropriate provider with JSON response format
+    response_text = _call_provider(prompt, system_prompt, model, provider, response_format="json")
+
+    # Parse JSON response
     try:
-
+        if response_text:
+            return json.loads(response_text)
+        return {}
     except json.JSONDecodeError as e:
         print(f"[Error] Failed to parse LLM response as JSON: {e}")
-        print(f"Raw response: {
+        print(f"Raw response: {response_text}")
         return {}
 
+def call_llm_text(prompt: str, system_prompt: str = None, model: str = None, provider: LLMProvider = None) -> str:
+    """
+    Invoke LLM to get a plain text response (not JSON).
+
+    This is similar to call_llm but returns plain text instead of JSON.
+
+    Args:
+        prompt (str): The prompt to send to the LLM
+        system_prompt (str, optional): The system prompt to use
+        model (str, optional): The model identifier to use (defaults to provider's default model)
+        provider (LLMProvider, optional): The LLM provider to use (defaults to configured provider)
+
+    Returns:
+        str: The text response from the LLM, or empty string on error
+    """
+    # Determine which provider to use
+    if provider is None:
+        provider = get_llm_provider()
+
+    # Determine which model to use
+    if model is None:
+        model = get_llm_model(provider)
+
+    # System prompt is common across providers
+    if system_prompt is None:
+        system_prompt = (
+            "You are a helpful assistant that provides clear and concise answers. "
+            "Respond directly to the question without adding additional explanation or context."
+        )
+
+    print(f"Calling LLM (text mode) with provider: {provider}, model: {model}")
+
+    # Call the appropriate provider with text response format
+    return _call_provider(prompt, system_prompt, model, provider, response_format="text")
+
+def _call_provider(prompt: str, system_prompt: str, model: str, provider: LLMProvider, response_format: str = "json") -> str:
+    """
+    Internal helper function to call the appropriate LLM provider.
+
+    Args:
+        prompt (str): The prompt to send to the LLM
+        system_prompt (str): The system prompt to use
+        model (str): The model to use
+        provider (LLMProvider): The provider to use
+        response_format (str): Either "json" or "text"
+
+    Returns:
+        str: The raw text response from the LLM
+    """
+    try:
+        if provider == LLMProvider.OPENAI:
+            return _call_openai_provider(prompt, system_prompt, model, response_format)
+        elif provider == LLMProvider.GEMINI:
+            return _call_gemini_provider(prompt, system_prompt, model, response_format)
+        elif provider == LLMProvider.CLAUDE:
+            return _call_claude_provider(prompt, system_prompt, model, response_format)
+
+        # Fallback to OpenAI if unknown provider
+        print(f"[Warning] Unknown provider {provider}, falling back to OpenAI")
+        return _call_openai_provider(prompt, system_prompt, DEFAULT_OPENAI_MODEL, response_format)
+    except Exception as e:
+        print(f"[Error] LLM API call failed: {e}")
+        return ""
+
+def _call_openai_provider(prompt: str, system_prompt: str, model: str, response_format: str) -> str:
+    """Internal helper function to call OpenAI API"""
+    try:
+        client = OpenAI()
+
+        # Configure response format for JSON if requested
+        kwargs = {}
+        if response_format == "json":
+            kwargs["response_format"] = {"type": "json_object"}
+
+        response = client.chat.completions.create(
+            model=model,
+            messages=[
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": prompt}
+            ],
+            **kwargs
+        )
+
+        # Return raw content text
+        return response.choices[0].message.content.strip()
+    except Exception as e:
+        print(f"[Error] OpenAI API call failed: {e}")
+        return ""
+
+def _call_gemini_provider(prompt: str, system_prompt: str, model: str, response_format: str) -> str:
+    """Internal helper function to call Gemini API"""
+    try:
+        # Check for API key in environment
+        api_key = os.environ.get("GEMINI_API_KEY", None)
+        if not api_key:
+            print("[Error] No API key found for Gemini. Set GEMINI_API_KEY environment variable.")
+            return ""
+        # Initialize the Gemini client
+        genai.configure(api_key=api_key)
+
+        # Since system role is not supported, combine system prompt and user prompt
+        combined_prompt = f"{system_prompt}\n\n{prompt}"
+
+        # Configure response format for JSON if requested
+        generation_config = {}
+        if response_format == "json":
+            generation_config["response_mime_type"] = "application/json"
+
+        # Generate response using Gemini
+        model_name = model if model != system_prompt else DEFAULT_GEMINI_MODEL
+        model_obj = genai.GenerativeModel(model_name=model_name, generation_config=generation_config)
+        response = model_obj.generate_content(combined_prompt)
+
+        text = response.text.strip()
+
+        # Remove quotes if present for text responses
+        if response_format == "text" and text.startswith('"') and text.endswith('"'):
+            text = text[1:-1]
+
+        return text
+    except Exception as e:
+        print(f"[Error] Gemini API call failed: {e}")
+        return ""
+
+def _call_claude_provider(prompt: str, system_prompt: str, model: str, response_format: str) -> str:
+    """Internal helper function to call Claude API"""
+    try:
+        client = Anthropic()
+
+        # Configure temperature - lower for JSON for more deterministic output
+        temperature = 0.1 if response_format == "json" else 0.2
+
+        response = client.messages.create(
+            model=model,
+            system=system_prompt,
+            max_tokens=4096,
+            messages=[
+                {"role": "user", "content": prompt}
+            ],
+            temperature=temperature
+        )
+
+        return response.content[0].text.strip()
+    except Exception as e:
+        print(f"[Error] Claude API call failed: {e}")
+        return ""
 
 def estimate_scope(task: ScopeMateTask) -> Scope:
     """
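Taken together, the additions in this hunk mean provider and model selection is now driven by environment variables rather than a hard-coded OpenAI client. A minimal usage sketch, assuming scopemate 0.2.0 is installed and the relevant provider API key (e.g. GEMINI_API_KEY) is already set; the variable names and function signatures come from the diff, while the prompt and printed values are illustrative only:

```python
import os

# Choose the provider and model via the environment variables added in 0.2.0.
os.environ["SCOPEMATE_LLM_PROVIDER"] = "GEMINI"          # or "OPENAI" / "CLAUDE"
os.environ["SCOPEMATE_GEMINI_MODEL"] = "gemini-2.0-flash"

from scopemate.llm import get_llm_provider, get_llm_model, call_llm

provider = get_llm_provider()    # resolves to LLMProvider.GEMINI here
model = get_llm_model(provider)  # "gemini-2.0-flash" unless overridden

# call_llm resolves provider and model itself when they are not passed explicitly,
# and returns {} if the response cannot be parsed as JSON.
result = call_llm("Return a JSON object with a single field 'status' set to 'ok'.")
print(provider, model, result)
```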
@@ -307,35 +527,34 @@ def update_parent_with_child_context(parent_task: ScopeMateTask, child_task: Sco
     return updated_parent
 
 
-def generate_title_from_purpose_outcome(purpose: str, outcome: str) -> str:
+def generate_title_from_purpose_outcome(purpose: str, outcome: str, model: str = None, provider: LLMProvider = None) -> str:
     """
     Use LLM to generate a concise title from purpose and outcome descriptions.
 
     Args:
         purpose: The purpose description
         outcome: The outcome description
+        model (str, optional): The model identifier to use (defaults to provider's default model)
+        provider (LLMProvider, optional): The LLM provider to use (defaults to configured provider)
 
     Returns:
         A concise title string
     """
-
-
-
-
-            {
-                "role": "system",
-                "content": "You are a concise title generator. Generate a brief, clear title (maximum 60 characters) "
-                "that captures the essence of a task based on its purpose and outcome description."
-            },
-            {
-                "role": "user",
-                "content": f"Purpose: {purpose}\n\nOutcome: {outcome}\n\nGenerate a concise title (max 60 chars):"
-            }
-        ]
+    system_prompt = (
+        "You are a concise title generator. Generate a brief, clear title (maximum 60 characters) "
+        "that captures the essence of a task based on its purpose and outcome description. "
+        "Return ONLY the title with no additional text or quotes."
     )
 
-
-
+    user_prompt = f"Purpose: {purpose}\n\nOutcome: {outcome}\n\nGenerate a concise title (max 60 chars):"
+
+    # Use the common text-based LLM function
+    title = call_llm_text(user_prompt, system_prompt, model, provider)
+
+    # Handle empty response
+    if not title:
+        return "Task Title"
+
     # Limit title length if needed
     if len(title) > 60:
         title = title[:57] + "..."
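With this hunk, title generation routes through the new call_llm_text helper, so it works against whichever provider is configured. A hedged sketch of the reworked API; the purpose/outcome text is illustrative and the return values depend on the live LLM call:

```python
from scopemate.llm import call_llm_text, generate_title_from_purpose_outcome

# Per the diff, the function truncates titles longer than 60 characters and
# falls back to "Task Title" when the LLM returns an empty response.
title = generate_title_from_purpose_outcome(
    purpose="We need secure authentication for users",
    outcome="Complete authentication system with login/logout",
)

# Plain-text helper added in this release; returns "" if the provider call fails.
summary = call_llm_text("Summarize in one sentence: add OAuth login support.")
print(title, summary, sep="\n")
```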
scopemate/models.py
CHANGED
@@ -144,7 +144,69 @@ class Meta(BaseModel):
 
 
 class ScopeMateTask(BaseModel):
-    """
+    """
+    A Purpose/Context/Outcome task representing a unit of work.
+
+    ScopeMateTask is the core data model in scopemate, representing a single unit of work
+    with well-defined purpose, scope, and outcome. The model follows a comprehensive and
+    structured approach to task definition that ensures clarity in task planning and execution.
+
+    Each task has:
+    1. Purpose - the "why" behind the task (detailed_description, alignment, urgency)
+    2. Scope - the "how big" and "what's involved" (size, time_estimate, dependencies, risks)
+    3. Outcome - the "what will be delivered" (type, definition, acceptance criteria, metrics)
+    4. Meta - tracking information (status, priority, dates, confidence, team)
+
+    Tasks can form a hierarchical structure through the parent_id field, allowing complex
+    work to be broken down into manageable subtasks. The hierarchy supports:
+    - Parent tasks: higher-level tasks that can be decomposed
+    - Child tasks: more specific tasks that contribute to a parent
+    - Root tasks: top-level tasks with no parent
+    - Leaf tasks: tasks with no children
+
+    The model enforces validation rules through Pydantic, ensuring data integrity
+    across all fields (e.g., valid size values, time estimates, status, etc.).
+
+    Attributes:
+        id (str): Unique identifier for the task
+        title (str): Short descriptive title
+        purpose (Purpose): Why the task matters
+        scope (Scope): Size, time, dependencies and risks
+        outcome (Outcome): Delivered value and validation methods
+        meta (Meta): Status, timing, and tracking information
+        parent_id (Optional[str]): ID of parent task if this is a subtask
+
+    Example:
+        ```python
+        task = ScopeMateTask(
+            id="TASK-abc123",
+            title="Implement user authentication",
+            purpose=Purpose(
+                detailed_description="We need secure authentication for users",
+                alignment=["Security", "User experience"],
+                urgency="strategic"
+            ),
+            scope=Scope(
+                size="complex",
+                time_estimate="sprint",
+                dependencies=["API design", "Database setup"],
+                risks=["Security vulnerabilities", "Performance issues"]
+            ),
+            outcome=Outcome(
+                type="customer-facing",
+                detailed_outcome_definition="Complete authentication system with login/logout",
+                acceptance_criteria=["User can log in", "User can log out", "Password reset works"]
+            ),
+            meta=Meta(
+                status="backlog",
+                priority=1,
+                created=get_utc_now(),
+                updated=get_utc_now(),
+                team="Backend"
+            )
+        )
+        ```
+    """
     id: str
     title: str = Field(..., description="Short descriptive title")
     purpose: Purpose
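The new docstring documents the parent/child hierarchy via parent_id, but its example only builds a root task. A hedged sketch of a subtask is below: it reuses the field literals from that example, assumes Purpose, Scope, Outcome, Meta and get_utc_now are importable from scopemate.models, and the child task's details are purely illustrative:

```python
from scopemate.models import (
    ScopeMateTask, Purpose, Scope, Outcome, Meta, get_utc_now
)

child = ScopeMateTask(
    id="TASK-def456",                      # illustrative ID
    title="Add password reset flow",
    purpose=Purpose(
        detailed_description="Users need a self-service way to recover accounts",
        alignment=["User experience"],
        urgency="strategic",               # same literal as the docstring example
    ),
    scope=Scope(
        size="complex",
        time_estimate="sprint",
        dependencies=[],
        risks=["Email delivery failures"],
    ),
    outcome=Outcome(
        type="customer-facing",
        detailed_outcome_definition="Password reset via an emailed link",
        acceptance_criteria=["Reset email is sent", "New password works"],
    ),
    meta=Meta(
        status="backlog",
        priority=2,
        created=get_utc_now(),
        updated=get_utc_now(),
        team="Backend",
    ),
    parent_id="TASK-abc123",               # links this subtask to the parent task above
)
```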
scopemate/storage.py
CHANGED
@@ -6,6 +6,7 @@ This module manages persistence of task data to disk and loading from files.
 """
 import os
 import json
+from datetime import datetime
 from typing import List, Dict, Any
 
 from pydantic import ValidationError
@@ -37,28 +38,241 @@ def save_plan(tasks: List[ScopeMateTask], filename: str) -> None:
     """
     Save tasks to a plan file.
 
+    This function serializes a list of ScopeMateTask objects to JSON and writes them
+    to a file. The file format uses a consistent structure with a top-level "tasks"
+    array containing serialized task objects. This ensures compatibility with other
+    tooling and future versions of scopemate.
+
+    The function handles all serialization details including proper encoding and
+    indentation for readability. Each task is completely serialized with all its
+    nested structures (purpose, scope, outcome, meta) for complete persistence.
+
     Args:
-        tasks: List of ScopeMateTask objects to save
+        tasks: List of ScopeMateTask objects to save to disk
         filename: Path to save the plan file
+
+    Side Effects:
+        - Writes to file system at the specified path
+        - Prints confirmation message upon successful save
+
+    Example:
+        ```python
+        tasks = [task1, task2, task3]  # List of ScopeMateTask objects
+        save_plan(tasks, "project_alpha_plan.json")
+        # Saves all tasks to project_alpha_plan.json with proper formatting
+        ```
     """
     payload = {"tasks": [t.model_dump() for t in tasks]}
     with open(filename, "w", encoding="utf-8") as f:
         json.dump(payload, f, indent=2)
     print(f"✅ Plan saved to {filename}.")
+
+    # Automatically generate markdown version with the same basename
+    md_filename = os.path.splitext(filename)[0] + ".md"
+    save_markdown_plan(payload, md_filename)
+
+
+def save_markdown_plan(data: Dict[str, Any], filename: str) -> None:
+    """
+    Save tasks to a Markdown file for human readability.
+
+    This function converts the JSON task data into a well-structured Markdown format
+    for easier reading and sharing with team members who may not use scopemate directly.
+
+    Args:
+        data: Dictionary containing the tasks data (with "tasks" key)
+        filename: Path to save the Markdown file
+    """
+    markdown = generate_markdown_from_json(data)
+    with open(filename, "w", encoding="utf-8") as f:
+        f.write(markdown)
+    print(f"✅ Markdown version saved to {filename}.")
+
+
+def generate_markdown_from_json(data: Dict[str, Any]) -> str:
+    """
+    Convert scopemate JSON data to a well-structured Markdown format.
+
+    Args:
+        data: The scopemate JSON data as a dictionary
+
+    Returns:
+        A string containing the Markdown representation
+    """
+    # Start building markdown content
+    md = ["# Project Scope Plan\n"]
+    md.append(f"*Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*\n\n")
+
+    # Add summary section
+    tasks = data.get("tasks", [])
+    md.append(f"## Summary\n\n")
+    md.append(f"This document contains **{len(tasks)}** tasks.\n\n")
+
+    # Get counts by size complexity
+    size_counts = {}
+    for task in tasks:
+        if "scope" in task and "size" in task["scope"]:
+            size = task["scope"]["size"]
+            size_counts[size] = size_counts.get(size, 0) + 1
+
+    if size_counts:
+        md.append("**Complexity Breakdown:**\n\n")
+        for size, count in size_counts.items():
+            md.append(f"- {size.capitalize()}: {count} task(s)\n")
+        md.append("\n")
+
+    # Create hierarchical task structure
+    main_tasks = [t for t in tasks if not t.get("parent_id")]
+    child_tasks = {}
+    for task in tasks:
+        if task.get("parent_id"):
+            if task["parent_id"] not in child_tasks:
+                child_tasks[task["parent_id"]] = []
+            child_tasks[task["parent_id"]].append(task)
+
+    # Add detailed task section
+    md.append("## Task Details\n\n")
+
+    # Process main tasks with their children
+    for task in main_tasks:
+        md.extend(format_task_as_markdown(task, child_tasks, 0))
+
+    return "\n".join(md)
+
+
+def format_task_as_markdown(task: Dict[str, Any], child_tasks: Dict[str, List[Dict[str, Any]]], level: int) -> List[str]:
+    """
+    Format a single task and its children as Markdown.
+
+    Args:
+        task: The task data
+        child_tasks: Dictionary mapping parent_id to list of child tasks
+        level: Current indentation level
+
+    Returns:
+        List of markdown formatted lines
+    """
+    md_lines = []
+
+    # Add task title with appropriate heading level
+    heading_level = "###" + "#" * level
+    task_id = task.get("id", "NO-ID")
+    title = task.get("title", "Untitled Task")
+    md_lines.append(f"{heading_level} {task_id}: {title}\n")
+
+    # Add purpose section
+    if "purpose" in task:
+        purpose = task["purpose"]
+        md_lines.append("**Purpose:**\n\n")
+        if "detailed_description" in purpose:
+            md_lines.append(f"{purpose['detailed_description']}\n\n")
+        if "alignment" in purpose and purpose["alignment"]:
+            md_lines.append("*Strategic Alignment:* ")
+            md_lines.append(", ".join(purpose["alignment"]))
+            md_lines.append("\n\n")
+        if "urgency" in purpose:
+            md_lines.append(f"*Urgency:* {purpose['urgency'].capitalize()}\n\n")
+
+    # Add scope section
+    if "scope" in task:
+        scope = task["scope"]
+        md_lines.append("**Scope:**\n\n")
+        if "size" in scope:
+            md_lines.append(f"*Size:* {scope['size'].capitalize()}\n\n")
+        if "time_estimate" in scope:
+            md_lines.append(f"*Time Estimate:* {scope['time_estimate'].capitalize()}\n\n")
+        if "dependencies" in scope and scope["dependencies"]:
+            md_lines.append("*Dependencies:*\n\n")
+            for dep in scope["dependencies"]:
+                md_lines.append(f"- {dep}\n")
+            md_lines.append("\n")
+        if "risks" in scope and scope["risks"]:
+            md_lines.append("*Risks:*\n\n")
+            for risk in scope["risks"]:
+                md_lines.append(f"- {risk}\n")
+            md_lines.append("\n")
+
+    # Add outcome section
+    if "outcome" in task:
+        outcome = task["outcome"]
+        md_lines.append("**Outcome:**\n\n")
+        if "type" in outcome:
+            md_lines.append(f"*Type:* {outcome['type'].capitalize().replace('-', ' ')}\n\n")
+        if "detailed_outcome_definition" in outcome:
+            md_lines.append(f"{outcome['detailed_outcome_definition']}\n\n")
+        if "acceptance_criteria" in outcome and outcome["acceptance_criteria"]:
+            md_lines.append("*Acceptance Criteria:*\n\n")
+            for ac in outcome["acceptance_criteria"]:
+                md_lines.append(f"- {ac}\n")
+            md_lines.append("\n")
+        if "metric" in outcome and outcome["metric"]:
+            md_lines.append(f"*Success Metric:* {outcome['metric']}\n\n")
+        if "validation_method" in outcome and outcome["validation_method"]:
+            md_lines.append(f"*Validation Method:* {outcome['validation_method']}\n\n")
+
+    # Add meta section
+    if "meta" in task:
+        meta = task["meta"]
+        md_lines.append("**Meta:**\n\n")
+        if "status" in meta:
+            md_lines.append(f"*Status:* {meta['status'].capitalize()}\n")
+        if "priority" in meta and meta["priority"] is not None:
+            md_lines.append(f"*Priority:* {meta['priority']}\n")
+        if "confidence" in meta:
+            md_lines.append(f"*Confidence:* {meta['confidence'].capitalize()}\n")
+        if "team" in meta and meta["team"]:
+            md_lines.append(f"*Team:* {meta['team']}\n")
+        md_lines.append("\n")
+
+    # Add separator line if not the last task
+    md_lines.append("---\n\n")
+
+    # Process children recursively
+    if task.get("id") in child_tasks:
+        for child in child_tasks[task["id"]]:
+            md_lines.extend(format_task_as_markdown(child, child_tasks, level + 1))
+
+    return md_lines
 
 
 def load_plan(filename: str) -> List[ScopeMateTask]:
     """
     Load tasks from a plan file.
 
+    This function reads a JSON file containing serialized tasks and deserializes them
+    into ScopeMateTask objects. It handles various backward compatibility issues and
+    performs validation on the loaded data to ensure integrity.
+
+    The function is robust against various common issues:
+    - It properly handles missing parent_id fields for backward compatibility
+    - It removes legacy fields that may exist in older files
+    - It skips invalid tasks with validation errors rather than failing entirely
+    - It provides clear warnings about skipped tasks
+
     Args:
-        filename: Path to the plan file
+        filename: Path to the plan file to load
 
     Returns:
-        List of ScopeMateTask objects
+        List of validated ScopeMateTask objects from the file
 
     Raises:
-        FileNotFoundError: If the file doesn't exist
+        FileNotFoundError: If the specified file doesn't exist
+
+    Example:
+        ```python
+        try:
+            tasks = load_plan("project_alpha_plan.json")
+            print(f"Loaded {len(tasks)} tasks successfully")
+
+            # Process loaded tasks
+            for task in tasks:
+                if task.meta.status == "backlog":
+                    # Do something with backlog tasks...
+                    pass
+        except FileNotFoundError:
+            print("Plan file not found, starting with empty task list")
+            tasks = []
+        ```
     """
     if not os.path.exists(filename):
         raise FileNotFoundError(f"File not found: {filename}")
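The headline change in storage.py is that save_plan now also writes a Markdown report next to the JSON plan, using the new save_markdown_plan/generate_markdown_from_json helpers. A minimal round-trip sketch, assuming scopemate 0.2.0 is installed; the filename is illustrative:

```python
from scopemate.storage import save_plan, load_plan

try:
    tasks = load_plan("project_alpha_plan.json")
except FileNotFoundError:
    tasks = []  # no existing plan; start with an empty list

if tasks:
    save_plan(tasks, "project_alpha_plan.json")
    # Expected side effects per the diff:
    #   project_alpha_plan.json - JSON payload {"tasks": [...]}
    #   project_alpha_plan.md   - Markdown generated by generate_markdown_from_json()
```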