scopemate 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
scopemate/llm.py ADDED
@@ -0,0 +1,343 @@
+ #!/usr/bin/env python3
+ """
+ scopemate LLM - Handles interactions with Large Language Models
+ 
+ This module provides functions for interacting with LLMs for task estimation,
+ breakdown, and optimization.
+ """
+ import json
+ from typing import Dict, Any, List, Optional
+ from openai import OpenAI
+ 
+ from .models import (
+     ScopeMateTask, Scope, TIME_COMPLEXITY, SIZE_COMPLEXITY,
+     VALID_SIZE_TYPES, VALID_TIME_ESTIMATES, get_utc_now
+ )
+ 
+ # -------------------------------
+ # Configuration
+ # -------------------------------
+ DEFAULT_MODEL = "o4-mini"
+ 
+ # -------------------------------
+ # LLM Interaction
+ # -------------------------------
+ def call_llm(prompt: str, model: str = DEFAULT_MODEL) -> dict:
+     """
+     Invoke LLM to get a structured JSON response.
+ 
+     Args:
+         prompt: The prompt to send to the LLM
+         model: The model to use (defaults to DEFAULT_MODEL)
+ 
+     Returns:
+         A dictionary containing the parsed JSON response
+     """
+     client = OpenAI()
+     response = client.chat.completions.create(
+         model=model,
+         messages=[
+             {
+                 "role": "system",
+                 "content": "You are a JSON assistant specialized in structured data for product management tasks. "
+                            "Respond only with valid JSON. Follow the exact requested format in the user's prompt, "
+                            "using the exact field names and adhering to all constraints on field values."
+             },
+             {"role": "user", "content": prompt}
+         ],
+         response_format={"type": "json_object"}
+     )
+ 
+     try:
+         return json.loads(response.choices[0].message.content)
+     except json.JSONDecodeError as e:
+         print(f"[Error] Failed to parse LLM response as JSON: {e}")
+         print(f"Raw response: {response.choices[0].message.content}")
+         return {}
+ 
+ 
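call_llm is the single entry point the rest of the module builds on. A minimal sketch of using it directly (assumes the OPENAI_API_KEY environment variable is set; the prompt is illustrative):

from scopemate.llm import call_llm

# Any prompt that requests JSON works; the json_object response format
# guarantees syntactically valid JSON from the model.
result = call_llm('Return a JSON object of the form {"status": "ok"}')
print(result.get("status", "no status"))  # "ok" if the model complied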
+ def estimate_scope(task: ScopeMateTask) -> Scope:
+     """
+     Use LLM to estimate the scope of a task.
+ 
+     Args:
+         task: The ScopeMateTask to estimate scope for
+ 
+     Returns:
+         A Scope object with the estimated values
+     """
+     # Add parent context to prompt for subtasks
+     parent_context = ""
+     if task.parent_id:
+         parent_context = (
+             f"\nIMPORTANT: This is a subtask with parent_id: {task.parent_id}. "
+             f"Subtasks should be SIMPLER than their parent tasks. "
+             f"If the parent task is complex, a subtask should typically be straightforward or simpler. "
+             f"If the parent task has a multi-sprint or sprint time estimate, a subtask should have a shorter estimate."
+         )
+ 
+     prompt = (
+         f"You are an AI assistant helping a product manager estimate the scope of an engineering task.\n\n"
+         f"Based on this task description, estimate its scope with the following fields:\n"
+         f"- 'size': one of [\"trivial\", \"straightforward\", \"complex\", \"uncertain\", \"pioneering\"]\n"
+         f"- 'time_estimate': one of [\"hours\", \"days\", \"week\", \"sprint\", \"multi-sprint\"]\n"
+         f"- 'dependencies': array of strings describing dependencies\n"
+         f"- 'risks': array of strings describing potential blockers or challenges\n\n"
+         f"Provide detailed reasoning for your estimates, considering:\n"
+         f"1. The task complexity and unknowns\n"
+         f"2. Skills and expertise required\n"
+         f"3. Potential dependencies and risks\n"
+         f"4. Similar tasks from typical product development\n\n"
+         f"{parent_context}\n\n"
+         f"Return your analysis as a JSON object with the fields above, plus a 'reasoning' field explaining your thinking.\n\n"
+         f"Here is the task:\n{task.model_dump_json(indent=2)}"
+     )
+ 
+     # Get response from LLM
+     response = call_llm(prompt)
+ 
+     try:
+         # Extract any reasoning to show the user
+         if "reasoning" in response:
+             print(f"\n[AI Scope Analysis]\n{response['reasoning']}\n")
+             del response["reasoning"]
+ 
+         # Ensure required fields are present with defaults
+         if "size" not in response:
+             response["size"] = "uncertain"
+         if "time_estimate" not in response:
+             response["time_estimate"] = "sprint"
+         if "dependencies" not in response:
+             response["dependencies"] = []
+         if "risks" not in response:
+             response["risks"] = []
+ 
+         # Remove legacy fields if present
+         for legacy_field in ["owner", "team"]:
+             if legacy_field in response:
+                 del response[legacy_field]
+ 
+         # If the task already has risks defined, merge them
+         if task.scope.risks:
+             combined_risks = set(task.scope.risks)
+             combined_risks.update(response["risks"])
+             response["risks"] = list(combined_risks)
+ 
+         # Create new scope with validated data
+         return Scope(**response)
+     except Exception as e:
+         print(f"[Warning] Scope validation failed; keeping original scope: {e}")
+         return task.scope
+ 
+ 
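A sketch of driving estimate_scope with a hand-built task; all field values here are illustrative, chosen from the valid vocabularies defined in scopemate.models:

from scopemate.models import ScopeMateTask, Purpose, Scope, Outcome, Meta, get_utc_now
from scopemate.llm import estimate_scope

now = get_utc_now()
task = ScopeMateTask(
    id="TASK-1",
    title="Add CSV export to reports",
    purpose=Purpose(detailed_description="Let users export report data", urgency="growth"),
    scope=Scope(size="uncertain", time_estimate="sprint"),
    outcome=Outcome(type="customer-facing", detailed_outcome_definition="Users can download CSV files"),
    meta=Meta(status="backlog", created=now, updated=now),
)

scope = estimate_scope(task)  # prints the model's reasoning, returns a validated Scope
print(scope.size, scope.time_estimate)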
+ def suggest_alternative_approaches(task: ScopeMateTask) -> Dict[str, Any]:
+     """
+     Get a list of alternative approaches to implementing the task from the LLM.
+ 
+     Args:
+         task: The ScopeMateTask to get alternatives for
+ 
+     Returns:
+         A dictionary containing suggested alternative approaches
+     """
+     # Build the prompt for LLM
+     prompt = (
+         f"You are a product manager helping to identify alternative approaches to a task.\n\n"
+         f"For the following task, suggest 2-5 ALTERNATIVE APPROACHES or implementation methods. "
+         f"For example, if the task is 'Implement authentication', you might suggest:\n"
+         f"1. Username/password based authentication with email verification\n"
+         f"2. Social authentication using OAuth with platforms like Google, Facebook, etc.\n"
+         f"3. Passwordless authentication using magic links sent to email\n\n"
+         f"Each approach should be meaningfully different in IMPLEMENTATION STRATEGY, not just small variations.\n"
+         f"Give each approach a short, clear name and a detailed description explaining the pros and cons.\n\n"
+         f"IMPORTANT: For each approach, also include:\n"
+         f"- 'scope': One of [\"trivial\", \"straightforward\", \"complex\", \"uncertain\", \"pioneering\"] indicating complexity\n"
+         f"- 'time_estimate': One of [\"hours\", \"days\", \"week\", \"sprint\", \"multi-sprint\"] indicating time required\n\n"
+         f"Return your response as a JSON object with this structure:\n"
+         f"{{\n"
+         f"  \"alternatives\": [\n"
+         f"    {{\n"
+         f"      \"name\": \"Short name for approach 1\",\n"
+         f"      \"description\": \"Detailed description of approach 1 with pros and cons\",\n"
+         f"      \"scope\": \"straightforward\",\n"
+         f"      \"time_estimate\": \"days\"\n"
+         f"    }},\n"
+         f"    {{\n"
+         f"      \"name\": \"Short name for approach 2\",\n"
+         f"      \"description\": \"Detailed description of approach 2 with pros and cons\",\n"
+         f"      \"scope\": \"complex\",\n"
+         f"      \"time_estimate\": \"sprint\"\n"
+         f"    }},\n"
+         f"    ...\n"
+         f"  ]\n"
+         f"}}\n\n"
+         f"Here is the task:\n{task.model_dump_json(indent=2)}"
+     )
+ 
+     # Get LLM response
+     response = call_llm(prompt)
+ 
+     # Check if response contains alternatives
+     if not isinstance(response, dict) or "alternatives" not in response:
+         print("[Warning] LLM did not return proper alternatives structure")
+         return {"alternatives": []}
+ 
+     alternatives = response.get("alternatives", [])
+ 
+     # Validate and process alternatives
+     valid_alternatives = []
+     for idx, alt in enumerate(alternatives):
+         if not isinstance(alt, dict):
+             continue
+ 
+         name = alt.get("name", f"Alternative {idx+1}")
+         description = alt.get("description", "No description provided")
+ 
+         # Extract scope and time_estimate with defaults,
+         # clamping to the shared vocabularies from .models
+         scope = alt.get("scope", "uncertain")
+         if scope not in VALID_SIZE_TYPES:
+             scope = "uncertain"
+ 
+         time_estimate = alt.get("time_estimate", "sprint")
+         if time_estimate not in VALID_TIME_ESTIMATES:
+             time_estimate = "sprint"
+ 
+         valid_alternatives.append({
+             "name": name,
+             "description": description,
+             "scope": scope,
+             "time_estimate": time_estimate
+         })
+ 
+     return {"alternatives": valid_alternatives}
+ 
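Because the function clamps out-of-vocabulary values to "uncertain" and "sprint", callers can consume the result without re-validating. A short sketch, reusing the task built above:

for alt in suggest_alternative_approaches(task)["alternatives"]:
    print(f"{alt['name']}: {alt['scope']}, {alt['time_estimate']}")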
+ 
+ def update_parent_with_child_context(parent_task: ScopeMateTask, child_task: ScopeMateTask) -> ScopeMateTask:
+     """
+     Update parent task details when a custom child task is added by passing context to LLM.
+ 
+     Args:
+         parent_task: The parent ScopeMateTask to update
+         child_task: The child ScopeMateTask that was just created
+ 
+     Returns:
+         Updated parent ScopeMateTask
+     """
+     # Build the prompt for LLM
+     prompt = (
+         f"You are a product manager updating a parent task based on a new child task that was just created.\n\n"
+         f"Review the parent task and the new child task details. Then update the parent task to:\n"
+         f"1. Include any important details from the child task not already reflected in the parent\n"
+         f"2. Ensure the parent's purpose and outcome descriptions accurately reflect all child tasks\n"
+         f"3. Add any new risks or dependencies that this child task implies for the parent\n"
+         f"4. Consider if the team assignment should be updated based on the child task\n\n"
+         f"Return a JSON object with these updated fields, keeping most of the parent task the same, but updating:\n"
+         f"- purpose.detailed_description: Generated enhanced description including child context\n"
+         f"- scope.risks: Updated list of risks (merged from both parent and any new ones)\n"
+         f"- outcome.detailed_outcome_definition: Generated enhanced description including child outcome\n"
+         f"- meta.team: One of (Product, Design, Frontend, Backend, ML, Infra, Testing, Other), if it should be changed\n\n"
+         f"Here is the parent task:\n{parent_task.model_dump_json(indent=2)}\n\n"
+         f"Here is the new child task:\n{child_task.model_dump_json(indent=2)}\n\n"
+         f"Return ONLY these updated fields in a JSON structure like:\n"
+         f"{{\n"
+         f"  \"purpose\": {{\n"
+         f"    \"detailed_description\": \"Generated enhanced description...\"\n"
+         f"  }},\n"
+         f"  \"scope\": {{\n"
+         f"    \"risks\": [\"Risk 1\", \"Risk 2\", \"New risk from child\"]\n"
+         f"  }},\n"
+         f"  \"outcome\": {{\n"
+         f"    \"detailed_outcome_definition\": \"Generated enhanced outcome description...\"\n"
+         f"  }},\n"
+         f"  \"meta\": {{\n"
+         f"    \"team\": \"Frontend\"\n"
+         f"  }}\n"
+         f"}}\n"
+     )
+ 
+     # Get LLM response
+     response = call_llm(prompt)
+ 
+     # Make a copy of the parent task to update
+     updated_parent = parent_task.model_copy(deep=True)
+ 
+     # Update purpose description if provided
+     if (
+         isinstance(response, dict)
+         and "purpose" in response
+         and isinstance(response["purpose"], dict)
+         and "detailed_description" in response["purpose"]
+     ):
+         updated_parent.purpose.detailed_description = response["purpose"]["detailed_description"]
+ 
+     # Update risks if provided
+     if (
+         isinstance(response, dict)
+         and "scope" in response
+         and isinstance(response["scope"], dict)
+         and "risks" in response["scope"]
+     ):
+         # Combine existing risks with new ones while removing duplicates
+         combined_risks = set(updated_parent.scope.risks)
+         combined_risks.update(response["scope"]["risks"])
+         updated_parent.scope.risks = list(combined_risks)
+ 
+     # Update outcome definition if provided
+     if (
+         isinstance(response, dict)
+         and "outcome" in response
+         and isinstance(response["outcome"], dict)
+         and "detailed_outcome_definition" in response["outcome"]
+     ):
+         updated_parent.outcome.detailed_outcome_definition = response["outcome"]["detailed_outcome_definition"]
+ 
+     # Update team if provided
+     if (
+         isinstance(response, dict)
+         and "meta" in response
+         and isinstance(response["meta"], dict)
+         and "team" in response["meta"]
+         and response["meta"]["team"] in ["Product", "Design", "Frontend", "Backend", "ML", "Infra", "Testing", "Other"]
+     ):
+         updated_parent.meta.team = response["meta"]["team"]
+ 
+     # Update the timestamp
+     updated_parent.meta.updated = get_utc_now()
+ 
+     return updated_parent
+ 
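A sketch of the parent/child flow; the child here is a hypothetical copy of the earlier task:

child = task.model_copy(deep=True)
child.id = "TASK-2"
child.parent_id = task.id

updated = update_parent_with_child_context(task, child)
# The merged risk list is deduplicated and the timestamp refreshed
print(updated.scope.risks, updated.meta.updated)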
+ 
+ def generate_title_from_purpose_outcome(purpose: str, outcome: str) -> str:
+     """
+     Use LLM to generate a concise title from purpose and outcome descriptions.
+ 
+     Args:
+         purpose: The purpose description
+         outcome: The outcome description
+ 
+     Returns:
+         A concise title string
+     """
+     client = OpenAI()
+     response = client.chat.completions.create(
+         model=DEFAULT_MODEL,
+         messages=[
+             {
+                 "role": "system",
+                 "content": "You are a concise title generator. Generate a brief, clear title (maximum 60 characters) "
+                            "that captures the essence of a task based on its purpose and outcome description."
+             },
+             {
+                 "role": "user",
+                 "content": f"Purpose: {purpose}\n\nOutcome: {outcome}\n\nGenerate a concise title (max 60 chars):"
+             }
+         ]
+     )
+ 
+     # Extract title from LLM response
+     title = response.choices[0].message.content.strip()
+     # Limit title length if needed
+     if len(title) > 60:
+         title = title[:57] + "..."
+ 
+     return title
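The 60-character cap is enforced locally, so callers get a bounded title even if the model ignores the instruction. A sketch:

title = generate_title_from_purpose_outcome(
    "Let users export report data", "Users can download CSV files"
)
assert len(title) <= 60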
scopemate/models.py ADDED
@@ -0,0 +1,157 @@
+ #!/usr/bin/env python3
+ """
+ scopemate Models - Pydantic models for task representation
+ 
+ This module contains the data models used throughout scopemate for representing
+ tasks, their purpose, scope, outcome, and metadata.
+ """
+ import datetime as dt
+ from datetime import UTC
+ from typing import List, Optional
+ from pydantic import BaseModel, Field
+ 
+ # -------------------------------
+ # Constants and lookup tables
+ # -------------------------------
+ # Complexity rankings for time estimates
+ TIME_COMPLEXITY = {
+     "hours": 1,
+     "days": 2,
+     "week": 3,
+     "sprint": 4,
+     "multi-sprint": 5
+ }
+ 
+ # Complexity rankings for task size
+ SIZE_COMPLEXITY = {
+     "trivial": 1,
+     "straightforward": 2,
+     "complex": 3,
+     "uncertain": 4,
+     "pioneering": 5
+ }
+ 
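The rank tables make the categorical labels numerically comparable, which is how "subtasks should be simpler than their parents" can be checked in code. A sketch with a hypothetical helper, not part of the package:

from scopemate.models import SIZE_COMPLEXITY, TIME_COMPLEXITY

def child_is_simpler(child_size: str, parent_size: str) -> bool:
    # Lower rank means simpler
    return SIZE_COMPLEXITY[child_size] <= SIZE_COMPLEXITY[parent_size]

child_is_simpler("straightforward", "complex")       # True
TIME_COMPLEXITY["days"] < TIME_COMPLEXITY["sprint"]  # True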
+ # Valid values for data validation
+ VALID_OUTCOME_TYPES = ["customer-facing", "business-metric", "technical-debt", "operational", "learning"]
+ VALID_URGENCY_TYPES = ["mission-critical", "strategic", "growth", "maintenance", "exploratory"]
+ VALID_SIZE_TYPES = ["trivial", "straightforward", "complex", "uncertain", "pioneering"]
+ VALID_TIME_ESTIMATES = ["hours", "days", "week", "sprint", "multi-sprint"]
+ VALID_STATUSES = ["backlog", "discovery", "in-progress", "review", "validated", "shipped", "killed"]
+ VALID_CONFIDENCE_LEVELS = ["high", "medium", "low"]
+ VALID_TEAMS = ["Product", "Design", "Frontend", "Backend", "ML", "Infra", "Testing", "Other"]
+ 
+ # Common mappings for fixing incorrect values
+ OUTCOME_TYPE_MAPPING = {
+     "internal": "operational",
+     "deliverable": "customer-facing",
+     "experiment": "learning",
+     "enabler": "technical-debt",
+     "maintenance": "technical-debt",
+     "stability": "technical-debt"
+ }
+ 
+ # -------------------------------
+ # Helper Functions
+ # -------------------------------
+ def get_utc_now() -> str:
+     """Returns current UTC time in ISO format with Z suffix."""
+     # isoformat() on an aware UTC datetime ends in "+00:00"; swap it for "Z"
+     # rather than appending, which would produce an invalid "+00:00Z" suffix
+     return dt.datetime.now(UTC).isoformat(timespec="seconds").replace("+00:00", "Z")
+ 
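Example output (illustrative timestamp):

get_utc_now()  # '2025-05-01T12:34:56Z'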
+ # -------------------------------
+ # Pydantic Models
+ # -------------------------------
+ class Purpose(BaseModel):
+     """Purpose of a task - why it matters."""
+     detailed_description: str
+     alignment: List[str] = Field(
+         default_factory=list,
+         description="Strategic goals this task aligns with"
+     )
+     urgency: str = Field(
+         ...,
+         pattern="^(mission-critical|strategic|growth|maintenance|exploratory)$",
+         description="Strategic importance"
+     )
+ 
+ 
+ class Scope(BaseModel):
+     """Scope of a task - how big it is and what's required."""
+     size: str = Field(
+         ...,
+         pattern="^(trivial|straightforward|complex|uncertain|pioneering)$",
+         description="Complexity and effort"
+     )
+     time_estimate: str = Field(
+         ...,
+         pattern="^(hours|days|week|sprint|multi-sprint)$",
+         description="Estimated time to complete"
+     )
+     dependencies: List[str] = Field(default_factory=list)
+     risks: List[str] = Field(
+         default_factory=list,
+         description="Potential blockers or challenges"
+     )
+ 
+ 
+ class Outcome(BaseModel):
+     """Outcome of a task - what's delivered and how it's measured."""
+     type: str = Field(
+         ...,
+         pattern="^(customer-facing|business-metric|technical-debt|operational|learning)$",
+         description="Type of value created"
+     )
+     detailed_outcome_definition: str
+     acceptance_criteria: List[str] = Field(
+         default_factory=list,
+         description="How we'll know this is done"
+     )
+     metric: Optional[str] = Field(
+         default=None,
+         description="How success will be measured"
+     )
+     validation_method: Optional[str] = Field(
+         default=None,
+         description="How to validate success (qualitative/quantitative)"
+     )
+ 
+ 
+ class Meta(BaseModel):
+     """Metadata about a task - status, priority, dates, etc."""
+     status: str = Field(
+         ...,
+         pattern="^(backlog|discovery|in-progress|review|validated|shipped|killed)$"
+     )
+     priority: Optional[int] = Field(
+         default=None,
+         description="Relative priority (lower is higher)"
+     )
+     created: str
+     updated: str
+     due_date: Optional[str] = Field(
+         default=None,
+         description="Target completion date"
+     )
+     confidence: Optional[str] = Field(
+         default=None,
+         pattern="^(high|medium|low)$",
+         description="Confidence in estimates"
+     )
+     team: Optional[str] = Field(
+         default=None,
+         pattern="^(Product|Design|Frontend|Backend|ML|Infra|Testing|Other)$",
+         description="Team responsible for this task"
+     )
+ 
+ 
+ class ScopeMateTask(BaseModel):
+     """A Purpose/Context/Outcome task representing a unit of work."""
+     id: str
+     title: str = Field(..., description="Short descriptive title")
+     purpose: Purpose
+     scope: Scope
+     outcome: Outcome
+     meta: Meta
+     parent_id: Optional[str] = Field(
+         default=None,
+         description="ID of parent task if this is a subtask"
+     )
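The pattern constraints mean out-of-vocabulary values fail fast at construction time rather than leaking into saved plans. A sketch:

from pydantic import ValidationError
from scopemate.models import Scope

try:
    Scope(size="huge", time_estimate="days")  # "huge" is not a valid size
except ValidationError as e:
    print(e.errors()[0]["loc"])  # ('size',)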
scopemate/storage.py ADDED
@@ -0,0 +1,106 @@
+ #!/usr/bin/env python3
+ """
+ scopemate Storage - Functions for saving and loading task data
+ 
+ This module manages persistence of task data to disk and loading from files.
+ """
+ import os
+ import json
+ from typing import List
+ 
+ from pydantic import ValidationError
+ from .models import ScopeMateTask
+ 
+ # -------------------------------
+ # Configuration
+ # -------------------------------
+ CHECKPOINT_FILE = ".scopemate_checkpoint.json"
+ 
+ # -------------------------------
+ # File Operations
+ # -------------------------------
+ def save_checkpoint(tasks: List[ScopeMateTask], filename: str = CHECKPOINT_FILE) -> None:
+     """
+     Save tasks to a checkpoint file for later resumption.
+ 
+     Args:
+         tasks: List of ScopeMateTask objects to save
+         filename: Path to save the checkpoint file
+     """
+     payload = {"tasks": [t.model_dump() for t in tasks]}
+     with open(filename, "w", encoding="utf-8") as f:
+         json.dump(payload, f, indent=2)
+     print(f"[Checkpoint saved to {filename}]")
+ 
+ 
+ def save_plan(tasks: List[ScopeMateTask], filename: str) -> None:
+     """
+     Save tasks to a plan file.
+ 
+     Args:
+         tasks: List of ScopeMateTask objects to save
+         filename: Path to save the plan file
+     """
+     payload = {"tasks": [t.model_dump() for t in tasks]}
+     with open(filename, "w", encoding="utf-8") as f:
+         json.dump(payload, f, indent=2)
+     print(f"✅ Plan saved to {filename}.")
+ 
+ 
+ def load_plan(filename: str) -> List[ScopeMateTask]:
+     """
+     Load tasks from a plan file.
+ 
+     Args:
+         filename: Path to the plan file
+ 
+     Returns:
+         List of ScopeMateTask objects
+ 
+     Raises:
+         FileNotFoundError: If the file doesn't exist
+     """
+     if not os.path.exists(filename):
+         raise FileNotFoundError(f"File not found: {filename}")
+ 
+     with open(filename, "r", encoding="utf-8") as f:
+         data = json.load(f)
+ 
+     tasks = []
+     for raw in data.get("tasks", []):
+         try:
+             # Ensure parent_id field exists for backward compatibility
+             if "parent_id" not in raw:
+                 raw["parent_id"] = None
+ 
+             # Handle legacy fields in scope
+             if "scope" in raw and isinstance(raw["scope"], dict):
+                 for legacy_field in ["owner", "team"]:
+                     if legacy_field in raw["scope"]:
+                         del raw["scope"][legacy_field]
+ 
+             tasks.append(ScopeMateTask(**raw))
+         except ValidationError as e:
+             print(f"[Warning] Skipping invalid task: {e}")
+ 
+     print(f"✅ Loaded {len(tasks)} tasks from {filename}.")
+     return tasks
+ 
+ 
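Save and load are symmetric, so a round trip preserves valid tasks and silently drops invalid ones. A sketch, reusing the task from the llm.py examples:

from scopemate.storage import save_plan, load_plan

save_plan([task], "plan.json")      # writes {"tasks": [...]} as JSON
restored = load_plan("plan.json")   # skips entries that fail validation
assert restored[0].id == task.id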
+ def checkpoint_exists() -> bool:
+     """
+     Check if a checkpoint file exists.
+ 
+     Returns:
+         True if checkpoint file exists, False otherwise
+     """
+     return os.path.exists(CHECKPOINT_FILE)
+ 
+ 
+ def delete_checkpoint() -> None:
+     """
+     Delete the checkpoint file if it exists.
+     """
+     if checkpoint_exists():
+         os.remove(CHECKPOINT_FILE)
+         print(f"Checkpoint file {CHECKPOINT_FILE} deleted.")
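The checkpoint helpers always use the fixed CHECKPOINT_FILE path, giving a simple resume-or-discard lifecycle. A sketch:

from scopemate.storage import save_checkpoint, checkpoint_exists, delete_checkpoint

save_checkpoint([task])    # writes .scopemate_checkpoint.json
if checkpoint_exists():
    delete_checkpoint()    # removes the file and logs the deletion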