@rookiestar/eng-lang-tutor 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of @rookiestar/eng-lang-tutor might be problematic. Click here for more details.
- package/CLAUDE.md +259 -0
- package/README.md +224 -0
- package/README_EN.md +224 -0
- package/SKILL.md +495 -0
- package/bin/eng-lang-tutor.js +177 -0
- package/docs/OPENCLAW_DEPLOYMENT.md +228 -0
- package/examples/sample_keypoint.json +130 -0
- package/examples/sample_quiz.json +92 -0
- package/npm-scripts/install.js +132 -0
- package/package.json +46 -0
- package/references/resources.md +292 -0
- package/requirements.txt +9 -0
- package/scripts/command_parser.py +336 -0
- package/scripts/cron_push.py +226 -0
- package/scripts/dedup.py +325 -0
- package/scripts/gamification.py +406 -0
- package/scripts/scorer.py +323 -0
- package/scripts/state_manager.py +1025 -0
- package/scripts/tts/__init__.py +30 -0
- package/scripts/tts/base.py +109 -0
- package/scripts/tts/manager.py +290 -0
- package/scripts/tts/providers/__init__.py +10 -0
- package/scripts/tts/providers/xunfei.py +192 -0
- package/templates/keypoint_schema.json +420 -0
- package/templates/prompt_templates.md +1261 -0
- package/templates/quiz_schema.json +201 -0
- package/templates/state_schema.json +241 -0
|
@@ -0,0 +1,1025 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
State Manager - Handles state persistence and event logging for eng-lang-tutor.
|
|
4
|
+
|
|
5
|
+
Responsibilities:
|
|
6
|
+
- Load/save state.json
|
|
7
|
+
- Append events to monthly log files (events_YYYY-MM.jsonl)
|
|
8
|
+
- Provide atomic write operations for crash recovery
|
|
9
|
+
- Manage daily content directories
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import json
import os
import shutil
from datetime import date, datetime, timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def get_default_state_dir() -> Path:
    """Resolve the directory where persistent state lives.

    Priority:
    1. OPENCLAW_STATE_DIR environment variable (if set)
    2. ~/.openclaw/state/eng-lang-tutor/ (default)

    Returns:
        Path to the state directory
    """
    override = os.environ.get('OPENCLAW_STATE_DIR', '')
    fallback = Path.home() / '.openclaw' / 'state' / 'eng-lang-tutor'
    return Path(override) if override else fallback
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class StateManager:
|
|
38
|
+
"""Manages state persistence and event logging."""
|
|
39
|
+
|
|
40
|
+
def __init__(self, data_dir: str = None):
    """Set up storage paths, run one-time migration, create directories.

    Args:
        data_dir: Path to the data directory (relative or absolute).
            If None, uses OPENCLAW_STATE_DIR env var or
            ~/.openclaw/state/eng-lang-tutor/ as default.
    """
    self.data_dir = get_default_state_dir() if data_dir is None else Path(data_dir)

    # Well-known locations inside the data directory.
    self.state_file = self.data_dir / "state.json"
    self.logs_dir = self.data_dir / "logs"
    self.daily_dir = self.data_dir / "daily"
    self.audio_dir = self.data_dir / "audio"

    # One-time migration from the legacy data/ directory, then make
    # sure every directory exists before first use.
    self._migrate_from_old_location()
    self._ensure_directories()
|
|
64
|
+
|
|
65
|
+
def _migrate_from_old_location(self) -> None:
    """
    Migrate data from old data/ directory to new state directory.

    This is a one-time migration that runs if:
    1. The new state directory doesn't have state.json
    2. The old data/ directory exists with state.json

    Best-effort: any failure is printed and startup continues with an
    empty state directory rather than raising.
    """
    # Only migrate if using default state directory; a custom data_dir
    # means the caller manages its own layout.
    if self.data_dir != get_default_state_dir():
        return

    # Check if new location already has data (migration already done).
    if self.state_file.exists():
        return

    # Find old data directory (relative to this script's location:
    # scripts/state_manager.py -> package root -> data/).
    script_dir = Path(__file__).parent.parent
    old_data_dir = script_dir / "data"

    if not old_data_dir.exists():
        return

    old_state_file = old_data_dir / "state.json"
    if not old_state_file.exists():
        return

    # Perform migration
    print(f"Migrating data from {old_data_dir} to {self.data_dir}...")

    try:
        # Create new directory
        self.data_dir.mkdir(parents=True, exist_ok=True)

        # Copy all contents
        for item in old_data_dir.iterdir():
            dest = self.data_dir / item.name
            if item.is_dir():
                # Replace any half-copied directory from a prior attempt.
                if dest.exists():
                    shutil.rmtree(dest)
                shutil.copytree(item, dest)
            else:
                shutil.copy2(item, dest)

        # Rename old directory to backup so migration never runs twice.
        backup_dir = script_dir / "data.backup"
        if backup_dir.exists():
            shutil.rmtree(backup_dir)
        old_data_dir.rename(backup_dir)

        print(f"Migration complete. Old data backed up to {backup_dir}")
    except Exception as e:
        # Broad catch is deliberate: migration must never block startup.
        print(f"Warning: Migration failed: {e}")
        print("Will use new empty state directory.")
|
|
119
|
+
|
|
120
|
+
def _ensure_directories(self) -> None:
|
|
121
|
+
"""Create necessary directories if they don't exist."""
|
|
122
|
+
self.data_dir.mkdir(parents=True, exist_ok=True)
|
|
123
|
+
self.logs_dir.mkdir(parents=True, exist_ok=True)
|
|
124
|
+
self.daily_dir.mkdir(parents=True, exist_ok=True)
|
|
125
|
+
self.audio_dir.mkdir(parents=True, exist_ok=True)
|
|
126
|
+
|
|
127
|
+
def _default_state(self) -> Dict[str, Any]:
|
|
128
|
+
"""Return the default state structure."""
|
|
129
|
+
return {
|
|
130
|
+
"version": 2,
|
|
131
|
+
"initialized": False,
|
|
132
|
+
"onboarding_step": 0,
|
|
133
|
+
"completion_status": {
|
|
134
|
+
"quiz_completed_date": None,
|
|
135
|
+
"keypoint_view_history": []
|
|
136
|
+
},
|
|
137
|
+
"schedule": {
|
|
138
|
+
"keypoint_time": "06:45",
|
|
139
|
+
"quiz_time": "22:45",
|
|
140
|
+
"timezone": "Asia/Shanghai"
|
|
141
|
+
},
|
|
142
|
+
"user": {
|
|
143
|
+
"xp": 0,
|
|
144
|
+
"level": 1,
|
|
145
|
+
"streak": 0,
|
|
146
|
+
"streak_freeze": 0,
|
|
147
|
+
"gems": 0,
|
|
148
|
+
"badges": []
|
|
149
|
+
},
|
|
150
|
+
"preferences": {
|
|
151
|
+
"cefr_level": "B1",
|
|
152
|
+
"oral_written_ratio": 0.7,
|
|
153
|
+
"topics": {
|
|
154
|
+
"movies": 0.2,
|
|
155
|
+
"news": 0.15,
|
|
156
|
+
"gaming": 0.15,
|
|
157
|
+
"sports": 0.1,
|
|
158
|
+
"workplace": 0.2,
|
|
159
|
+
"social": 0.1,
|
|
160
|
+
"daily_life": 0.1
|
|
161
|
+
},
|
|
162
|
+
"tutor_style": "humorous",
|
|
163
|
+
"dedup_days": 14
|
|
164
|
+
},
|
|
165
|
+
"progress": {
|
|
166
|
+
"total_points": 0,
|
|
167
|
+
"total_quizzes": 0,
|
|
168
|
+
"correct_rate": 0.0,
|
|
169
|
+
"last_study_date": None,
|
|
170
|
+
"perfect_quizzes": 0,
|
|
171
|
+
"expressions_learned": 0
|
|
172
|
+
},
|
|
173
|
+
"recent_topics": [],
|
|
174
|
+
"error_notebook": [],
|
|
175
|
+
"error_archive": []
|
|
176
|
+
}
|
|
177
|
+
|
|
178
|
+
def load_state(self) -> Dict[str, Any]:
|
|
179
|
+
"""
|
|
180
|
+
Load current state from file.
|
|
181
|
+
|
|
182
|
+
Returns:
|
|
183
|
+
State dictionary with all user data, preferences, and progress.
|
|
184
|
+
"""
|
|
185
|
+
if not self.state_file.exists():
|
|
186
|
+
return self._default_state()
|
|
187
|
+
|
|
188
|
+
try:
|
|
189
|
+
with open(self.state_file, 'r', encoding='utf-8') as f:
|
|
190
|
+
state = json.load(f)
|
|
191
|
+
# Merge with defaults to ensure all fields exist
|
|
192
|
+
return self._merge_with_defaults(state)
|
|
193
|
+
except (json.JSONDecodeError, IOError) as e:
|
|
194
|
+
print(f"Error loading state: {e}. Using defaults.")
|
|
195
|
+
return self._default_state()
|
|
196
|
+
|
|
197
|
+
def _merge_with_defaults(self, state: Dict[str, Any]) -> Dict[str, Any]:
|
|
198
|
+
"""Merge loaded state with defaults to ensure all fields exist."""
|
|
199
|
+
defaults = self._default_state()
|
|
200
|
+
merged = defaults.copy()
|
|
201
|
+
|
|
202
|
+
# Handle top-level boolean/integer fields
|
|
203
|
+
for key in ['initialized', 'onboarding_step', 'version']:
|
|
204
|
+
if key in state:
|
|
205
|
+
merged[key] = state[key]
|
|
206
|
+
|
|
207
|
+
# Handle nested objects
|
|
208
|
+
for key in ['user', 'preferences', 'progress', 'completion_status', 'schedule']:
|
|
209
|
+
if key in state and isinstance(state[key], dict):
|
|
210
|
+
merged[key] = {**defaults.get(key, {}), **state[key]}
|
|
211
|
+
|
|
212
|
+
# Handle arrays
|
|
213
|
+
for key in ['recent_topics', 'error_notebook', 'error_archive']:
|
|
214
|
+
if key in state:
|
|
215
|
+
merged[key] = state[key]
|
|
216
|
+
|
|
217
|
+
return merged
|
|
218
|
+
|
|
219
|
+
def save_state(self, state: Dict[str, Any]) -> None:
|
|
220
|
+
"""
|
|
221
|
+
Save state to file with atomic write for crash recovery.
|
|
222
|
+
|
|
223
|
+
Args:
|
|
224
|
+
state: Complete state dictionary to save
|
|
225
|
+
"""
|
|
226
|
+
# Ensure directories exist
|
|
227
|
+
self._ensure_directories()
|
|
228
|
+
|
|
229
|
+
# Write to temp file first, then rename for atomicity
|
|
230
|
+
temp_file = self.state_file.with_suffix('.tmp')
|
|
231
|
+
|
|
232
|
+
try:
|
|
233
|
+
with open(temp_file, 'w', encoding='utf-8') as f:
|
|
234
|
+
json.dump(state, f, ensure_ascii=False, indent=2)
|
|
235
|
+
|
|
236
|
+
# Atomic rename
|
|
237
|
+
temp_file.rename(self.state_file)
|
|
238
|
+
except IOError as e:
|
|
239
|
+
print(f"Error saving state: {e}")
|
|
240
|
+
if temp_file.exists():
|
|
241
|
+
temp_file.unlink()
|
|
242
|
+
raise
|
|
243
|
+
|
|
244
|
+
def append_event(self, event_type: str, data: Dict[str, Any]) -> None:
|
|
245
|
+
"""
|
|
246
|
+
Append an event to the monthly log file.
|
|
247
|
+
|
|
248
|
+
Args:
|
|
249
|
+
event_type: Type of event (e.g., 'keypoint_generated', 'quiz_completed')
|
|
250
|
+
data: Event-specific data
|
|
251
|
+
"""
|
|
252
|
+
self.logs_dir.mkdir(parents=True, exist_ok=True)
|
|
253
|
+
|
|
254
|
+
today = datetime.now()
|
|
255
|
+
log_file = self.logs_dir / f"events_{today.strftime('%Y-%m')}.jsonl"
|
|
256
|
+
|
|
257
|
+
event = {
|
|
258
|
+
"timestamp": today.isoformat(),
|
|
259
|
+
"type": event_type,
|
|
260
|
+
"data": data
|
|
261
|
+
}
|
|
262
|
+
|
|
263
|
+
try:
|
|
264
|
+
with open(log_file, 'a', encoding='utf-8') as f:
|
|
265
|
+
f.write(json.dumps(event, ensure_ascii=False) + '\n')
|
|
266
|
+
except IOError as e:
|
|
267
|
+
print(f"Error appending event: {e}")
|
|
268
|
+
raise
|
|
269
|
+
|
|
270
|
+
def get_today_dir(self) -> Path:
|
|
271
|
+
"""Get the directory for today's content."""
|
|
272
|
+
today = date.today().strftime('%Y-%m-%d')
|
|
273
|
+
today_dir = self.daily_dir / today
|
|
274
|
+
today_dir.mkdir(parents=True, exist_ok=True)
|
|
275
|
+
return today_dir
|
|
276
|
+
|
|
277
|
+
def get_daily_dir(self, target_date: date) -> Path:
|
|
278
|
+
"""
|
|
279
|
+
Get the directory for a specific date's content.
|
|
280
|
+
|
|
281
|
+
Args:
|
|
282
|
+
target_date: The date to get the directory for
|
|
283
|
+
|
|
284
|
+
Returns:
|
|
285
|
+
Path to the daily directory
|
|
286
|
+
"""
|
|
287
|
+
date_str = target_date.strftime('%Y-%m-%d')
|
|
288
|
+
daily_path = self.daily_dir / date_str
|
|
289
|
+
return daily_path
|
|
290
|
+
|
|
291
|
+
def save_daily_content(self, content_type: str, content: Dict[str, Any],
|
|
292
|
+
target_date: Optional[date] = None) -> Path:
|
|
293
|
+
"""
|
|
294
|
+
Save content to the daily directory.
|
|
295
|
+
|
|
296
|
+
Args:
|
|
297
|
+
content_type: Type of content ('keypoint', 'quiz', 'user_answers')
|
|
298
|
+
content: Content dictionary to save
|
|
299
|
+
target_date: Date for the content (defaults to today)
|
|
300
|
+
|
|
301
|
+
Returns:
|
|
302
|
+
Path to the saved file
|
|
303
|
+
"""
|
|
304
|
+
if target_date is None:
|
|
305
|
+
target_date = date.today()
|
|
306
|
+
|
|
307
|
+
daily_path = self.get_daily_dir(target_date)
|
|
308
|
+
daily_path.mkdir(parents=True, exist_ok=True)
|
|
309
|
+
|
|
310
|
+
file_path = daily_path / f"{content_type}.json"
|
|
311
|
+
|
|
312
|
+
with open(file_path, 'w', encoding='utf-8') as f:
|
|
313
|
+
json.dump(content, f, ensure_ascii=False, indent=2)
|
|
314
|
+
|
|
315
|
+
return file_path
|
|
316
|
+
|
|
317
|
+
def load_daily_content(self, content_type: str,
|
|
318
|
+
target_date: Optional[date] = None) -> Optional[Dict[str, Any]]:
|
|
319
|
+
"""
|
|
320
|
+
Load content from the daily directory.
|
|
321
|
+
|
|
322
|
+
Args:
|
|
323
|
+
content_type: Type of content to load
|
|
324
|
+
target_date: Date for the content (defaults to today)
|
|
325
|
+
|
|
326
|
+
Returns:
|
|
327
|
+
Content dictionary or None if not found
|
|
328
|
+
"""
|
|
329
|
+
if target_date is None:
|
|
330
|
+
target_date = date.today()
|
|
331
|
+
|
|
332
|
+
file_path = self.get_daily_dir(target_date) / f"{content_type}.json"
|
|
333
|
+
|
|
334
|
+
if not file_path.exists():
|
|
335
|
+
return None
|
|
336
|
+
|
|
337
|
+
try:
|
|
338
|
+
with open(file_path, 'r', encoding='utf-8') as f:
|
|
339
|
+
return json.load(f)
|
|
340
|
+
except (json.JSONDecodeError, IOError) as e:
|
|
341
|
+
print(f"Error loading {content_type}: {e}")
|
|
342
|
+
return None
|
|
343
|
+
|
|
344
|
+
def get_recent_daily_content(self, days: int = 14) -> List[Dict[str, Any]]:
|
|
345
|
+
"""
|
|
346
|
+
Get content from recent N days for deduplication.
|
|
347
|
+
|
|
348
|
+
Args:
|
|
349
|
+
days: Number of days to look back
|
|
350
|
+
|
|
351
|
+
Returns:
|
|
352
|
+
List of content dictionaries from recent days
|
|
353
|
+
"""
|
|
354
|
+
recent_content = []
|
|
355
|
+
today = date.today()
|
|
356
|
+
|
|
357
|
+
for i in range(days):
|
|
358
|
+
target_date = today - __import__('datetime').timedelta(days=i)
|
|
359
|
+
content = self.load_daily_content('keypoint', target_date)
|
|
360
|
+
if content:
|
|
361
|
+
recent_content.append(content)
|
|
362
|
+
|
|
363
|
+
return recent_content
|
|
364
|
+
|
|
365
|
+
def get_recent_topics(self, days: int = 14) -> List[str]:
|
|
366
|
+
"""
|
|
367
|
+
Get topic fingerprints from recent days for deduplication.
|
|
368
|
+
|
|
369
|
+
Args:
|
|
370
|
+
days: Number of days to look back
|
|
371
|
+
|
|
372
|
+
Returns:
|
|
373
|
+
List of topic fingerprints
|
|
374
|
+
"""
|
|
375
|
+
recent_content = self.get_recent_daily_content(days)
|
|
376
|
+
return [c.get('topic_fingerprint', '') for c in recent_content if c.get('topic_fingerprint')]
|
|
377
|
+
|
|
378
|
+
def add_to_error_notebook(self, state: Dict[str, Any],
                          error: Dict[str, Any]) -> Dict[str, Any]:
    """Stamp an error entry and append it to the error notebook.

    Args:
        state: Current state
        error: Error entry with question, user_answer, correct_answer,
            explanation (keypoint_date and question_type are optional)

    Returns:
        Updated state
    """
    # Stamp bookkeeping fields; wrong_count defaults to 1 unless the
    # caller pre-counted.
    error['date'] = date.today().isoformat()
    error['reviewed'] = False
    error.setdefault('wrong_count', 1)
    state['error_notebook'].append(error)
    return state
|
|
396
|
+
|
|
397
|
+
def get_errors_page(self, state: Dict[str, Any],
                    page: int = 1,
                    per_page: int = 5,
                    month: str = None,
                    random: int = None) -> Dict[str, Any]:
    """
    Get paginated errors from the error notebook.

    Args:
        state: Current state
        page: Page number (1-indexed); clamped into the valid range
        per_page: Items per page (default 5)
        month: Filter by month (YYYY-MM format)
        random: Return N random errors instead of paginated

    Returns:
        Dictionary with pagination info and error items:
        {
            'total': total_count,
            'page': current_page,
            'per_page': items_per_page,
            'total_pages': total_pages,
            'has_more': bool,
            'errors': [error_items]
        }
    """
    # NOTE: the `random` parameter shadows the stdlib module name,
    # hence the aliased local import.
    import random as random_module

    errors = state.get('error_notebook', [])

    # Sort by date descending (newest first)
    errors = sorted(errors, key=lambda x: x.get('date', ''), reverse=True)

    # Filter by month if specified
    if month:
        errors = [e for e in errors if e.get('date', '').startswith(month)]

    total = len(errors)

    # Random mode: sample N entries instead of slicing a page.
    if random and random > 0:
        random_count = min(random, total)
        selected = random_module.sample(errors, random_count) if total > 0 else []
        return {
            'total': total,
            'page': 1,
            'per_page': random_count,
            'total_pages': 1,
            'has_more': total > random_count,
            'mode': 'random',
            'errors': selected
        }

    # Pagination: ceil-divide for page count, then clamp the requested
    # page into [1, total_pages] (page stays 1 when there are no items).
    total_pages = (total + per_page - 1) // per_page if per_page > 0 else 1
    page = max(1, min(page, total_pages)) if total_pages > 0 else 1

    start = (page - 1) * per_page
    end = start + per_page

    return {
        'total': total,
        'page': page,
        'per_page': per_page,
        'total_pages': total_pages,
        'has_more': page < total_pages,
        'has_prev': page > 1,
        'mode': 'paginated',
        'errors': errors[start:end]
    }
|
|
467
|
+
|
|
468
|
+
def get_error_stats(self, state: Dict[str, Any]) -> Dict[str, Any]:
    """Summarize the error notebook: totals, review split, per-month.

    Args:
        state: Current state

    Returns:
        Dictionary with 'total', 'reviewed', 'unreviewed' counts and a
        'by_month' mapping (YYYY-MM -> count, newest month first).
    """
    from collections import Counter

    entries = state.get('error_notebook', [])
    if not entries:
        return {'total': 0, 'reviewed': 0, 'unreviewed': 0, 'by_month': {}}

    reviewed_total = sum(1 for item in entries if item.get('reviewed', False))

    # Bucket by YYYY-MM prefix of the date; undated entries are skipped.
    month_counts = Counter(
        item['date'][:7] for item in entries if item.get('date', '')
    )

    return {
        'total': len(entries),
        'reviewed': reviewed_total,
        'unreviewed': len(entries) - reviewed_total,
        'by_month': dict(sorted(month_counts.items(), reverse=True)),
    }
|
|
510
|
+
|
|
511
|
+
# === Error Review Methods ===
|
|
512
|
+
|
|
513
|
+
def review_error(self, state: Dict[str, Any], error_index: int,
                 correct: bool) -> Dict[str, Any]:
    """Record a review outcome for one error-notebook entry.

    A correct answer marks the entry as reviewed (counts toward the
    Error Slayer badge); a wrong answer bumps its wrong_count.
    Out-of-range indices are ignored.

    Args:
        state: Current state
        error_index: Index of error in error_notebook
        correct: Whether the user answered correctly

    Returns:
        Updated state
    """
    notebook = state.get('error_notebook', [])
    if 0 <= error_index < len(notebook):
        entry = notebook[error_index]
        if correct:
            entry['reviewed'] = True
        else:
            entry['wrong_count'] = entry.get('wrong_count', 1) + 1
        state['error_notebook'] = notebook
    return state
|
|
536
|
+
|
|
537
|
+
def increment_wrong_count(self, state: Dict[str, Any],
                          error_index: int) -> Dict[str, Any]:
    """
    Increment wrong count for an error (convenience method).

    Thin wrapper over review_error() with correct=False; see that
    method for index-bounds behavior.

    Args:
        state: Current state
        error_index: Index of error in error_notebook

    Returns:
        Updated state
    """
    return self.review_error(state, error_index, correct=False)
|
|
550
|
+
|
|
551
|
+
def get_review_errors(self, state: Dict[str, Any],
                      count: int = 5) -> List[Dict[str, Any]]:
    """Pick up to *count* unreviewed errors for a review session.

    Args:
        state: Current state
        count: Maximum number of errors to return

    Returns:
        List of unreviewed errors (most recent first)
    """
    pending = [
        item for item in state.get('error_notebook', [])
        if not item.get('reviewed', False)
    ]
    pending.sort(key=lambda item: item.get('date', ''), reverse=True)
    return pending[:count]
|
|
570
|
+
|
|
571
|
+
def archive_stale_errors(self, state: Dict[str, Any]) -> Dict[str, Any]:
|
|
572
|
+
"""
|
|
573
|
+
Archive errors with wrong_count >= 3 and over 30 days old.
|
|
574
|
+
|
|
575
|
+
Args:
|
|
576
|
+
state: Current state
|
|
577
|
+
|
|
578
|
+
Returns:
|
|
579
|
+
Updated state with stale errors moved to error_archive
|
|
580
|
+
"""
|
|
581
|
+
from datetime import timedelta
|
|
582
|
+
|
|
583
|
+
errors = state.get('error_notebook', [])
|
|
584
|
+
archive = state.get('error_archive', [])
|
|
585
|
+
today = date.today()
|
|
586
|
+
|
|
587
|
+
remaining = []
|
|
588
|
+
archived_count = 0
|
|
589
|
+
|
|
590
|
+
for error in errors:
|
|
591
|
+
# Skip already reviewed
|
|
592
|
+
if error.get('reviewed', False):
|
|
593
|
+
remaining.append(error)
|
|
594
|
+
continue
|
|
595
|
+
|
|
596
|
+
wrong_count = error.get('wrong_count', 1)
|
|
597
|
+
error_date_str = error.get('date', '')
|
|
598
|
+
|
|
599
|
+
# Calculate days since error was created
|
|
600
|
+
try:
|
|
601
|
+
error_date = datetime.strptime(error_date_str, '%Y-%m-%d').date()
|
|
602
|
+
days_old = (today - error_date).days
|
|
603
|
+
except (ValueError, TypeError):
|
|
604
|
+
days_old = 0
|
|
605
|
+
|
|
606
|
+
# Archive if wrong_count >= 3 and over 30 days old
|
|
607
|
+
if wrong_count >= 3 and days_old >= 30:
|
|
608
|
+
error['archived_at'] = today.isoformat()
|
|
609
|
+
archive.append(error)
|
|
610
|
+
archived_count += 1
|
|
611
|
+
else:
|
|
612
|
+
remaining.append(error)
|
|
613
|
+
|
|
614
|
+
state['error_notebook'] = remaining
|
|
615
|
+
state['error_archive'] = archive
|
|
616
|
+
|
|
617
|
+
# Log archival event
|
|
618
|
+
if archived_count > 0:
|
|
619
|
+
self.append_event("errors_archived", {"count": archived_count})
|
|
620
|
+
|
|
621
|
+
return state
|
|
622
|
+
|
|
623
|
+
def clear_reviewed_errors(self, state: Dict[str, Any]) -> Dict[str, Any]:
    """Drop reviewed entries from the notebook (archive keeps stats).

    Args:
        state: Current state

    Returns:
        Updated state
    """
    pending = []
    for entry in state.get('error_notebook', []):
        if not entry.get('reviewed', False):
            pending.append(entry)
    state['error_notebook'] = pending
    return state
|
|
639
|
+
|
|
640
|
+
# === Completion Tracking Methods ===
|
|
641
|
+
|
|
642
|
+
def can_take_quiz(self, state: Dict[str, Any]) -> bool:
    """Whether today's quiz is still available.

    Args:
        state: Current state

    Returns:
        True if quiz can be taken, False if already completed today
    """
    done_on = state.get("completion_status", {}).get("quiz_completed_date")
    return done_on != date.today().isoformat()
|
|
655
|
+
|
|
656
|
+
def mark_quiz_completed(self, state: Dict[str, Any]) -> Dict[str, Any]:
    """Record today's date as the quiz completion date.

    Args:
        state: Current state

    Returns:
        Updated state
    """
    status = state.setdefault("completion_status", {})
    status["quiz_completed_date"] = date.today().isoformat()
    return state
|
|
671
|
+
|
|
672
|
+
# === Keypoint View Tracking ===
|
|
673
|
+
|
|
674
|
+
def record_keypoint_view(self, state: Dict[str, Any], view_date: date = None) -> Dict[str, Any]:
    """Append a keypoint-view record, keeping only the last 30.

    Args:
        state: Current state
        view_date: Date of viewed keypoint (defaults to today)

    Returns:
        Updated state
    """
    when = view_date if view_date is not None else date.today()

    status = state.setdefault("completion_status", {})
    history = status.setdefault("keypoint_view_history", [])
    history.append({
        "date": when.isoformat(),
        "viewed_at": datetime.now().isoformat(),
    })

    # Cap the history so state.json does not grow without bound.
    status["keypoint_view_history"] = history[-30:]
    return state
|
|
705
|
+
|
|
706
|
+
def has_viewed_keypoint(self, state: Dict[str, Any], view_date: date) -> bool:
    """Whether a view record exists for the keypoint of *view_date*.

    Args:
        state: Current state
        view_date: Date to check

    Returns:
        True if keypoint for that date has been viewed
    """
    target = view_date.isoformat()
    records = state.get("completion_status", {}).get("keypoint_view_history", [])
    for record in records:
        if record.get("date") == target:
            return True
    return False
|
|
719
|
+
|
|
720
|
+
# === Initialization and Preferences ===
|
|
721
|
+
|
|
722
|
+
def update_onboarding_step(self, state: Dict[str, Any], step: int) -> Dict[str, Any]:
    """Store the onboarding step, clamped to 0-6.

    A requested step of 6 or more also marks onboarding as finished.

    Args:
        state: Current state
        step: New step (0-6)

    Returns:
        Updated state
    """
    state["onboarding_step"] = min(6, max(0, step))
    if step >= 6:
        state["initialized"] = True
    return state
|
|
737
|
+
|
|
738
|
+
def update_preferences(self, state: Dict[str, Any],
                       cefr_level: str = None,
                       tutor_style: str = None,
                       oral_written_ratio: float = None,
                       topics: Dict[str, float] = None) -> Dict[str, Any]:
    """Apply any provided preference values; None means 'leave as is'.

    Args:
        state: Current state
        cefr_level: New CEFR level (optional)
        tutor_style: New tutor style (optional)
        oral_written_ratio: New oral/written ratio (optional)
        topics: New topic weights (optional)

    Returns:
        Updated state
    """
    prefs = state.setdefault("preferences", {})

    if cefr_level:
        prefs["cefr_level"] = cefr_level
    if tutor_style:
        prefs["tutor_style"] = tutor_style
    # Ratio may legitimately be 0.0, so test against None explicitly.
    if oral_written_ratio is not None:
        prefs["oral_written_ratio"] = oral_written_ratio
    if topics:
        prefs["topics"] = topics

    return state
|
|
769
|
+
|
|
770
|
+
def update_schedule(self, state: Dict[str, Any],
                    keypoint_time: str = None,
                    quiz_time: str = None,
                    timezone: str = None) -> Dict[str, Any]:
    """Apply any provided schedule values; None/empty means 'leave as is'.

    Args:
        state: Current state
        keypoint_time: Keypoint push time (HH:MM)
        quiz_time: Quiz push time (HH:MM)
        timezone: Timezone string

    Returns:
        Updated state
    """
    schedule = state.setdefault("schedule", {})

    if keypoint_time:
        schedule["keypoint_time"] = keypoint_time
    if quiz_time:
        schedule["quiz_time"] = quiz_time
    if timezone:
        schedule["timezone"] = timezone

    return state
|
|
797
|
+
|
|
798
|
+
def backup_state(self, backup_dir: str = "backups") -> Path:
|
|
799
|
+
"""
|
|
800
|
+
Create a backup of the current state.
|
|
801
|
+
|
|
802
|
+
Args:
|
|
803
|
+
backup_dir: Directory to store backups
|
|
804
|
+
|
|
805
|
+
Returns:
|
|
806
|
+
Path to the backup file
|
|
807
|
+
"""
|
|
808
|
+
backup_path = Path(backup_dir)
|
|
809
|
+
backup_path.mkdir(parents=True, exist_ok=True)
|
|
810
|
+
|
|
811
|
+
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
|
|
812
|
+
backup_file = backup_path / f"state_backup_{timestamp}.json"
|
|
813
|
+
|
|
814
|
+
state = self.load_state()
|
|
815
|
+
with open(backup_file, 'w', encoding='utf-8') as f:
|
|
816
|
+
json.dump(state, f, ensure_ascii=False, indent=2)
|
|
817
|
+
|
|
818
|
+
return backup_file
|
|
819
|
+
|
|
820
|
+
def restore_from_backup(self, backup_file: Path) -> bool:
|
|
821
|
+
"""
|
|
822
|
+
Restore state from a backup file.
|
|
823
|
+
|
|
824
|
+
Args:
|
|
825
|
+
backup_file: Path to the backup file
|
|
826
|
+
|
|
827
|
+
Returns:
|
|
828
|
+
True if successful, False otherwise
|
|
829
|
+
"""
|
|
830
|
+
try:
|
|
831
|
+
with open(backup_file, 'r', encoding='utf-8') as f:
|
|
832
|
+
state = json.load(f)
|
|
833
|
+
self.save_state(state)
|
|
834
|
+
return True
|
|
835
|
+
except (json.JSONDecodeError, IOError) as e:
|
|
836
|
+
print(f"Error restoring from backup: {e}")
|
|
837
|
+
return False
|
|
838
|
+
|
|
839
|
+
|
|
840
|
+
# CLI interface
if __name__ == "__main__":
    import argparse
    import sys

    parser = argparse.ArgumentParser(description="State Manager for eng-lang-tutor")
    parser.add_argument('--data-dir', default=None,
                        help='Data directory path (default: ~/.openclaw/state/eng-lang-tutor or OPENCLAW_STATE_DIR env)')
    parser.add_argument('command', nargs='?',
                        choices=['show', 'backup', 'save_daily', 'record_view',
                                 'stats', 'config', 'errors', 'schedule'],
                        help='Command to execute')
    parser.add_argument('--content-type', help='Content type for save_daily (keypoint, quiz)')
    parser.add_argument('--content', help='JSON content for save_daily')
    parser.add_argument('--date', help='Date for content (YYYY-MM-DD format)')
    # Errors command options
    parser.add_argument('--page', type=int, default=1, help='Page number for errors list')
    parser.add_argument('--per-page', type=int, default=5, help='Items per page for errors')
    parser.add_argument('--month', help='Filter errors by month (YYYY-MM)')
    parser.add_argument('--random', type=int, help='Get N random errors')
    parser.add_argument('--stats', action='store_true', help='Get error statistics')
    parser.add_argument('--review', type=int, help='Get N errors for review session')
    # Config command options
    parser.add_argument('--cefr', help='Set CEFR level (A1-C2)')
    parser.add_argument('--style', help='Set tutor style')
    parser.add_argument('--oral-ratio', type=int, help='Set oral/written ratio (0-100)')
    # Schedule command options
    parser.add_argument('--keypoint-time', help='Set keypoint push time (HH:MM)')
    parser.add_argument('--quiz-time', help='Set quiz push time (HH:MM)')

    args = parser.parse_args()

    def _parse_cli_date(raw):
        """Parse a YYYY-MM-DD string into a date, or exit(1) with a message."""
        try:
            return datetime.strptime(raw, '%Y-%m-%d').date()
        except ValueError:
            print("Error: Invalid date format. Use YYYY-MM-DD")
            sys.exit(1)

    def _minutes_of_day(raw):
        """Convert an HH:MM string to minutes since midnight, or exit(1).

        Fixes two CLI defects: malformed input (e.g. "abc", "7") previously
        raised an unhandled ValueError traceback, and out-of-range values
        (e.g. "25:99") were silently accepted.
        """
        try:
            hours, minutes = map(int, raw.split(':'))
        except ValueError:
            print(f"Error: Invalid time format: {raw}. Use HH:MM")
            sys.exit(1)
        if not (0 <= hours <= 23 and 0 <= minutes <= 59):
            print(f"Error: Invalid time value: {raw}. Use HH:MM (00:00-23:59)")
            sys.exit(1)
        return hours * 60 + minutes

    sm = StateManager(args.data_dir)

    if args.command == 'show' or not args.command:
        # Default command: dump the full state as pretty JSON.
        state = sm.load_state()
        print(json.dumps(state, indent=2, ensure_ascii=False))

    elif args.command == 'backup':
        backup_path = sm.backup_state()
        print(f"Backup created: {backup_path}")

    elif args.command == 'save_daily':
        if not args.content_type or not args.content:
            print("Error: --content-type and --content are required for save_daily")
            sys.exit(1)

        try:
            content = json.loads(args.content)
        except json.JSONDecodeError as e:
            print(f"Error: Invalid JSON content: {e}")
            sys.exit(1)

        target_date = _parse_cli_date(args.date) if args.date else None
        path = sm.save_daily_content(args.content_type, content, target_date)
        print(f"Saved to: {path}")

    elif args.command == 'record_view':
        target_date = _parse_cli_date(args.date) if args.date else None
        state = sm.load_state()
        sm.record_keypoint_view(state, target_date)
        sm.save_state(state)
        print("View recorded successfully")

    elif args.command == 'stats':
        # Display learning progress summary.
        from gamification import GamificationManager
        state = sm.load_state()
        gm = GamificationManager()
        summary = gm.get_progress_summary(state)
        print(json.dumps(summary, indent=2, ensure_ascii=False))

    elif args.command == 'config':
        # Display or update user configuration.
        state = sm.load_state()

        # If no update options, just show current config
        if not any([args.cefr, args.style, args.oral_ratio is not None]):
            prefs = state.get("preferences", {})
            config = {
                "cefr_level": prefs.get("cefr_level", "B1"),
                "tutor_style": prefs.get("tutor_style", "humorous"),
                "oral_ratio": prefs.get("oral_ratio", 70),
                "topic_weights": prefs.get("topic_weights", {}),
                "schedule": state.get("schedule", {})
            }
            print(json.dumps(config, indent=2, ensure_ascii=False))
        else:
            # Update configuration, validating each option before applying it.
            if args.cefr:
                if args.cefr not in ['A1', 'A2', 'B1', 'B2', 'C1', 'C2']:
                    print("Error: Invalid CEFR level. Must be A1, A2, B1, B2, C1, or C2")
                    sys.exit(1)
                state = sm.update_preferences(state, cefr_level=args.cefr)
                print(f"Updated CEFR level to: {args.cefr}")

            if args.style:
                if args.style not in ['humorous', 'rigorous', 'casual', 'professional']:
                    print("Error: Invalid style. Must be humorous, rigorous, casual, or professional")
                    sys.exit(1)
                state = sm.update_preferences(state, tutor_style=args.style)
                print(f"Updated tutor style to: {args.style}")

            if args.oral_ratio is not None:
                if not 0 <= args.oral_ratio <= 100:
                    print("Error: Oral ratio must be between 0 and 100")
                    sys.exit(1)
                state = sm.update_preferences(state, oral_ratio=args.oral_ratio)
                print(f"Updated oral ratio to: {args.oral_ratio}%")

            sm.save_state(state)
            print("Configuration updated successfully")

    elif args.command == 'errors':
        # Error notebook operations.
        state = sm.load_state()

        if args.stats:
            # Get error statistics
            stats = sm.get_error_stats(state)
            print(json.dumps(stats, indent=2, ensure_ascii=False))

        elif args.review is not None:
            # Get errors for review session
            errors = sm.get_review_errors(state, count=args.review)
            result = {
                "count": len(errors),
                "errors": errors
            }
            print(json.dumps(result, indent=2, ensure_ascii=False))

        else:
            # Get paginated errors list
            result = sm.get_errors_page(
                state,
                page=args.page,
                per_page=args.per_page,
                month=args.month,
                random=args.random
            )
            print(json.dumps(result, indent=2, ensure_ascii=False))

    elif args.command == 'schedule':
        # Display or update schedule configuration.
        state = sm.load_state()

        # If no update options, just show current schedule
        if not any([args.keypoint_time, args.quiz_time]):
            schedule = state.get("schedule", {})
            print(json.dumps(schedule, indent=2, ensure_ascii=False))
        else:
            # Merge new times over the currently stored ones so a single
            # flag can be changed without re-specifying the other.
            current = state.get("schedule", {})
            new_keypoint = args.keypoint_time or current.get("keypoint_time", "06:45")
            new_quiz = args.quiz_time or current.get("quiz_time", "22:45")

            # Quiz must come after the keypoint push on the same day.
            if _minutes_of_day(new_quiz) <= _minutes_of_day(new_keypoint):
                print("Error: Quiz time must be later than keypoint time")
                sys.exit(1)

            state = sm.update_schedule(
                state,
                keypoint_time=new_keypoint,
                quiz_time=new_quiz
            )
            sm.save_state(state)
            print(f"Schedule updated: keypoint at {new_keypoint}, quiz at {new_quiz}")
|