iflow_mcp_democratize_technology_chronos_mcp-2.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. chronos_mcp/__init__.py +5 -0
  2. chronos_mcp/__main__.py +9 -0
  3. chronos_mcp/accounts.py +410 -0
  4. chronos_mcp/bulk.py +946 -0
  5. chronos_mcp/caldav_utils.py +149 -0
  6. chronos_mcp/calendars.py +204 -0
  7. chronos_mcp/config.py +187 -0
  8. chronos_mcp/credentials.py +190 -0
  9. chronos_mcp/events.py +515 -0
  10. chronos_mcp/exceptions.py +477 -0
  11. chronos_mcp/journals.py +477 -0
  12. chronos_mcp/logging_config.py +23 -0
  13. chronos_mcp/models.py +202 -0
  14. chronos_mcp/py.typed +0 -0
  15. chronos_mcp/rrule.py +259 -0
  16. chronos_mcp/search.py +315 -0
  17. chronos_mcp/server.py +121 -0
  18. chronos_mcp/tasks.py +518 -0
  19. chronos_mcp/tools/__init__.py +29 -0
  20. chronos_mcp/tools/accounts.py +151 -0
  21. chronos_mcp/tools/base.py +59 -0
  22. chronos_mcp/tools/bulk.py +557 -0
  23. chronos_mcp/tools/calendars.py +142 -0
  24. chronos_mcp/tools/events.py +698 -0
  25. chronos_mcp/tools/journals.py +310 -0
  26. chronos_mcp/tools/tasks.py +414 -0
  27. chronos_mcp/utils.py +163 -0
  28. chronos_mcp/validation.py +636 -0
  29. iflow_mcp_democratize_technology_chronos_mcp-2.0.0.dist-info/METADATA +299 -0
  30. iflow_mcp_democratize_technology_chronos_mcp-2.0.0.dist-info/RECORD +68 -0
  31. iflow_mcp_democratize_technology_chronos_mcp-2.0.0.dist-info/WHEEL +5 -0
  32. iflow_mcp_democratize_technology_chronos_mcp-2.0.0.dist-info/entry_points.txt +2 -0
  33. iflow_mcp_democratize_technology_chronos_mcp-2.0.0.dist-info/licenses/LICENSE +21 -0
  34. iflow_mcp_democratize_technology_chronos_mcp-2.0.0.dist-info/top_level.txt +2 -0
  35. tests/__init__.py +0 -0
  36. tests/conftest.py +91 -0
  37. tests/unit/__init__.py +0 -0
  38. tests/unit/test_accounts.py +380 -0
  39. tests/unit/test_accounts_ssrf.py +134 -0
  40. tests/unit/test_base.py +135 -0
  41. tests/unit/test_bulk.py +380 -0
  42. tests/unit/test_bulk_create.py +408 -0
  43. tests/unit/test_bulk_delete.py +341 -0
  44. tests/unit/test_bulk_resource_limits.py +74 -0
  45. tests/unit/test_caldav_utils.py +300 -0
  46. tests/unit/test_calendars.py +286 -0
  47. tests/unit/test_config.py +111 -0
  48. tests/unit/test_config_validation.py +128 -0
  49. tests/unit/test_credentials_security.py +189 -0
  50. tests/unit/test_cryptography_security.py +178 -0
  51. tests/unit/test_events.py +536 -0
  52. tests/unit/test_exceptions.py +58 -0
  53. tests/unit/test_journals.py +1097 -0
  54. tests/unit/test_models.py +95 -0
  55. tests/unit/test_race_conditions.py +202 -0
  56. tests/unit/test_recurring_events.py +156 -0
  57. tests/unit/test_rrule.py +217 -0
  58. tests/unit/test_search.py +372 -0
  59. tests/unit/test_search_advanced.py +333 -0
  60. tests/unit/test_server_input_validation.py +219 -0
  61. tests/unit/test_ssrf_protection.py +505 -0
  62. tests/unit/test_tasks.py +918 -0
  63. tests/unit/test_thread_safety.py +301 -0
  64. tests/unit/test_tools_journals.py +617 -0
  65. tests/unit/test_tools_tasks.py +968 -0
  66. tests/unit/test_url_validation_security.py +234 -0
  67. tests/unit/test_utils.py +180 -0
  68. tests/unit/test_validation.py +983 -0
chronos_mcp/bulk.py ADDED
@@ -0,0 +1,946 @@
"""Bulk operations for Chronos MCP."""

import concurrent.futures
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Union

from .exceptions import ErrorSanitizer
from .logging_config import setup_logging
from .models import TaskStatus
from .utils import parse_datetime

logger = setup_logging()


class BulkOperationMode(Enum):
    """Modes for bulk operation execution."""

    ATOMIC = "atomic"
    CONTINUE_ON_ERROR = "continue"
    FAIL_FAST = "fail_fast"


@dataclass
class BulkOptions:
    """Configuration for bulk operations."""

    mode: BulkOperationMode = BulkOperationMode.CONTINUE_ON_ERROR
    max_parallel: int = 5
    timeout_per_operation: int = 30
    validate_before_execute: bool = True
    dry_run: bool = False
    adaptive_scaling: bool = True  # Enable adaptive parallel scaling
    backpressure_threshold_ms: float = 1000.0  # Reduce parallelism if ops take longer
    min_parallel: int = 1
    max_parallel_limit: int = 20
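
# Usage sketch for the three modes (illustrative; a BulkOperationManager built
# further below would consume these):
#
#     atomic_opts = BulkOptions(mode=BulkOperationMode.ATOMIC)    # roll back everything on any failure
#     lenient_opts = BulkOptions(mode=BulkOperationMode.CONTINUE_ON_ERROR)  # record failures, keep going
#     strict_opts = BulkOptions(mode=BulkOperationMode.FAIL_FAST)  # stop at the first failure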


@dataclass
class OperationResult:
    """Result of a single operation within bulk."""

    index: int
    success: bool
    uid: Optional[str] = None
    error: Optional[str] = None
    duration_ms: float = 0.0


@dataclass
class BulkResult:
    """Aggregate result of bulk operation."""

    total: int
    successful: int
    failed: int
    results: List[OperationResult] = field(default_factory=list)
    errors: List[str] = field(default_factory=list)
    duration_ms: float = 0.0

    @property
    def success_rate(self) -> float:
        return (self.successful / self.total * 100) if self.total > 0 else 0.0

    def get_failures(self) -> List[OperationResult]:
        return [r for r in self.results if not r.success]

    def get_successes(self) -> List[OperationResult]:
        return [r for r in self.results if r.success]
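
# Reading a BulkResult (illustrative; `manager`, `cal_uid`, and `events` are
# placeholders supplied by the caller):
#
#     result = manager.bulk_create_events(cal_uid, events)
#     print(f"{result.successful}/{result.total} succeeded "
#           f"({result.success_rate:.1f}%) in {result.duration_ms:.0f} ms")
#     for failure in result.get_failures():
#         print(f"  item {failure.index}: {failure.error}")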


class BulkOperationManager:
    """Manages bulk CalDAV operations with adaptive concurrency control."""

    def __init__(self, event_manager=None, task_manager=None, journal_manager=None):
        self.event_manager = event_manager
        self.task_manager = task_manager
        self.journal_manager = journal_manager
        self.executor = ThreadPoolExecutor(max_workers=20)

        # Adaptive concurrency control
        self._performance_tracker = {}
        self._backpressure_lock = threading.Lock()

    def _calculate_adaptive_parallelism(
        self, options: BulkOptions, operation_type: str, recent_performance: List[float]
    ) -> int:
        """Calculate optimal parallelism based on recent performance"""
        if not options.adaptive_scaling or not recent_performance:
            return options.max_parallel

        # Calculate average operation time
        avg_time_ms = sum(recent_performance) / len(recent_performance)

        if avg_time_ms > options.backpressure_threshold_ms:
            # Operations are slow, reduce parallelism
            new_parallel = max(options.min_parallel, options.max_parallel // 2)
        elif avg_time_ms < options.backpressure_threshold_ms / 2:
            # Operations are fast, increase parallelism
            new_parallel = min(options.max_parallel_limit, options.max_parallel + 2)
        else:
            # Operations are within acceptable range
            new_parallel = options.max_parallel

        return new_parallel

    def _track_operation_performance(self, operation_type: str, duration_ms: float):
        """Track operation performance for adaptive scaling"""
        with self._backpressure_lock:
            if operation_type not in self._performance_tracker:
                self._performance_tracker[operation_type] = []

            perf_list = self._performance_tracker[operation_type]
            perf_list.append(duration_ms)

            # Keep only last 50 measurements for sliding window
            if len(perf_list) > 50:
                perf_list.pop(0)

    def _get_recent_performance(self, operation_type: str) -> List[float]:
        """Get recent performance measurements"""
        with self._backpressure_lock:
            return self._performance_tracker.get(operation_type, []).copy()
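
    # Worked example of the scaling policy above, using the BulkOptions defaults
    # (backpressure_threshold_ms=1000.0, max_parallel=5, min_parallel=1,
    # max_parallel_limit=20):
    #
    #     mgr = BulkOperationManager()
    #     opts = BulkOptions()
    #     mgr._calculate_adaptive_parallelism(opts, "create_event", [1500.0])  # -> 2 (slow: 5 // 2)
    #     mgr._calculate_adaptive_parallelism(opts, "create_event", [400.0])   # -> 7 (fast: 5 + 2)
    #     mgr._calculate_adaptive_parallelism(opts, "create_event", [700.0])   # -> 5 (unchanged)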

    def bulk_create_events(
        self,
        calendar_uid: str,
        events: List[Dict[str, Any]],
        options: BulkOptions = None,
        account_alias: Optional[str] = None,
    ) -> BulkResult:
        """Create multiple events with configurable error handling."""
        if options is None:
            options = BulkOptions()

        start_time = time.time()
        result = BulkResult(total=len(events), successful=0, failed=0)

        if options.validate_before_execute:
            validation_errors = self._validate_events(events)
            if validation_errors and options.mode == BulkOperationMode.ATOMIC:
                for idx, error in validation_errors:
                    result.results.append(
                        OperationResult(
                            index=idx,
                            success=False,
                            error=f"Validation failed: {error}",
                        )
                    )
                result.failed = len(validation_errors)
                result.duration_ms = (time.time() - start_time) * 1000
                return result

        if options.dry_run:
            for idx in range(len(events)):
                result.results.append(
                    OperationResult(
                        index=idx,
                        success=True,
                        uid=f"dry-run-uid-{idx}",
                        duration_ms=0.1,
                    )
                )
            result.successful = len(events)
        else:
            created_uids = []
            current_parallel = options.max_parallel
            batch_start = 0

            while batch_start < len(events):
                # Adaptive scaling - adjust parallelism based on performance
                if options.adaptive_scaling and batch_start > 0:
                    recent_perf = self._get_recent_performance("create_event")
                    current_parallel = self._calculate_adaptive_parallelism(
                        options, "create_event", recent_perf
                    )

                batch_end = min(batch_start + current_parallel, len(events))
                batch = events[batch_start:batch_end]

                batch_results = self._execute_batch_create(
                    calendar_uid, batch, batch_start, options, account_alias
                )

                # Track batch performance
                for op_result in batch_results:
                    self._track_operation_performance(
                        "create_event", op_result.duration_ms
                    )

                for op_result in batch_results:
                    result.results.append(op_result)
                    if op_result.success:
                        result.successful += 1
                        created_uids.append(op_result.uid)
                    else:
                        result.failed += 1

                        if options.mode == BulkOperationMode.FAIL_FAST:
                            break
                        elif options.mode == BulkOperationMode.ATOMIC:
                            failed_rollbacks = self._rollback_created_events(
                                calendar_uid, created_uids
                            )
                            result.successful = 0
                            result.failed = len(events)
                            if failed_rollbacks:
                                result.errors.append(
                                    f"CRITICAL: Atomic rollback incomplete - {len(failed_rollbacks)} "
                                    f"events could not be deleted: {', '.join(failed_rollbacks[:5])}"
                                    + ("..." if len(failed_rollbacks) > 5 else "")
                                )
                            break

                if (
                    options.mode == BulkOperationMode.FAIL_FAST and result.failed > 0
                ) or (options.mode == BulkOperationMode.ATOMIC and result.failed > 0):
                    break

                # Move to next batch
                batch_start = batch_end

        result.duration_ms = (time.time() - start_time) * 1000
        return result
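
    # Usage sketch (illustrative; assumes an EventManager configured elsewhere,
    # and "work-calendar" is a placeholder UID):
    #
    #     manager = BulkOperationManager(event_manager=event_manager)
    #     events = [
    #         {"summary": "Standup", "dtstart": "2024-01-15T09:00:00Z",
    #          "dtend": "2024-01-15T09:15:00Z"},
    #         {"summary": "Review", "dtstart": "2024-01-15T10:00:00Z",
    #          "dtend": "2024-01-15T11:00:00Z"},
    #     ]
    #     preview = manager.bulk_create_events("work-calendar", events,
    #                                          options=BulkOptions(dry_run=True))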

    def bulk_create_tasks(
        self,
        calendar_uid: str,
        tasks: List[Dict[str, Any]],
        options: BulkOptions = None,
        account_alias: Optional[str] = None,
    ) -> BulkResult:
        """Create multiple tasks with configurable error handling."""
        if options is None:
            options = BulkOptions()

        start_time = time.time()
        result = BulkResult(total=len(tasks), successful=0, failed=0)

        if options.validate_before_execute:
            validation_errors = self._validate_tasks(tasks)
            if validation_errors and options.mode == BulkOperationMode.ATOMIC:
                for idx, error in validation_errors:
                    result.results.append(
                        OperationResult(
                            index=idx,
                            success=False,
                            error=f"Validation failed: {error}",
                        )
                    )
                result.failed = len(validation_errors)
                result.duration_ms = (time.time() - start_time) * 1000
                return result

        if options.dry_run:
            for idx in range(len(tasks)):
                result.results.append(
                    OperationResult(
                        index=idx,
                        success=True,
                        uid=f"dry-run-task-uid-{idx}",
                        duration_ms=0.1,
                    )
                )
            result.successful = len(tasks)
        else:
            created_uids = []

            for batch_start in range(0, len(tasks), options.max_parallel):
                batch_end = min(batch_start + options.max_parallel, len(tasks))
                batch = tasks[batch_start:batch_end]

                batch_results = self._execute_batch_create_tasks(
                    calendar_uid, batch, batch_start, options, account_alias
                )

                for op_result in batch_results:
                    result.results.append(op_result)
                    if op_result.success:
                        result.successful += 1
                        created_uids.append(op_result.uid)
                    else:
                        result.failed += 1

                        if options.mode == BulkOperationMode.FAIL_FAST:
                            break
                        elif options.mode == BulkOperationMode.ATOMIC:
                            failed_rollbacks = self._rollback_created_tasks(
                                calendar_uid, created_uids
                            )
                            result.successful = 0
                            result.failed = len(tasks)
                            if failed_rollbacks:
                                result.errors.append(
                                    f"CRITICAL: Atomic rollback incomplete - {len(failed_rollbacks)} "
                                    f"tasks could not be deleted: {', '.join(failed_rollbacks[:5])}"
                                    + ("..." if len(failed_rollbacks) > 5 else "")
                                )
                            break

                if (
                    options.mode == BulkOperationMode.FAIL_FAST and result.failed > 0
                ) or (options.mode == BulkOperationMode.ATOMIC and result.failed > 0):
                    break

        result.duration_ms = (time.time() - start_time) * 1000
        return result

    def bulk_create_journals(
        self,
        calendar_uid: str,
        journals: List[Dict[str, Any]],
        options: BulkOptions = None,
        account_alias: Optional[str] = None,
    ) -> BulkResult:
        """Create multiple journals with configurable error handling."""
        if options is None:
            options = BulkOptions()

        start_time = time.time()
        result = BulkResult(total=len(journals), successful=0, failed=0)

        if options.validate_before_execute:
            validation_errors = self._validate_journals(journals)
            if validation_errors and options.mode == BulkOperationMode.ATOMIC:
                for idx, error in validation_errors:
                    result.results.append(
                        OperationResult(
                            index=idx,
                            success=False,
                            error=f"Validation failed: {error}",
                        )
                    )
                result.failed = len(validation_errors)
                result.duration_ms = (time.time() - start_time) * 1000
                return result

        if options.dry_run:
            for idx in range(len(journals)):
                result.results.append(
                    OperationResult(
                        index=idx,
                        success=True,
                        uid=f"dry-run-journal-uid-{idx}",
                        duration_ms=0.1,
                    )
                )
            result.successful = len(journals)
        else:
            created_uids = []

            for batch_start in range(0, len(journals), options.max_parallel):
                batch_end = min(batch_start + options.max_parallel, len(journals))
                batch = journals[batch_start:batch_end]

                batch_results = self._execute_batch_create_journals(
                    calendar_uid, batch, batch_start, options, account_alias
                )

                for op_result in batch_results:
                    result.results.append(op_result)
                    if op_result.success:
                        result.successful += 1
                        created_uids.append(op_result.uid)
                    else:
                        result.failed += 1

                        if options.mode == BulkOperationMode.FAIL_FAST:
                            break
                        elif options.mode == BulkOperationMode.ATOMIC:
                            failed_rollbacks = self._rollback_created_journals(
                                calendar_uid, created_uids
                            )
                            result.successful = 0
                            result.failed = len(journals)
                            if failed_rollbacks:
                                result.errors.append(
                                    f"CRITICAL: Atomic rollback incomplete - {len(failed_rollbacks)} "
                                    f"journals could not be deleted: {', '.join(failed_rollbacks[:5])}"
                                    + ("..." if len(failed_rollbacks) > 5 else "")
                                )
                            break

                if (
                    options.mode == BulkOperationMode.FAIL_FAST and result.failed > 0
                ) or (options.mode == BulkOperationMode.ATOMIC and result.failed > 0):
                    break

        result.duration_ms = (time.time() - start_time) * 1000
        return result

    def _validate_events(self, events: List[Dict[str, Any]]) -> List[Tuple[int, str]]:
        """Validate event data before execution."""
        errors = []

        for idx, event in enumerate(events):
            if not event.get("summary"):
                errors.append((idx, "Missing required field: summary"))
            if not event.get("dtstart"):
                errors.append((idx, "Missing required field: dtstart"))
            if not event.get("dtend"):
                errors.append((idx, "Missing required field: dtend"))

            try:
                start = datetime.fromisoformat(
                    str(event.get("dtstart", "")).replace("Z", "+00:00")
                )
                end = datetime.fromisoformat(
                    str(event.get("dtend", "")).replace("Z", "+00:00")
                )
                if end < start:
                    errors.append((idx, "End time before start time"))
            except Exception:
                errors.append((idx, "Invalid date format"))

        return errors

    def _validate_tasks(self, tasks: List[Dict[str, Any]]) -> List[Tuple[int, str]]:
        """Validate task data before execution."""
        errors = []

        for idx, task in enumerate(tasks):
            if not task.get("summary"):
                errors.append((idx, "Missing required field: summary"))

            # Validate priority if provided
            priority = task.get("priority")
            if priority is not None:
                try:
                    priority_val = int(priority)
                    if priority_val < 1 or priority_val > 9:
                        errors.append((idx, "Priority must be between 1-9"))
                except (ValueError, TypeError):
                    errors.append((idx, "Priority must be an integer"))

            # Validate status if provided
            status = task.get("status")
            if status is not None:
                try:
                    TaskStatus(status)
                except ValueError:
                    valid_statuses = [s.value for s in TaskStatus]
                    errors.append(
                        (idx, f"Invalid status. Must be one of: {valid_statuses}")
                    )

            # Validate percent_complete if provided
            percent = task.get("percent_complete")
            if percent is not None:
                try:
                    percent_val = int(percent)
                    if percent_val < 0 or percent_val > 100:
                        errors.append((idx, "Percent complete must be between 0-100"))
                except (ValueError, TypeError):
                    errors.append((idx, "Percent complete must be an integer"))

            # Validate due date if provided
            due = task.get("due")
            if due is not None:
                try:
                    if isinstance(due, str):
                        datetime.fromisoformat(due.replace("Z", "+00:00"))
                except Exception:
                    errors.append((idx, "Invalid due date format"))

        return errors

    def _validate_journals(
        self, journals: List[Dict[str, Any]]
    ) -> List[Tuple[int, str]]:
        """Validate journal data before execution."""
        errors = []

        for idx, journal in enumerate(journals):
            if not journal.get("summary"):
                errors.append((idx, "Missing required field: summary"))

            # Validate dtstart if provided
            dtstart = journal.get("dtstart")
            if dtstart is not None:
                try:
                    if isinstance(dtstart, str):
                        datetime.fromisoformat(dtstart.replace("Z", "+00:00"))
                except Exception:
                    errors.append((idx, "Invalid dtstart date format"))

        return errors
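
    # The validators report (index, message) tuples instead of raising, e.g.:
    #
    #     mgr._validate_tasks([{"summary": "ok"},
    #                          {"summary": "bad", "priority": 42}])
    #     # -> [(1, "Priority must be between 1-9")]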

    def _execute_batch_create(
        self,
        calendar_uid: str,
        batch: List[Dict],
        start_idx: int,
        options: BulkOptions,
        account_alias: Optional[str] = None,
    ) -> List[OperationResult]:
        """Execute a batch of create operations in parallel using ThreadPoolExecutor."""
        if not self.event_manager:
            raise ValueError("EventManager not provided to BulkOperationManager")

        # Handle empty batch edge case
        if not batch:
            return []

        def create_single_event(idx_event_tuple):
            idx, event = idx_event_tuple
            op_start = time.time()
            try:
                created_event = self.event_manager.create_event(
                    calendar_uid=calendar_uid,
                    summary=event.get("summary"),
                    start=event.get("dtstart"),
                    end=event.get("dtend"),
                    description=event.get("description"),
                    location=event.get("location"),
                    all_day=event.get("all_day", False),
                    alarm_minutes=event.get("alarm_minutes"),
                    recurrence_rule=event.get("recurrence_rule"),
                    attendees=event.get("attendees", []),
                    related_to=event.get("related_to", []),
                    account_alias=account_alias,
                )

                return OperationResult(
                    index=start_idx + idx,
                    success=True,
                    uid=created_event.uid,
                    duration_ms=(time.time() - op_start) * 1000,
                )
            except Exception as e:
                return OperationResult(
                    index=start_idx + idx,
                    success=False,
                    error=ErrorSanitizer.sanitize_message(str(e)),
                    duration_ms=(time.time() - op_start) * 1000,
                )

        # Use ThreadPoolExecutor with timeout control
        # CRITICAL: Cap max_workers to prevent resource exhaustion with large batches
        # 1000+ events should NOT create 1000+ threads
        # Default of 10 workers is optimal for I/O-bound CalDAV operations
        max_workers = min(len(batch), options.max_parallel or 10)
        indexed_batch = list(enumerate(batch))
        results: List[Optional[OperationResult]] = [None] * len(batch)

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_idx = {
                executor.submit(create_single_event, idx_event): idx
                for idx, idx_event in enumerate(indexed_batch)
            }

            try:
                for future in concurrent.futures.as_completed(
                    future_to_idx, timeout=options.timeout_per_operation * len(batch)
                ):
                    try:
                        result = future.result(timeout=options.timeout_per_operation)
                        # Maintain original order based on batch index
                        batch_idx = result.index - start_idx
                        results[batch_idx] = result
                    except concurrent.futures.TimeoutError:
                        # Handle individual operation timeout
                        batch_idx = future_to_idx[future]
                        results[batch_idx] = OperationResult(
                            index=start_idx + batch_idx,
                            success=False,
                            error=f"Operation timeout after {options.timeout_per_operation}s",
                            duration_ms=options.timeout_per_operation * 1000,
                        )
                    except Exception as e:
                        # Handle executor-level exceptions
                        batch_idx = future_to_idx[future]
                        results[batch_idx] = OperationResult(
                            index=start_idx + batch_idx,
                            success=False,
                            error=ErrorSanitizer.sanitize_message(f"Executor error: {e}"),
                            duration_ms=0,
                        )
            except concurrent.futures.TimeoutError:
                # Handle batch-level timeout
                for idx, future in enumerate(future_to_idx.keys()):
                    if not future.done():
                        future.cancel()
                    if results[idx] is None:
                        results[idx] = OperationResult(
                            index=start_idx + idx,
                            success=False,
                            error="Batch operation timeout",
                            duration_ms=0,
                        )

        return [r for r in results if r is not None]
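
    # The two-level timeout above, shown in isolation (illustrative sketch with
    # plain concurrent.futures; time.sleep stands in for a CalDAV call):
    #
    #     with ThreadPoolExecutor(max_workers=2) as pool:
    #         futures = [pool.submit(time.sleep, s) for s in (0.1, 0.2)]
    #         try:
    #             # batch deadline = per-operation timeout * number of operations
    #             for fut in concurrent.futures.as_completed(futures, timeout=30 * 2):
    #                 fut.result(timeout=30)  # per-operation deadline
    #         except concurrent.futures.TimeoutError:
    #             pass  # anything unfinished is recorded as a failed operation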

    def _execute_batch_create_tasks(
        self,
        calendar_uid: str,
        batch: List[Dict],
        start_idx: int,
        options: BulkOptions,
        account_alias: Optional[str] = None,
    ) -> List[OperationResult]:
        """Execute a batch of task create operations."""
        results = []

        # Use the provided task manager
        if not self.task_manager:
            raise ValueError("TaskManager not provided to BulkOperationManager")

        for idx, task in enumerate(batch):
            op_start = time.time()
            try:
                # Parse status if provided
                status = None
                if task.get("status"):
                    status = TaskStatus(task.get("status"))

                # Parse due date if provided as string
                due_dt = None
                if task.get("due"):
                    due_value = task.get("due")
                    if isinstance(due_value, str):
                        due_dt = parse_datetime(due_value)
                    else:
                        due_dt = due_value

                created_task = self.task_manager.create_task(
                    calendar_uid=calendar_uid,
                    summary=task.get("summary"),
                    description=task.get("description"),
                    due=due_dt,
                    priority=task.get("priority"),
                    status=status or TaskStatus.NEEDS_ACTION,
                    related_to=task.get("related_to", []),
                    account_alias=account_alias,
                )

                results.append(
                    OperationResult(
                        index=start_idx + idx,
                        success=True,
                        uid=created_task.uid,
                        duration_ms=(time.time() - op_start) * 1000,
                    )
                )
            except Exception as e:
                results.append(
                    OperationResult(
                        index=start_idx + idx,
                        success=False,
                        error=ErrorSanitizer.sanitize_message(str(e)),
                        duration_ms=(time.time() - op_start) * 1000,
                    )
                )

        return results

    def _execute_batch_create_journals(
        self,
        calendar_uid: str,
        batch: List[Dict],
        start_idx: int,
        options: BulkOptions,
        account_alias: Optional[str] = None,
    ) -> List[OperationResult]:
        """Execute a batch of journal create operations."""
        results = []

        # Use the provided journal manager
        if not self.journal_manager:
            raise ValueError("JournalManager not provided to BulkOperationManager")

        for idx, journal in enumerate(batch):
            op_start = time.time()
            try:
                # Parse dtstart if provided as string
                dtstart = journal.get("dtstart")
                if dtstart and isinstance(dtstart, str):
                    dtstart = parse_datetime(dtstart)

                # Create the journal using JournalManager
                created_journal = self.journal_manager.create_journal(
                    calendar_uid=calendar_uid,
                    summary=journal.get("summary"),
                    description=journal.get("description"),
                    dtstart=dtstart,
                    related_to=journal.get("related_to", []),
                    account_alias=account_alias,
                )

                results.append(
                    OperationResult(
                        index=start_idx + idx,
                        success=True,
                        uid=created_journal.uid,
                        duration_ms=(time.time() - op_start) * 1000,
                    )
                )
            except Exception as e:
                results.append(
                    OperationResult(
                        index=start_idx + idx,
                        success=False,
                        error=ErrorSanitizer.sanitize_message(str(e)),
                        duration_ms=(time.time() - op_start) * 1000,
                    )
                )

        return results

    def _rollback_created_events(self, calendar_uid: str, uids: List[str]) -> List[str]:
        """Rollback created events in case of atomic operation failure.

        Returns:
            List of UIDs that failed to rollback (orphaned data)
        """
        failed_rollbacks = []
        if self.event_manager:
            for uid in uids:
                try:
                    self.event_manager.delete_event(calendar_uid, uid)
                    logger.debug(f"Successfully rolled back event {uid}")
                except Exception as e:
                    logger.error(f"CRITICAL: Failed to rollback event {uid}: {e}")
                    failed_rollbacks.append(uid)
        return failed_rollbacks

    def _rollback_created_tasks(self, calendar_uid: str, uids: List[str]) -> List[str]:
        """Rollback created tasks in case of atomic operation failure.

        Returns:
            List of UIDs that failed to rollback (orphaned data)
        """
        failed_rollbacks = []
        if self.task_manager:
            for uid in uids:
                try:
                    self.task_manager.delete_task(calendar_uid, uid)
                    logger.debug(f"Successfully rolled back task {uid}")
                except Exception as e:
                    logger.error(f"CRITICAL: Failed to rollback task {uid}: {e}")
                    failed_rollbacks.append(uid)
        return failed_rollbacks

    def _rollback_created_journals(self, calendar_uid: str, uids: List[str]) -> List[str]:
        """Rollback created journals in case of atomic operation failure.

        Returns:
            List of UIDs that failed to rollback (orphaned data)
        """
        failed_rollbacks = []
        if self.journal_manager:
            for uid in uids:
                try:
                    self.journal_manager.delete_journal(calendar_uid, uid)
                    logger.debug(f"Successfully rolled back journal {uid}")
                except Exception as e:
                    logger.error(f"CRITICAL: Failed to rollback journal {uid}: {e}")
                    failed_rollbacks.append(uid)
        return failed_rollbacks

    def bulk_delete_events(
        self, calendar_uid: str, event_uids: List[str], options: BulkOptions = None
    ) -> BulkResult:
        """Delete multiple events efficiently."""
        if options is None:
            options = BulkOptions()

        start_time = time.time()
        result = BulkResult(total=len(event_uids), successful=0, failed=0)

        if not self.event_manager:
            raise ValueError("EventManager not provided to BulkOperationManager")

        if options.dry_run:
            for idx in range(len(event_uids)):
                result.results.append(
                    OperationResult(
                        index=idx, success=True, uid=event_uids[idx], duration_ms=0.1
                    )
                )
            result.successful = len(event_uids)
        else:
            for batch_start in range(0, len(event_uids), options.max_parallel):
                batch_end = min(batch_start + options.max_parallel, len(event_uids))
                batch_uids = event_uids[batch_start:batch_end]

                for idx, uid in enumerate(batch_uids):
                    op_start = time.time()
                    try:
                        self.event_manager.delete_event(calendar_uid, uid)
                        result.results.append(
                            OperationResult(
                                index=batch_start + idx,
                                success=True,
                                uid=uid,
                                duration_ms=(time.time() - op_start) * 1000,
                            )
                        )
                        result.successful += 1
                    except Exception as e:
                        result.results.append(
                            OperationResult(
                                index=batch_start + idx,
                                success=False,
                                uid=uid,
                                error=ErrorSanitizer.sanitize_message(str(e)),
                                duration_ms=(time.time() - op_start) * 1000,
                            )
                        )
                        result.failed += 1

                        if options.mode == BulkOperationMode.FAIL_FAST:
                            break

                if options.mode == BulkOperationMode.FAIL_FAST and result.failed > 0:
                    break

        result.duration_ms = (time.time() - start_time) * 1000
        return result
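
    # Deletion sketch (illustrative; UIDs would come from a prior create or a
    # search, and `manager` is assumed configured):
    #
    #     result = manager.bulk_delete_events(
    #         "work-calendar",
    #         ["uid-1", "uid-2", "uid-3"],
    #         options=BulkOptions(mode=BulkOperationMode.FAIL_FAST),
    #     )
    #     not_deleted = [r.uid for r in result.get_failures()]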

    def bulk_delete_tasks(
        self, calendar_uid: str, task_uids: List[str], options: BulkOptions = None
    ) -> BulkResult:
        """Delete multiple tasks efficiently."""
        if options is None:
            options = BulkOptions()

        start_time = time.time()
        result = BulkResult(total=len(task_uids), successful=0, failed=0)

        if not self.task_manager:
            raise ValueError("TaskManager not provided to BulkOperationManager")

        if options.dry_run:
            for idx in range(len(task_uids)):
                result.results.append(
                    OperationResult(
                        index=idx, success=True, uid=task_uids[idx], duration_ms=0.1
                    )
                )
            result.successful = len(task_uids)
        else:
            for batch_start in range(0, len(task_uids), options.max_parallel):
                batch_end = min(batch_start + options.max_parallel, len(task_uids))
                batch_uids = task_uids[batch_start:batch_end]

                for idx, uid in enumerate(batch_uids):
                    op_start = time.time()
                    try:
                        self.task_manager.delete_task(calendar_uid, uid)
                        result.results.append(
                            OperationResult(
                                index=batch_start + idx,
                                success=True,
                                uid=uid,
                                duration_ms=(time.time() - op_start) * 1000,
                            )
                        )
                        result.successful += 1
                    except Exception as e:
                        result.results.append(
                            OperationResult(
                                index=batch_start + idx,
                                success=False,
                                uid=uid,
                                error=ErrorSanitizer.sanitize_message(str(e)),
                                duration_ms=(time.time() - op_start) * 1000,
                            )
                        )
                        result.failed += 1

                        if options.mode == BulkOperationMode.FAIL_FAST:
                            break

                if options.mode == BulkOperationMode.FAIL_FAST and result.failed > 0:
                    break

        result.duration_ms = (time.time() - start_time) * 1000
        return result

    def bulk_delete_journals(
        self, calendar_uid: str, journal_uids: List[str], options: BulkOptions = None
    ) -> BulkResult:
        """Delete multiple journals efficiently."""
        if options is None:
            options = BulkOptions()

        start_time = time.time()
        result = BulkResult(total=len(journal_uids), successful=0, failed=0)

        if not self.journal_manager:
            raise ValueError("JournalManager not provided to BulkOperationManager")

        if options.dry_run:
            for idx in range(len(journal_uids)):
                result.results.append(
                    OperationResult(
                        index=idx, success=True, uid=journal_uids[idx], duration_ms=0.1
                    )
                )
            result.successful = len(journal_uids)
        else:
            for batch_start in range(0, len(journal_uids), options.max_parallel):
                batch_end = min(batch_start + options.max_parallel, len(journal_uids))
                batch_uids = journal_uids[batch_start:batch_end]

                for idx, uid in enumerate(batch_uids):
                    op_start = time.time()
                    try:
                        self.journal_manager.delete_journal(calendar_uid, uid)
                        result.results.append(
                            OperationResult(
                                index=batch_start + idx,
                                success=True,
                                uid=uid,
                                duration_ms=(time.time() - op_start) * 1000,
                            )
                        )
                        result.successful += 1
                    except Exception as e:
                        result.results.append(
                            OperationResult(
                                index=batch_start + idx,
                                success=False,
                                uid=uid,
                                error=ErrorSanitizer.sanitize_message(str(e)),
                                duration_ms=(time.time() - op_start) * 1000,
                            )
                        )
                        result.failed += 1

                        if options.mode == BulkOperationMode.FAIL_FAST:
                            break

                if options.mode == BulkOperationMode.FAIL_FAST and result.failed > 0:
                    break

        result.duration_ms = (time.time() - start_time) * 1000
        return result