headroom-ai 0.2.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114)
  1. headroom/__init__.py +212 -0
  2. headroom/cache/__init__.py +76 -0
  3. headroom/cache/anthropic.py +517 -0
  4. headroom/cache/base.py +342 -0
  5. headroom/cache/compression_feedback.py +613 -0
  6. headroom/cache/compression_store.py +814 -0
  7. headroom/cache/dynamic_detector.py +1026 -0
  8. headroom/cache/google.py +884 -0
  9. headroom/cache/openai.py +584 -0
  10. headroom/cache/registry.py +175 -0
  11. headroom/cache/semantic.py +451 -0
  12. headroom/ccr/__init__.py +77 -0
  13. headroom/ccr/context_tracker.py +582 -0
  14. headroom/ccr/mcp_server.py +319 -0
  15. headroom/ccr/response_handler.py +772 -0
  16. headroom/ccr/tool_injection.py +415 -0
  17. headroom/cli.py +219 -0
  18. headroom/client.py +977 -0
  19. headroom/compression/__init__.py +42 -0
  20. headroom/compression/detector.py +424 -0
  21. headroom/compression/handlers/__init__.py +22 -0
  22. headroom/compression/handlers/base.py +219 -0
  23. headroom/compression/handlers/code_handler.py +506 -0
  24. headroom/compression/handlers/json_handler.py +418 -0
  25. headroom/compression/masks.py +345 -0
  26. headroom/compression/universal.py +465 -0
  27. headroom/config.py +474 -0
  28. headroom/exceptions.py +192 -0
  29. headroom/integrations/__init__.py +159 -0
  30. headroom/integrations/agno/__init__.py +53 -0
  31. headroom/integrations/agno/hooks.py +345 -0
  32. headroom/integrations/agno/model.py +625 -0
  33. headroom/integrations/agno/providers.py +154 -0
  34. headroom/integrations/langchain/__init__.py +106 -0
  35. headroom/integrations/langchain/agents.py +326 -0
  36. headroom/integrations/langchain/chat_model.py +1002 -0
  37. headroom/integrations/langchain/langsmith.py +324 -0
  38. headroom/integrations/langchain/memory.py +319 -0
  39. headroom/integrations/langchain/providers.py +200 -0
  40. headroom/integrations/langchain/retriever.py +371 -0
  41. headroom/integrations/langchain/streaming.py +341 -0
  42. headroom/integrations/mcp/__init__.py +37 -0
  43. headroom/integrations/mcp/server.py +533 -0
  44. headroom/memory/__init__.py +37 -0
  45. headroom/memory/extractor.py +390 -0
  46. headroom/memory/fast_store.py +621 -0
  47. headroom/memory/fast_wrapper.py +311 -0
  48. headroom/memory/inline_extractor.py +229 -0
  49. headroom/memory/store.py +434 -0
  50. headroom/memory/worker.py +260 -0
  51. headroom/memory/wrapper.py +321 -0
  52. headroom/models/__init__.py +39 -0
  53. headroom/models/registry.py +687 -0
  54. headroom/parser.py +293 -0
  55. headroom/pricing/__init__.py +51 -0
  56. headroom/pricing/anthropic_prices.py +81 -0
  57. headroom/pricing/litellm_pricing.py +113 -0
  58. headroom/pricing/openai_prices.py +91 -0
  59. headroom/pricing/registry.py +188 -0
  60. headroom/providers/__init__.py +61 -0
  61. headroom/providers/anthropic.py +621 -0
  62. headroom/providers/base.py +131 -0
  63. headroom/providers/cohere.py +362 -0
  64. headroom/providers/google.py +427 -0
  65. headroom/providers/litellm.py +297 -0
  66. headroom/providers/openai.py +566 -0
  67. headroom/providers/openai_compatible.py +521 -0
  68. headroom/proxy/__init__.py +19 -0
  69. headroom/proxy/server.py +2683 -0
  70. headroom/py.typed +0 -0
  71. headroom/relevance/__init__.py +124 -0
  72. headroom/relevance/base.py +106 -0
  73. headroom/relevance/bm25.py +255 -0
  74. headroom/relevance/embedding.py +255 -0
  75. headroom/relevance/hybrid.py +259 -0
  76. headroom/reporting/__init__.py +5 -0
  77. headroom/reporting/generator.py +549 -0
  78. headroom/storage/__init__.py +41 -0
  79. headroom/storage/base.py +125 -0
  80. headroom/storage/jsonl.py +220 -0
  81. headroom/storage/sqlite.py +289 -0
  82. headroom/telemetry/__init__.py +91 -0
  83. headroom/telemetry/collector.py +764 -0
  84. headroom/telemetry/models.py +880 -0
  85. headroom/telemetry/toin.py +1579 -0
  86. headroom/tokenizer.py +80 -0
  87. headroom/tokenizers/__init__.py +75 -0
  88. headroom/tokenizers/base.py +210 -0
  89. headroom/tokenizers/estimator.py +198 -0
  90. headroom/tokenizers/huggingface.py +317 -0
  91. headroom/tokenizers/mistral.py +245 -0
  92. headroom/tokenizers/registry.py +398 -0
  93. headroom/tokenizers/tiktoken_counter.py +248 -0
  94. headroom/transforms/__init__.py +106 -0
  95. headroom/transforms/base.py +57 -0
  96. headroom/transforms/cache_aligner.py +357 -0
  97. headroom/transforms/code_compressor.py +1313 -0
  98. headroom/transforms/content_detector.py +335 -0
  99. headroom/transforms/content_router.py +1158 -0
  100. headroom/transforms/llmlingua_compressor.py +638 -0
  101. headroom/transforms/log_compressor.py +529 -0
  102. headroom/transforms/pipeline.py +297 -0
  103. headroom/transforms/rolling_window.py +350 -0
  104. headroom/transforms/search_compressor.py +365 -0
  105. headroom/transforms/smart_crusher.py +2682 -0
  106. headroom/transforms/text_compressor.py +259 -0
  107. headroom/transforms/tool_crusher.py +338 -0
  108. headroom/utils.py +215 -0
  109. headroom_ai-0.2.13.dist-info/METADATA +315 -0
  110. headroom_ai-0.2.13.dist-info/RECORD +114 -0
  111. headroom_ai-0.2.13.dist-info/WHEEL +4 -0
  112. headroom_ai-0.2.13.dist-info/entry_points.txt +2 -0
  113. headroom_ai-0.2.13.dist-info/licenses/LICENSE +190 -0
  114. headroom_ai-0.2.13.dist-info/licenses/NOTICE +43 -0
headroom/ccr/context_tracker.py
@@ -0,0 +1,582 @@
"""Multi-turn context tracking for CCR (Compress-Cache-Retrieve).

This module tracks compressed content across conversation turns and
provides intelligent context expansion based on query relevance.

Key features:
1. Track all compression hashes across the conversation
2. Analyze new queries to detect whether they need expanded context
3. Proactively expand relevant compressed content before the LLM responds
4. Prevent "context amnesia", where earlier compressed data is forgotten

Example:
    Turn 1: Search returns 100 files → compressed to 10 (hash=abc123)
    Turn 5: User asks "What about auth middleware?"

    Without tracking: the LLM doesn't know auth_middleware.py exists
    With tracking: the tracker detects that "auth middleware" might be in
    abc123, proactively expands it, and the LLM gets the full context
"""

from __future__ import annotations

import json
import logging
import re
import time
from dataclasses import dataclass
from typing import Any

from ..cache.compression_store import get_compression_store

logger = logging.getLogger(__name__)

@dataclass
class CompressedContext:
    """Represents a piece of compressed context from the conversation."""

    hash_key: str
    turn_number: int
    timestamp: float
    tool_name: str | None
    original_item_count: int
    compressed_item_count: int
    query_context: str  # The query/context when compression happened
    sample_content: str  # Preview of what was compressed (for relevance matching)


@dataclass
class ExpansionRecommendation:
    """Recommendation to expand compressed context."""

    hash_key: str
    reason: str
    relevance_score: float
    expand_full: bool = True  # True = expand all, False = search only
    search_query: str | None = None


@dataclass
class ContextTrackerConfig:
    """Configuration for context tracking."""

    # Whether tracking is enabled
    enabled: bool = True

    # Maximum contexts to track (LRU eviction)
    max_tracked_contexts: int = 100

    # Relevance threshold for recommending expansion (0-1)
    relevance_threshold: float = 0.3

    # Maximum age for contexts (seconds) - older contexts less likely to expand
    max_context_age_seconds: float = 300.0  # 5 minutes

    # Whether to proactively expand based on query analysis
    proactive_expansion: bool = True

    # Maximum items to proactively expand per turn
    max_proactive_expansions: int = 2

class ContextTracker:
    """Tracks compressed contexts across conversation turns.

    This tracker maintains awareness of what has been compressed
    and can recommend expansions when new queries might need that data.

    Usage:
        tracker = ContextTracker()

        # Track compression events
        tracker.track_compression(
            hash_key="abc123",
            turn_number=1,
            tool_name="Bash",
            original_count=100,
            compressed_count=10,
            query_context="find all python files",
            sample_content='["src/main.py", "src/auth.py", ...]',
        )

        # On new user message, check for expansion needs
        recommendations = tracker.analyze_query(
            query="What about the authentication code?",
            current_turn=5,
        )

        # recommendations might suggest expanding abc123 because
        # "authentication" matches "auth.py" in the sample content
    """

    def __init__(self, config: ContextTrackerConfig | None = None):
        self.config = config or ContextTrackerConfig()
        self._contexts: dict[str, CompressedContext] = {}
        self._turn_order: list[str] = []  # For LRU
        self._current_turn: int = 0

    def track_compression(
        self,
        hash_key: str,
        turn_number: int,
        tool_name: str | None,
        original_count: int,
        compressed_count: int,
        query_context: str = "",
        sample_content: str = "",
    ) -> None:
        """Track a compression event.

        Args:
            hash_key: The CCR hash for this compression.
            turn_number: The conversation turn number.
            tool_name: Name of the tool whose output was compressed.
            original_count: Original item count.
            compressed_count: Compressed item count.
            query_context: The user query when compression happened.
            sample_content: Sample of the content for relevance matching.
        """
        if not self.config.enabled:
            return

        context = CompressedContext(
            hash_key=hash_key,
            turn_number=turn_number,
            timestamp=time.time(),
            tool_name=tool_name,
            original_item_count=original_count,
            compressed_item_count=compressed_count,
            query_context=query_context,
            sample_content=sample_content[:2000],  # Limit sample size
        )

        # Add or update context
        if hash_key in self._contexts:
            self._turn_order.remove(hash_key)
        self._contexts[hash_key] = context
        self._turn_order.append(hash_key)

        # LRU eviction
        while len(self._contexts) > self.config.max_tracked_contexts:
            oldest = self._turn_order.pop(0)
            del self._contexts[oldest]

        self._current_turn = max(self._current_turn, turn_number)

        logger.debug(
            f"CCR Tracker: Tracked compression {hash_key} "
            f"({original_count} -> {compressed_count} items)"
        )
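
    # Example of the LRU behavior above (illustrative, not part of the
    # published source): with max_tracked_contexts=2, re-tracking a hash moves
    # it back to the most-recent end, so it survives the next eviction:
    #
    #   tracker.track_compression("a", 1, "Bash", 100, 10)
    #   tracker.track_compression("b", 2, "Grep", 80, 8)
    #   tracker.track_compression("a", 3, "Bash", 100, 10)  # refreshes "a"
    #   tracker.track_compression("c", 4, "Glob", 60, 6)    # evicts "b", not "a"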

    def analyze_query(
        self,
        query: str,
        current_turn: int | None = None,
    ) -> list[ExpansionRecommendation]:
        """Analyze a query to find relevant compressed contexts.

        Args:
            query: The user's query/message.
            current_turn: Current turn number (for age calculation).

        Returns:
            List of expansion recommendations, sorted by relevance.
        """
        if not self.config.enabled or not self.config.proactive_expansion:
            return []

        if current_turn is not None:
            self._current_turn = current_turn

        recommendations: list[ExpansionRecommendation] = []
        now = time.time()

        for hash_key, context in self._contexts.items():
            # Check age
            age = now - context.timestamp
            if age > self.config.max_context_age_seconds:
                continue

            # Calculate relevance
            relevance = self._calculate_relevance(query, context)

            # Age discount: older contexts get lower scores
            age_factor = 1.0 - (age / self.config.max_context_age_seconds) * 0.5
            relevance *= age_factor

            if relevance >= self.config.relevance_threshold:
                # Determine if full expansion or search
                expand_full, search_query = self._determine_expansion_type(
                    query, context, relevance
                )

                recommendations.append(
                    ExpansionRecommendation(
                        hash_key=hash_key,
                        reason=self._generate_reason(query, context, relevance),
                        relevance_score=relevance,
                        expand_full=expand_full,
                        search_query=search_query,
                    )
                )

        # Sort by relevance, limit count
        recommendations.sort(key=lambda r: r.relevance_score, reverse=True)
        return recommendations[: self.config.max_proactive_expansions]
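
    # Worked example of the age discount (illustrative): a context tracked
    # 150 s ago, with max_context_age_seconds=300, gets
    #   age_factor = 1.0 - (150 / 300) * 0.5 = 0.75,
    # so a raw relevance of 0.6 becomes 0.45 -- above the default threshold
    # of 0.3. The same context at 290 s scores 0.6 * 0.5167 ≈ 0.31 and barely
    # survives; past 300 s it is skipped outright.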

    def _calculate_relevance(
        self,
        query: str,
        context: CompressedContext,
    ) -> float:
        """Calculate relevance score between query and compressed context.

        Uses simple but effective heuristics:
        1. Keyword overlap with sample content
        2. Keyword overlap with original query context
        3. Tool name relevance
        """
        query_lower = query.lower()
        query_words = set(self._extract_keywords(query_lower))

        if not query_words:
            return 0.0

        score = 0.0

        # Check sample content overlap
        sample_lower = context.sample_content.lower()
        sample_words = set(self._extract_keywords(sample_lower))

        if sample_words:
            overlap = query_words & sample_words
            score += len(overlap) / len(query_words) * 0.5

        # Bonus for exact substring matches
        for word in query_words:
            if len(word) >= 4 and word in sample_lower:
                score += 0.2

        # Check original query context overlap
        if context.query_context:
            context_lower = context.query_context.lower()
            context_words = set(self._extract_keywords(context_lower))

            if context_words:
                overlap = query_words & context_words
                score += len(overlap) / len(query_words) * 0.3

        # Tool name relevance
        if context.tool_name:
            tool_lower = context.tool_name.lower()
            # File operations more likely to need expansion
            if any(w in tool_lower for w in ["find", "glob", "search", "grep", "ls"]):
                if any(w in query_lower for w in ["file", "where", "find", "show", "list"]):
                    score += 0.1

        return min(score, 1.0)
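
    # Worked example (illustrative): query="where is auth.py" against a
    # context whose sample_content contains "src/auth.py":
    #   query keywords: {"auth.py"}  ("where" and "is" are stop words)
    #   sample overlap:  1/1 * 0.5           -> +0.50
    #   substring bonus: "auth.py" in sample -> +0.20
    #   total relevance: 0.70 (capped at 1.0)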

    def _extract_keywords(self, text: str) -> list[str]:
        """Extract meaningful keywords from text."""
        # Remove common punctuation, split into words
        words = re.findall(r"\b[a-z][a-z0-9_.-]*[a-z0-9]\b|\b[a-z]{2,}\b", text)

        # Filter stop words and very short words
        stop_words = {
            "the", "a", "an", "is", "are", "was", "were", "be", "been", "being",
            "have", "has", "had", "do", "does", "did", "will", "would", "could",
            "should", "may", "might", "must", "shall", "can", "need", "dare",
            "ought", "used", "to", "of", "in", "for", "on", "with", "at", "by",
            "from", "as", "into", "through", "during", "before", "after",
            "above", "below", "between", "under", "again", "further", "then",
            "once", "here", "there", "when", "where", "why", "how", "all",
            "each", "few", "more", "most", "other", "some", "such", "no",
            "nor", "not", "only", "own", "same", "so", "than", "too", "very",
            "just", "and", "but", "if", "or", "because", "until", "while",
            "this", "that", "these", "those", "what", "which", "who", "whom",
            "it", "its", "me", "my", "i", "you",
        }

        return [w for w in words if w not in stop_words and len(w) >= 2]
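
    # Example (illustrative):
    #   _extract_keywords("find the auth_middleware.py files")
    #   -> ["find", "auth_middleware.py", "files"]
    # "the" is a stop word; dotted/underscored file names survive the regex intact.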

    def _determine_expansion_type(
        self,
        query: str,
        context: CompressedContext,
        relevance: float,
    ) -> tuple[bool, str | None]:
        """Determine whether to do full expansion or search.

        Returns:
            Tuple of (expand_full, search_query)
        """
        # High relevance or small original count = full expansion
        if relevance > 0.6 or context.original_item_count <= 50:
            return True, None

        # Extract specific search terms from query
        keywords = self._extract_keywords(query.lower())

        # Filter to most specific keywords (longer, less common)
        specific_keywords = [
            k
            for k in keywords
            if len(k) >= 4 and k not in {"file", "code", "show", "find", "list", "what"}
        ]

        if specific_keywords:
            # Use top keywords as search query
            search_query = " ".join(specific_keywords[:3])
            return False, search_query

        # Default to full expansion if we can't form a good search
        return True, None
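
    # Example (illustrative): for query="show the auth middleware" against a
    # context with relevance=0.4 and original_item_count=200, the full-expansion
    # shortcut does not apply (0.4 <= 0.6 and 200 > 50); "show" is filtered out
    # as generic, and the method returns (False, "auth middleware").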

    def _generate_reason(
        self,
        query: str,
        context: CompressedContext,
        relevance: float,
    ) -> str:
        """Generate human-readable reason for expansion recommendation."""
        parts = []

        if context.tool_name:
            parts.append(f"from {context.tool_name}")

        parts.append(
            f"{context.original_item_count} items compressed in turn {context.turn_number}"
        )

        if relevance > 0.5:
            parts.append("high relevance to current query")
        else:
            parts.append("possible relevance to current query")

        return ", ".join(parts)

    def execute_expansions(
        self,
        recommendations: list[ExpansionRecommendation],
    ) -> list[dict[str, Any]]:
        """Execute expansion recommendations and return the expanded content.

        Args:
            recommendations: List of expansion recommendations.

        Returns:
            List of expanded content dicts with hash, content, and metadata.
        """
        store = get_compression_store()
        results = []

        for rec in recommendations:
            try:
                if rec.expand_full:
                    entry = store.retrieve(rec.hash_key)
                    if entry:
                        results.append(
                            {
                                "hash": rec.hash_key,
                                "type": "full",
                                "content": entry.original_content,
                                "item_count": entry.original_item_count,
                                "reason": rec.reason,
                            }
                        )
                        logger.info(
                            f"CCR Tracker: Proactively expanded {rec.hash_key} "
                            f"({entry.original_item_count} items)"
                        )
                else:
                    search_results = store.search(rec.hash_key, rec.search_query or "")
                    if search_results:
                        results.append(
                            {
                                "hash": rec.hash_key,
                                "type": "search",
                                "query": rec.search_query,
                                "content": search_results,
                                "item_count": len(search_results),
                                "reason": rec.reason,
                            }
                        )
                        logger.info(
                            f"CCR Tracker: Proactive search in {rec.hash_key} "
                            f"for '{rec.search_query}' ({len(search_results)} results)"
                        )
            except Exception as e:
                logger.warning(f"CCR Tracker: Failed to expand {rec.hash_key}: {e}")

        return results

    def format_expansions_for_context(
        self,
        expansions: list[dict[str, Any]],
    ) -> str:
        """Format expansions as additional context for the LLM.

        Args:
            expansions: Results from execute_expansions.

        Returns:
            Formatted string to add to context.
        """
        if not expansions:
            return ""

        parts = ["[Proactive Context Expansion - relevant to your query]"]

        for exp in expansions:
            if exp["type"] == "full":
                parts.append(f"\n--- Expanded from earlier ({exp['reason']}) ---")
                parts.append(exp["content"])
            else:
                parts.append(f"\n--- Search results for '{exp['query']}' ({exp['reason']}) ---")
                if isinstance(exp["content"], list):
                    parts.append(json.dumps(exp["content"], indent=2))
                else:
                    parts.append(str(exp["content"]))

        parts.append("\n[End Proactive Expansion]")

        return "\n".join(parts)
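
    # Sample output for one full expansion (illustrative):
    #
    #   [Proactive Context Expansion - relevant to your query]
    #
    #   --- Expanded from earlier (from Bash, 100 items compressed in turn 1,
    #   high relevance to current query) ---
    #   ...the original uncompressed content...
    #
    #   [End Proactive Expansion]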

    def get_tracked_hashes(self) -> list[str]:
        """Get list of currently tracked hashes."""
        return list(self._contexts.keys())

    def get_stats(self) -> dict[str, Any]:
        """Get tracker statistics."""
        return {
            "tracked_contexts": len(self._contexts),
            "current_turn": self._current_turn,
            "config": {
                "enabled": self.config.enabled,
                "max_contexts": self.config.max_tracked_contexts,
                "relevance_threshold": self.config.relevance_threshold,
                "proactive_expansion": self.config.proactive_expansion,
            },
            "contexts": [
                {
                    "hash": ctx.hash_key,
                    "turn": ctx.turn_number,
                    "tool": ctx.tool_name,
                    "items": f"{ctx.compressed_item_count}/{ctx.original_item_count}",
                }
                for ctx in self._contexts.values()
            ],
        }

    def clear(self) -> None:
        """Clear all tracked contexts."""
        self._contexts.clear()
        self._turn_order.clear()
        self._current_turn = 0

# Global instance (per-session)
_context_tracker: ContextTracker | None = None


def get_context_tracker() -> ContextTracker:
    """Get the global context tracker."""
    global _context_tracker
    if _context_tracker is None:
        _context_tracker = ContextTracker()
    return _context_tracker


def reset_context_tracker() -> None:
    """Reset the global context tracker."""
    global _context_tracker
    if _context_tracker is not None:
        _context_tracker.clear()
    _context_tracker = None
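
A minimal end-to-end sketch of how the module above fits together (illustrative
only; it assumes the compression pipeline has already stored the original
content under the CCR hash "abc123"):

    from headroom.ccr.context_tracker import get_context_tracker

    tracker = get_context_tracker()

    # Turn 1: a tool result was compressed from 100 items down to 10.
    tracker.track_compression(
        hash_key="abc123",
        turn_number=1,
        tool_name="Bash",
        original_count=100,
        compressed_count=10,
        query_context="find all python files",
        sample_content='["src/main.py", "src/auth.py"]',
    )

    # Turn 5: before the new user message reaches the LLM, check whether any
    # earlier compression looks relevant and expand it proactively.
    recommendations = tracker.analyze_query("where is auth.py", current_turn=5)
    expansions = tracker.execute_expansions(recommendations)
    extra_context = tracker.format_expansions_for_context(expansions)
    if extra_context:
        # Append extra_context to the model prompt before the LLM call.
        print(extra_context)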