dasein-core 0.2.18__py3-none-any.whl → 0.2.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dasein/api.py +4902 -4895
- dasein/capture.py +19 -19
- dasein/events.py +1 -1
- dasein/microturn.py +18 -6
- dasein/pipecleaner.py +17 -17
- dasein/services/post_run_client.py +5 -4
- dasein/wrappers.py +25 -9
- {dasein_core-0.2.18.dist-info → dasein_core-0.2.20.dist-info}/METADATA +1 -1
- {dasein_core-0.2.18.dist-info → dasein_core-0.2.20.dist-info}/RECORD +12 -12
- {dasein_core-0.2.18.dist-info → dasein_core-0.2.20.dist-info}/WHEEL +0 -0
- {dasein_core-0.2.18.dist-info → dasein_core-0.2.20.dist-info}/licenses/LICENSE +0 -0
- {dasein_core-0.2.18.dist-info → dasein_core-0.2.20.dist-info}/top_level.txt +0 -0
dasein/capture.py
CHANGED
@@ -445,27 +445,27 @@ class DaseinCallbackHandler(BaseCallbackHandler):
         Called from on_llm_start when tools are detected for a node.
         """
         try:
-
-
-
+            self._vprint(f"\n{'='*70}")
+            self._vprint(f"[DASEIN][TOOL_PATCH] 🔧 Patching tools for node: {node_name}")
+            self._vprint(f"{'='*70}")
 
             from .wrappers import patch_tool_instance
 
             # Track patched tools to avoid double-patching
             if not hasattr(self, '_patched_tools'):
                 self._patched_tools = set()
-
+                self._vprint(f"[DASEIN][TOOL_PATCH] Initialized patched tools tracker")
 
             # Find the actual tool objects for this node in the agent graph
-
+            self._vprint(f"[DASEIN][TOOL_PATCH] Searching for tool objects in node '{node_name}'...")
             tool_objects = self._find_tool_objects_for_node(node_name)
 
             if not tool_objects:
-
-
+                self._vprint(f"[DASEIN][TOOL_PATCH] ⚠️ No tool objects found for node '{node_name}'")
+                self._vprint(f"{'='*70}\n")
                 return
 
-
+            self._vprint(f"[DASEIN][TOOL_PATCH] ✓ Found {len(tool_objects)} tool object(s)")
 
             # Patch each tool
             patched_count = 0
@@ -474,28 +474,28 @@ class DaseinCallbackHandler(BaseCallbackHandler):
                 tool_type = type(tool_obj).__name__
                 tool_id = f"{node_name}:{tool_name}"
 
-
+                self._vprint(f"[DASEIN][TOOL_PATCH] [{i}/{len(tool_objects)}] Tool: '{tool_name}' (type: {tool_type})")
 
                 if tool_id in self._patched_tools:
-
+                    self._vprint(f"[DASEIN][TOOL_PATCH] ⏭️ Already patched, skipping")
                 else:
-
+                    self._vprint(f"[DASEIN][TOOL_PATCH] 🔨 Patching...")
                     if patch_tool_instance(tool_obj, self):
                         self._patched_tools.add(tool_id)
                         patched_count += 1
-
+                        self._vprint(f"[DASEIN][TOOL_PATCH] ✅ Successfully patched '{tool_name}'")
                     else:
-
+                        self._vprint(f"[DASEIN][TOOL_PATCH] ❌ Failed to patch '{tool_name}'")
 
-
-
-
+            self._vprint(f"[DASEIN][TOOL_PATCH] Summary: Patched {patched_count}/{len(tool_objects)} tools")
+            self._vprint(f"[DASEIN][TOOL_PATCH] Total tools patched so far: {len(self._patched_tools)}")
+            self._vprint(f"{'='*70}\n")
 
         except Exception as e:
-
+            self._vprint(f"[DASEIN][TOOL_PATCH] ❌ ERROR patching tools for node {node_name}: {e}")
            import traceback
            traceback.print_exc()
-
+            self._vprint(f"{'='*70}\n")
 
     def _search_node_recursively(self, node_name: str, nodes: dict, depth: int = 0) -> list:
         """Recursively search for a node by name in graphs and subgraphs."""
@@ -1823,7 +1823,7 @@ Precedence: AVOID/SKIP > FIX > PREFER > HINT. On conflict, the higher rule ALWAY
 
 {state_context}Checklist (non-negotiable):
 - AVOID: no banned targets under ANY condition.
-- SKIP: bypass skipped steps/tools; NEVER retry them.
+- SKIP: bypass skipped steps/tools; NEVER retry them. All SKIP rules below provide COMPLETE and SUFFICIENT information to proceed safely. Calling skipped tools is REDUNDANT—you already have everything needed.
 - FIX: all required params/settings MUST be included.
 - PREFER: when multiple compliant options exist, choose the preferred—NO exceptions.
 - Recovery: if a banned/skipped item already failed, IMMEDIATELY switch to a compliant alternative.
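The capture.py change swaps bare prints for a verbose-gated `self._vprint` helper and records patched tools in a set so patching stays idempotent across callbacks. A minimal sketch of that pattern, with illustrative names (the real handler and patch_tool_instance live in dasein/capture.py and dasein/wrappers.py):

class ToolPatchingSketch:
    def __init__(self, verbose: bool = False):
        self.verbose = verbose
        self._patched_tools = set()  # "node:tool" ids already patched

    def _vprint(self, msg: str) -> None:
        if self.verbose:  # silent by default, like the diff's _vprint
            print(msg)

    def patch_tools(self, node_name: str, tool_objects: list) -> int:
        patched = 0
        for tool_obj in tool_objects:
            tool_id = f"{node_name}:{getattr(tool_obj, 'name', type(tool_obj).__name__)}"
            if tool_id in self._patched_tools:
                self._vprint(f"[TOOL_PATCH] Already patched, skipping {tool_id}")
                continue
            # a real implementation would wrap the tool object here
            self._patched_tools.add(tool_id)
            patched += 1
            self._vprint(f"[TOOL_PATCH] Patched {tool_id}")
        return patched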
dasein/events.py
CHANGED
@@ -159,7 +159,7 @@ class EventStore:
 
     def _safe_log_warning(self, msg: str) -> None:
         """Log warning message safely."""
-
+        # Avoid direct prints; rely on logger
         logger.warning(f"[DASEIN][EVENTS] {msg}")
 
     # Add methods for each entity type
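The one-line events.py change keeps warnings on the module logger rather than stdout. A self-contained sketch of that convention; the logger name is an assumption:

import logging

logger = logging.getLogger("dasein.events")

def safe_log_warning(msg: str) -> None:
    # Route warnings through the logger with the [DASEIN][EVENTS] prefix
    logger.warning(f"[DASEIN][EVENTS] {msg}")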
dasein/microturn.py
CHANGED
@@ -411,14 +411,20 @@ async def run_microturn_enforcement(
            if sig in seen_in_response or sig in patch_depth.seen_tool_signatures:
                # Duplicate detected
                duplicates.append((idx, sig))
-
+                # Debug duplicate detection only if env enables it
+                import os
+                if os.getenv("DASEIN_DEBUG_MICROTURN", "0") == "1":
+                    print(f"[DASEIN][MICROTURN] 🔄 Duplicate detected: {sig}")
            else:
                # First occurrence
                seen_in_response.add(sig)
 
        # DETERMINISTIC DUPLICATE BLOCKING (always on)
        if duplicates and msg:
-
+            # Keep this high-signal log behind env flag
+            import os
+            if os.getenv("DASEIN_DEBUG_MICROTURN", "0") == "1":
+                print(f"[DASEIN][MICROTURN] Blocking {len(duplicates)} duplicate call(s)")
            blocked_count, blocked_calls = modify_tool_calls_with_deadletter(
                msg,
                [],  # No LLM-based compliant names, just mark duplicates
@@ -429,7 +435,8 @@ async def run_microturn_enforcement(
 
            if blocked_count > 0:
                update_callback_state(callback_handler, blocked_calls)
-
+                if os.getenv("DASEIN_DEBUG_MICROTURN", "0") == "1":
+                    print(f"[DASEIN][MICROTURN] ✅ Blocked {blocked_count} duplicate(s)")
                return True
 
        # LLM-BASED MICROTURN (behind flag)
@@ -461,15 +468,20 @@ async def run_microturn_enforcement(
 
        if blocked_count > 0:
            update_callback_state(callback_handler, blocked_calls)
-
+            if os.getenv("DASEIN_DEBUG_MICROTURN", "0") == "1":
+                print(f"[DASEIN][MICROTURN] ✅ LLM blocked {blocked_count} call(s): {blocked_calls}")
            return True
 
        # No enforcement applied
        return False
 
    except Exception as e:
-        print
+        # Only print on debug; otherwise fail silently
+        import os
+        if os.getenv("DASEIN_DEBUG_MICROTURN", "0") == "1":
+            print(f"[DASEIN][MICROTURN] ⚠️ Error during enforcement: {e}")
        import traceback
-
+        if os.getenv("DASEIN_DEBUG_MICROTURN", "0") == "1":
+            traceback.print_exc()
        return False
 
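Every debug print in microturn.py is now gated on the DASEIN_DEBUG_MICROTURN environment variable, including inside the exception handler, so enforcement fails silently unless debugging is opted into. A sketch of the gate; the helper functions are assumptions, since the diff re-reads os.getenv inline at each call site:

import os

def _microturn_debug() -> bool:
    # Re-read each time, like the diff: flipping the env var at runtime
    # (e.g. via os.environ) takes effect immediately.
    return os.getenv("DASEIN_DEBUG_MICROTURN", "0") == "1"

def debug_print(msg: str) -> None:
    if _microturn_debug():
        print(msg)

# Shape of the patched call sites:
# debug_print(f"[DASEIN][MICROTURN] Blocking {len(duplicates)} duplicate call(s)")

Re-reading the variable per call is slightly more expensive than hoisting it into a module constant, but it picks up changes made after import.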
dasein/pipecleaner.py
CHANGED
@@ -59,15 +59,15 @@ def _get_embedding_model():
     if _embedding_model is None:
         try:
             from sentence_transformers import SentenceTransformer
-
+            _vprint("[PIPECLEANER] Loading embedding model: all-MiniLM-L6-v2 (384-dim, ~80MB)...", True)
             # Force CPU device to avoid meta tensor issues
             _embedding_model = SentenceTransformer('all-MiniLM-L6-v2', device='cpu')
-
+            _vprint("[PIPECLEANER] ✅ Embedding model loaded successfully (CPU)", True)
         except ImportError:
-
+            _vprint("[PIPECLEANER] ⚠️ sentence-transformers not installed. Install: pip install sentence-transformers", True)
             raise
         except Exception as e:
-
+            _vprint(f"[PIPECLEANER] ⚠️ Failed to load embedding model: {e}", True)
             raise
 
     return _embedding_model
@@ -79,14 +79,14 @@ def _get_spacy_model():
     if _spacy_nlp is None:
         try:
             import spacy
-
+            _vprint("[PIPECLEANER] Loading spaCy model: en_core_web_sm...", True)
             _spacy_nlp = spacy.load("en_core_web_sm")
-
+            _vprint("[PIPECLEANER] ✅ spaCy model loaded successfully", True)
         except ImportError:
-
+            _vprint("[PIPECLEANER] ⚠️ spaCy not installed. Using regex fallback for entities.", True)
             _spacy_nlp = "fallback"
         except OSError:
-
+            _vprint("[PIPECLEANER] ⚠️ spaCy model not found. Using regex fallback for entities.", True)
             _spacy_nlp = "fallback"
     return _spacy_nlp
 
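Both loaders follow the same lazy, module-level singleton shape: defer the heavy import to first use, cache the model in a global, and degrade gracefully when the dependency is missing. A condensed sketch under those assumptions, with simplified names:

_nlp = None

def get_nlp():
    global _nlp
    if _nlp is None:
        try:
            import spacy  # deferred: only paid on first call
            _nlp = spacy.load("en_core_web_sm")
        except (ImportError, OSError):
            _nlp = "fallback"  # callers check this sentinel and use regex instead
    return _nlp

Note the asymmetry in the diff: the spaCy loader degrades to the "fallback" sentinel, while the embedding loader re-raises, since there is no regex substitute for embeddings.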
@@ -394,7 +394,7 @@ class RunScopedCorpus:
                 # First prompt in batch, start timer at 5s
                 self.batch_start_time = arrival_time
                 self.barrier_duration = 5.0
-
+                _vprint(f"[CORPUS] ⏱️ Starting batch barrier: 5.0s (first prompt, min wait)", self.verbose)
                 self.batch_timer = threading.Timer(self.barrier_duration, self._process_batch)
                 self.batch_timer.start()
             else:
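The barrier logic arms a threading.Timer when the first prompt of a batch arrives, so later arrivals within the window are processed together. A simplified sketch of the idea; the real RunScopedCorpus also synchronizes access to this shared state:

import threading

class BatchBarrierSketch:
    def __init__(self, duration: float = 5.0):
        self.duration = duration  # matches the 5.0s minimum wait above
        self.pending = []
        self.timer = None

    def submit(self, prompt: str) -> None:
        self.pending.append(prompt)
        if self.timer is None:
            # First prompt in the batch: start the barrier countdown
            self.timer = threading.Timer(self.duration, self._process_batch)
            self.timer.start()

    def _process_batch(self) -> None:
        batch, self.pending, self.timer = self.pending, [], None
        print(f"processing {len(batch)} prompts as one batch")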
@@ -421,7 +421,7 @@ class RunScopedCorpus:
 
         if timed_out:
             # Fail open: return original text if batch processing hangs
-
+            _vprint(f"[CORPUS] ⚠️ Timeout waiting for batch processing, returning original prompt", self.verbose)
             self.telemetry.chars_out += len(prompt_text)
             return prompt_text
 
@@ -430,7 +430,7 @@ class RunScopedCorpus:
 
         if not deduplicated_text:
             # Safety: if result is missing, return original
-
+            _vprint(f"[CORPUS] ⚠️ Missing deduplicated result for prompt {prompt_id[:8]}, returning original", self.verbose)
             self.telemetry.chars_out += len(prompt_text)
             return prompt_text
 
@@ -456,7 +456,7 @@ class RunScopedCorpus:
         self.telemetry.batches_processed += 1
 
         # Always show batch summary (key metric)
-
+        _vprint(f"\n[CORPUS] 🔄 Processing batch: {len(batch_prompts)} prompts, barrier={batch_duration_ms:.0f}ms", self.verbose)
 
         # Step 0: Compute embeddings for NEW prompts in this batch (BATCHED operation!)
         # This is done ONCE for the entire batch, allowing parallel arrivals
@@ -563,7 +563,7 @@ class RunScopedCorpus:
         non_isolates = [s for s in all_sentences if degree_map[s.id] > 0]
         pct_isolates = len(isolates_before) / len(all_sentences) * 100 if all_sentences else 0
         avg_degree_non_iso = sum(degree_map[s.id] for s in non_isolates) / len(non_isolates) if non_isolates else 0
-
+        _vprint(f"[CORPUS] 📊 Graph: isolates={pct_isolates:.1f}% (expect <20%), non-isolate avg degree={avg_degree_non_iso:.1f} (expect >3)", self.verbose)
 
         # Step 3: Run greedy maximum-independent-set selection
         # Start with LOCKED sentences (from previous batches, already emitted)
@@ -571,7 +571,7 @@ class RunScopedCorpus:
         selected_sentences = [s for s in all_sentences if s.id in locked_sentences]
         selected_ids = locked_sentences.copy()
 
-
+        _vprint(f"[CORPUS] 🔒 Pre-seeded MIS with {len(locked_sentences)} locked sentences from previous batches", self.verbose)
 
         # Now run MIS on NEW sentences only (exclude locked)
         new_sentences = [s for s in all_sentences if s.id not in locked_sentences]
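Steps 2-3 build a duplicate graph over sentences, then run a greedy maximum-independent-set pass pre-seeded with sentences locked by earlier batches. A sketch of that selection over an adjacency-dict representation; the actual scoring and visit order in pipecleaner.py are not visible in this diff:

def greedy_mis(adjacency: dict, locked: set) -> set:
    selected = set(locked)  # previously emitted sentences stay selected
    blocked = set()
    for sid in locked:
        blocked |= adjacency.get(sid, set())
    # Greedy pass: visiting low-degree sentences first tends to keep more of them
    for sid in sorted(adjacency, key=lambda s: len(adjacency[s])):
        if sid not in selected and sid not in blocked:
            selected.add(sid)            # keep this sentence...
            blocked |= adjacency[sid]    # ...and drop everything it duplicates
    return selected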
@@ -748,7 +748,7 @@ class RunScopedCorpus:
         # Update telemetry
         self.telemetry.entity_coverage_avg = final_node_coverage * 100  # Now tracking NODE coverage
         # Always show final batch summary (key metric)
-
+        _vprint(f"[CORPUS] ✅ Batch complete: Node coverage {final_node_coverage*100:.1f}%", self.verbose)
 
         # Update telemetry
         if self.telemetry.barrier_times:
@@ -831,9 +831,9 @@ def cleanup_corpus(run_id: str):
     with _corpus_lock:
         if run_id in _run_corpuses:
             corpus = _run_corpuses[run_id]
-
+            _vprint(corpus.get_telemetry_summary(), getattr(corpus, 'verbose', False))
             del _run_corpuses[run_id]
-
+            _vprint(f"[CORPUS] 🗑️ Cleaned up corpus for run_id={run_id[:8]}", getattr(corpus, 'verbose', False))
 
 
 # ============================================================================
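cleanup_corpus() holds the module lock while it emits the telemetry summary and drops the run's corpus from the registry. A sketch of the same teardown; here the removal is an atomic pop under the lock and the printing happens after it, a minor variation on the diff:

import threading

_run_corpuses = {}
_corpus_lock = threading.Lock()

def cleanup(run_id: str) -> None:
    with _corpus_lock:
        corpus = _run_corpuses.pop(run_id, None)  # atomic check-and-remove
    if corpus is not None:
        print(corpus.get_telemetry_summary())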
dasein/services/post_run_client.py
CHANGED
@@ -99,10 +99,11 @@ class PostRunClient:
             "performance_tracking_id": request.performance_tracking_id,
             "skip_synthesis": request.skip_synthesis,
             "wait_for_synthesis": request.wait_for_synthesis,
-            "
-            "
-            "
-            "
+            "step_id": request.step_id,
+            "tools_metadata": request.tools_metadata or [],
+            "graph_metadata": request.graph_metadata or {},
+            "rules_applied": request.rules_applied or [],
+            "context_hash": request.context_hash,
         }
 
         logger.info(f"Synthesizing rules for run: {request.run_id}")
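The new payload fields normalize optional values with `or`, so a None list or dict is serialized as an empty container instead of null. A stand-alone sketch with a stub request object; the real request class is not shown in this diff:

from dataclasses import dataclass
from typing import Optional

@dataclass
class _RequestStub:  # illustrative only
    step_id: Optional[str] = None
    tools_metadata: Optional[list] = None
    graph_metadata: Optional[dict] = None
    rules_applied: Optional[list] = None
    context_hash: Optional[str] = None

request = _RequestStub()
payload = {
    "step_id": request.step_id,
    "tools_metadata": request.tools_metadata or [],  # None -> []
    "graph_metadata": request.graph_metadata or {},  # None -> {}
    "rules_applied": request.rules_applied or [],
    "context_hash": request.context_hash,
}

`or` also coerces falsy empties, which is harmless here since an empty container maps to an empty container.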
dasein/wrappers.py
CHANGED
@@ -31,7 +31,9 @@ def wrap_tools_for_pipecleaner(agent: Any, callback_handler: Any, verbose: bool
     """
     DISABLED: This function has been disabled to avoid interfering with tool execution.
     """
-
+    # Keep this message silent unless verbose is explicitly enabled
+    if verbose:
+        print(f"[WRAPPERS DISABLED] wrap_tools_for_pipecleaner called but DISABLED - no patching will occur")
     return False  # Return False to indicate nothing was done
 
     # ORIGINAL CODE BELOW - COMPLETELY DISABLED
@@ -218,7 +220,10 @@ def _extract_text_from_search_result(result: Any, tool_name: str) -> str:
 
     # Tavily format: list of result dicts
     if isinstance(result, list):
-
+        # Keep extraction log quiet unless user opts in via env
+        import os
+        if os.getenv("DASEIN_DEBUG_PIPECLEANER", "0") == "1":
+            print(f"[PIPECLEANER] Extracting from list of {len(result)} search results")
         for i, item in enumerate(result, 1):
             if isinstance(item, dict):
                 # Extract all text fields
@@ -238,7 +243,9 @@ def _extract_text_from_search_result(result: Any, tool_name: str) -> str:
     elif isinstance(result, dict):
         organic = result.get('organic', []) or result.get('results', [])
         if organic:
-
+            import os
+            if os.getenv("DASEIN_DEBUG_PIPECLEANER", "0") == "1":
+                print(f"[PIPECLEANER] Extracting from dict with {len(organic)} organic results")
             for i, item in enumerate(organic, 1):
                 title = item.get('title', '')
                 url = item.get('link', '') or item.get('url', '')
@@ -260,12 +267,16 @@ def _extract_text_from_search_result(result: Any, tool_name: str) -> str:
     # Fallback: convert to string (but log warning)
     if not extracted_parts:
         result_str = str(result)
-
-
+        import os
+        if os.getenv("DASEIN_DEBUG_PIPECLEANER", "0") == "1":
+            print(f"[PIPECLEANER] ⚠️ Unknown result format, using str() - may be truncated")
+            print(f"[PIPECLEANER] Result type: {type(result).__name__}")
         return result_str
 
     full_text = "\n".join(extracted_parts)
-
+    import os
+    if os.getenv("DASEIN_DEBUG_PIPECLEANER", "0") == "1":
+        print(f"[PIPECLEANER] ✅ Extracted {len(full_text)} chars from {len(extracted_parts)} parts")
     return full_text
 
 
@@ -303,13 +314,18 @@ def _apply_pipecleaner_to_result(tool_name: str, result: Any, callback_handler:
 
         # Return deduplicated result (as same type as original if possible)
         if deduplicated_str != result_str:
-
+            import os
+            if os.getenv("DASEIN_DEBUG_PIPECLEANER", "0") == "1":
+                print(f"[PIPECLEANER] ✅ Deduplicated: {len(result_str)} → {len(deduplicated_str)} chars")
            return deduplicated_str
 
        return result
 
    except Exception as e:
-
+        import os
+        if os.getenv("DASEIN_DEBUG_PIPECLEANER", "0") == "1":
+            print(f"[PIPECLEANER] Error applying pipecleaner: {e}")
        import traceback
-
+        if os.getenv("DASEIN_DEBUG_PIPECLEANER", "0") == "1":
+            traceback.print_exc()
        return result
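_extract_text_from_search_result handles two result shapes: a bare list of result dicts (Tavily-style) and a dict carrying an 'organic' or 'results' list (Serper-style), falling back to str() when neither matches. A sketch of that dispatch; the 'snippet'/'content' field names are assumptions, since the diff only shows title/link/url extraction:

def extract_text(result) -> str:
    if isinstance(result, list):
        items = result
    elif isinstance(result, dict):
        items = result.get('organic', []) or result.get('results', [])
    else:
        items = []
    parts = []
    for item in items:
        if isinstance(item, dict):
            title = item.get('title', '')
            url = item.get('link', '') or item.get('url', '')
            body = item.get('snippet', '') or item.get('content', '')  # assumed fields
            parts.append("\n".join(p for p in (title, url, body) if p))
    return "\n".join(parts) if parts else str(result)  # fail open, like the diff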
{dasein_core-0.2.18.dist-info → dasein_core-0.2.20.dist-info}/RECORD
CHANGED
@@ -1,17 +1,17 @@
 dasein/__init__.py,sha256=RY0lhaaWB6yJ_5YMRmaHDvQ0eFbc0BGbYNe5OVyxzYE,2316
 dasein/advice_format.py,sha256=5-h4J24L_B2Y9dlmyDuIYtmPCWOGAYoinBEXqpcNg2s,5386
-dasein/api.py,sha256=
-dasein/capture.py,sha256=
+dasein/api.py,sha256=_eoET8IDYovQqUSzWThgMT6zuSxLkGbLP-JaHJkH6aw,265261
+dasein/capture.py,sha256=xM-JmKAEIrqDWnymHrbwdd7-ZeA9PWiCsw1irdCc2bc,112434
 dasein/config.py,sha256=lXO8JG4RXbodn3gT5yEnuB0VRwWdrRVwhX3Rm06IZmU,1957
-dasein/events.py,sha256=
+dasein/events.py,sha256=Im5imxrLLEeuYuNq0LLDsJnbP-rLmANDYDpJhSdIl74,12892
 dasein/extractors.py,sha256=fUFBVH9u2x4cJaM-8Zw4qiIpBF2LvjcdYkMvoXQUpL8,3986
 dasein/injection_strategies.py,sha256=JBAvLnJK4xKqK4D1ZQnklIy-yIKSvWuvm2x0YGDMZVU,6507
 dasein/injector.py,sha256=EItWhlG6oMAf_D7YJnRNyDwAQIK5MsaATu1ig3OENqM,7256
-dasein/microturn.py,sha256=
-dasein/pipecleaner.py,sha256=
+dasein/microturn.py,sha256=GO7VbbrBBRZ0cXkxOD-P1PYziQL7lEE02SdVX6mL9is,19733
+dasein/pipecleaner.py,sha256=ZJmPN8kzU04oUWqL0t2JmU4qPRUIxttatx8Gb-U8BZQ,80714
 dasein/trace_buffer.py,sha256=bIyTpU8ZrNFR_TCwS43HvzUrDHpZ2F8pLVDeUE9jpwM,4117
 dasein/types.py,sha256=FjGXZowiRYZzWj5GzSnAnA_-xwYqqE7WmXFCosVyGI8,2974
-dasein/wrappers.py,sha256
+dasein/wrappers.py,sha256=-X1mdesnruFsPdmTZhH8-dSt3Sy56o_ivjrIIywyj0c,14123
 dasein/models/en_core_web_sm/en_core_web_sm/__init__.py,sha256=yOtXB5wD_EFXwif2QXgfvLPp0RQ5q-G_C3LkwPp4o40,237
 dasein/models/en_core_web_sm/en_core_web_sm/meta.json,sha256=X8R1--W7Axckn9flHCLVMFI0W7I_E-rxSf9ZAiOWTRw,10085
 dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/LICENSE,sha256=OTPBdpebaLxtC8yQLH1sEw8dEn9Hbxe6XNuo2Zz9ABI,1056
@@ -41,11 +41,11 @@ dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/vocab/strings.j
 dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/vocab/vectors,sha256=FHcraD5yZDbVlIrT__K0PQNu8uu-NFiq_tYATgWkBwY,128
 dasein/models/en_core_web_sm/en_core_web_sm/en_core_web_sm-3.7.1/vocab/vectors.cfg,sha256=_0NZCRlSyM0W8fBIL1dw-4LRcHNo1cyjxGqlAfVS48U,22
 dasein/services/__init__.py,sha256=0o6vKEVSYgGo-u-xDFf7Z4cQr8gIht2YovD6eEXUquE,356
-dasein/services/post_run_client.py,sha256=
+dasein/services/post_run_client.py,sha256=sfvrzpxQQfSxF1DGG9mjX8gjxBPnMBwlU9lefpHRYzk,5043
 dasein/services/pre_run_client.py,sha256=2-HwdsmZGy0x-G19bxpZEDUc34vxBJBrVsx3ddW2-Aw,4383
 dasein/services/service_adapter.py,sha256=hpADfh-oqSipSu8AvM23n7aVJYMmHjqQqKBM_FhoS3Q,7617
 dasein/services/service_config.py,sha256=8_4tpV4mZvfaOc5_yyHbOyL4rYsPHzkLTEY1rtYgLs8,1629
-dasein_core-0.2.
+dasein_core-0.2.20.dist-info/licenses/LICENSE,sha256=7FHjIFEKl_3hSc3tGUVEWmufC_3oi8rh_2zVuL7jMKs,1091
 dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/LICENSE,sha256=OTPBdpebaLxtC8yQLH1sEw8dEn9Hbxe6XNuo2Zz9ABI,1056
 dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/LICENSES_SOURCES,sha256=INnfrNIVESJR8VNB7dGkex-Yvzk6IS8Q8ZT_3H7pipA,2347
 dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/METADATA,sha256=-vGqRxa_M2RwKtLjBhc4JlBQdJ3k7CwOnseT_ReYcic,2958
@@ -53,7 +53,7 @@ dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/RECORD,sha256=dDb6U7
 dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
 dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/entry_points.txt,sha256=OkWs-KxPJtDdpvIFCVXzDC9ECtejhPxv7pP3Tgk2cNg,47
 dasein/models/en_core_web_sm/en_core_web_sm-3.7.1.dist-info/top_level.txt,sha256=56OIuRbEuhr12HsM9XpCMnTtHRMgNC5Hje4Xeo8wF2c,15
-dasein_core-0.2.
-dasein_core-0.2.
-dasein_core-0.2.
-dasein_core-0.2.
+dasein_core-0.2.20.dist-info/METADATA,sha256=dLFMNEkef2Ym4K_3pxmsI_H2ffmZ1AW6x8qLfyju7fw,10330
+dasein_core-0.2.20.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dasein_core-0.2.20.dist-info/top_level.txt,sha256=6yYY9kltjvvPsg9K6KyMKRtzEr5qM7sHXN7VzmrDtp0,7
+dasein_core-0.2.20.dist-info/RECORD,,
{dasein_core-0.2.18.dist-info → dasein_core-0.2.20.dist-info}/WHEEL
File without changes
{dasein_core-0.2.18.dist-info → dasein_core-0.2.20.dist-info}/licenses/LICENSE
File without changes
{dasein_core-0.2.18.dist-info → dasein_core-0.2.20.dist-info}/top_level.txt
File without changes