mycorrhizal 0.2.1__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mycorrhizal/_version.py CHANGED
@@ -1 +1 @@
- version = "0.2.1"
+ version = "0.2.2"
mycorrhizal/hypha/core/runtime.py CHANGED
@@ -285,51 +285,64 @@ class TransitionRuntime:
          if result is not None:
              await self._process_yield(result)

-     def _resolve_place_ref(self, place_ref) -> Optional[Tuple[str, ...]]:
+     def _resolve_place_ref(self, place_ref) -> Tuple[str, ...]:
          """Resolve a PlaceRef to runtime place parts.

-         Attempts multiple resolution strategies in order:
+         Requires an exact match or parent-relative mapping for subnet instances.
+
+         Resolution strategies, in order:
          1. Exact match using PlaceRef.get_parts()
          2. Parent-relative mapping (for subnet instances)
-         3. Suffix fallback (match by local name)

          Returns:
-             Tuple of place parts if found, None otherwise
+             Tuple of place parts if found
+
+         Raises:
+             ValueError: If the reference cannot be resolved
          """
+         # Extract local_name from place_ref (handles PlaceRef objects and strings)
+         if isinstance(place_ref, str):
+             local_name = place_ref
+         elif hasattr(place_ref, 'local_name'):
+             local_name = place_ref.local_name
+         else:
+             local_name = str(place_ref)
+
          # Try to get parts from the PlaceRef API
          try:
              parts = tuple(place_ref.get_parts())
          except Exception:
              parts = None

-         logger.debug("[resolve] place_ref=%r parts=%s", place_ref, parts)
+         logger.debug("[resolve] place_ref=%r parts=%s local_name=%s",
+                      place_ref, parts, local_name)

+         # Strategy 1: Exact match
          if parts:
              key = tuple(parts)
              if key in self.net.places:
                  logger.debug("[resolve] exact match parts=%s", parts)
                  return key

-         # Map relative to this transition's parent parts
+         # Strategy 2: Parent-relative mapping (for subnet instances)
          trans_parts = tuple(self.fqn.split('.'))
          if len(trans_parts) > 1:
              parent_prefix = trans_parts[:-1]
-             candidate = tuple(list(parent_prefix) + [place_ref.local_name])
+             candidate = tuple(list(parent_prefix) + [local_name])
              if candidate in self.net.places:
-                 logger.debug("[resolve] mapped %s -> %s using parent_prefix", parts, candidate)
+                 logger.debug("[resolve] mapped %s -> %s using parent_prefix",
+                              local_name, candidate)
                  return candidate

-         # Fallback: find any place whose last segment matches local_name
-         for p in self.net.places.keys():
-             if isinstance(p, tuple):
-                 segs = list(p)
-             else:
-                 segs = p.split('.')
-             if segs and segs[-1] == place_ref.local_name:
-                 logger.debug("[resolve] suffix match %s -> %s", place_ref.local_name, p)
-                 return tuple(segs)
-
-         return None
+         # No match found - raise a helpful error
+         available_places = [p[-1] if isinstance(p, tuple) else p.split('.')[-1]
+                             for p in self.net.places.keys()]
+         raise ValueError(
+             f"Cannot resolve place reference '{local_name}' "
+             f"in transition '{self.fqn}'. Available places: {available_places}. "
+             f"Attempted resolution: {parts}. "
+             f"Use explicit place references (e.g., subnet.place)."
+         )

      def _normalize_to_parts(self, key) -> Tuple[str, ...]:
          """Normalize a place key to tuple of parts for runtime lookup.
@@ -387,9 +400,6 @@ class TransitionRuntime:
                  continue

              place_parts = self._resolve_place_ref(key)
-             if place_parts is None:
-                 continue
-
              key_normalized = self._normalize_to_parts(place_parts)
              explicit_targets.add(key_normalized)
              await self._add_token_to_place(key_normalized, token)
@@ -420,10 +430,6 @@ class TransitionRuntime:
          """Process a single (place_ref, token) tuple yield."""
          place_ref, token = yielded
          place_parts = self._resolve_place_ref(place_ref)
-
-         if place_parts is None:
-             return
-
          key = self._normalize_to_parts(place_parts)
          await self._add_token_to_place(key, token)

@@ -521,6 +527,8 @@ class TransitionRuntime:

          except asyncio.CancelledError:
              pass
+         except ValueError:
+             raise
          except Exception:
              logger.exception("[%s] Transition error", self.fqn)

@@ -969,20 +977,24 @@ class NetRuntime:

  class Runner:
      """High-level runner for executing a Petri net"""
-
+
      def __init__(self, net_func: Any, blackboard: Any):
          if not hasattr(net_func, '_spec'):
              raise ValueError(f"{net_func} is not a valid net")
-
+
          self.spec = net_func._spec
          self.blackboard = blackboard
          self.timebase = None
          self.runtime: Optional[NetRuntime] = None
-
+
      async def start(self, timebase: Any):
          """Start the net with given timebase"""
          self.timebase = timebase
-         self.runtime = NetRuntime(self.spec, self.blackboard, self.timebase)
+         self.runtime = NetRuntime(
+             self.spec,
+             self.blackboard,
+             self.timebase
+         )
          await self.runtime.start()

      async def stop(self, timeout: float = 5.0):
mycorrhizal/rhizomorph/core.py CHANGED
@@ -452,7 +452,35 @@ class Condition(Action[BB]):


  class Sequence(Node[BB]):
-     """Sequence (AND): fail/err fast; RUNNING bubbles; all SUCCESS → SUCCESS."""
+     """Sequence (AND): fail/err fast; RUNNING bubbles; all SUCCESS → SUCCESS.
+
+     Memory Behavior:
+         When memory=True (default), the sequence remembers its position across ticks.
+         This allows the sequence to progress through its children incrementally.
+
+         When memory=False, the sequence restarts from the beginning on every tick.
+         This is useful for reactive sequences that should always start from the first child.
+
+     Important Note on do_while Loops:
+         If a sequence is used as the child of a do_while loop, it typically needs
+         memory=True to make progress. Without memory, the sequence will restart from
+         its first child on each tick, preventing it from completing all children.
+
+     Example:
+         @bt.sequence(memory=True)  # Required for progress
+         def image_samples():
+             yield bt.subtree(MoveToSample)
+             yield send_image_request
+             yield bt.subtree(IncrementSampleCounter)
+
+         @bt.sequence(memory=True)
+         def happy_path():
+             yield bt.do_while(samples_remain)(image_samples)
+             yield set_pod_to_grow
+
+     Without memory=True on image_samples, the do_while loop would never
+     progress past MoveToSample because the sequence would restart each tick.
+     """

      def __init__(
          self,
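
Note: the reactive case (memory=False) is the flip side of the docstring's examples, and the same semantics apply to Selector below. A hedged sketch, assuming the bt DSL shown in this diff (battery_ok and drive_forward are illustrative placeholders, not part of the package):

```python
# memory=False: the guard is re-evaluated on every tick, so the later
# child is only ticked while the guard keeps succeeding.
@bt.sequence(memory=False)
def guarded_drive():
    yield battery_ok      # re-checked each tick (reactive)
    yield drive_forward   # runs only while battery_ok returns SUCCESS
```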
@@ -492,7 +520,20 @@ class Sequence(Node[BB]):


  class Selector(Node[BB]):
-     """Selector (Fallback): first SUCCESS wins; RUNNING bubbles; else FAILURE."""
+     """Selector (Fallback): first SUCCESS wins; RUNNING bubbles; else FAILURE.
+
+     Memory Behavior:
+         When memory=True (default), the selector remembers its position across ticks.
+         This allows the selector to continue trying children from where it left off.
+
+         When memory=False, the selector restarts from the beginning on every tick.
+         This is useful for reactive selectors that should always re-evaluate from the first child.
+
+     Important Note on do_while Loops:
+         Similar to Sequence, if a selector is used as the child of a do_while loop,
+         it typically needs memory=True to make progress. Without memory, the selector
+         will restart from its first child on each tick.
+     """

      def __init__(
          self,
@@ -849,20 +890,72 @@ class Gate(Node[BB]):
          return await self._finish(bb, st, tb)


+ class When(Node[BB]):
+     """
+     Conditionally execute a child, returning SUCCESS if the condition fails.
+
+     Unlike gate(), when() does NOT fail the parent when the condition is false.
+     This is useful for optional steps or feature flags in sequences.
+
+     Behavior:
+         - If condition returns SUCCESS → execute child, return child's status
+         - If condition returns RUNNING → return RUNNING
+         - Otherwise → return SUCCESS (skip but don't fail parent)
+
+     Example:
+         yield bt.when(feature_enabled)(optional_action)
+         # If feature_enabled is false, returns SUCCESS and sequence continues
+     """
+
+     def __init__(
+         self,
+         condition: Node[BB],
+         child: Node[BB],
+         name: Optional[str] = None,
+         exception_policy: ExceptionPolicy = ExceptionPolicy.LOG_AND_CONTINUE,
+     ) -> None:
+         super().__init__(
+             name or f"When(cond={_name_of(condition)}, child={_name_of(child)})",
+             exception_policy=exception_policy,
+         )
+         self.condition = condition
+         self.child = child
+         self.condition.parent = self
+         self.child.parent = self
+
+     def reset(self) -> None:
+         super().reset()
+         self.condition.reset()
+         self.child.reset()
+
+     async def tick(self, bb: BB, tb: Timebase) -> Status:
+         await self._ensure_entered(bb, tb)
+         c = await self.condition.tick(bb, tb)
+         if c is Status.RUNNING:
+             return Status.RUNNING
+         if c is not Status.SUCCESS:
+             # Condition failed - return SUCCESS to skip but don't block parent
+             return await self._finish(bb, Status.SUCCESS, tb)
+         st = await self.child.tick(bb, tb)
+         if st is Status.RUNNING:
+             return Status.RUNNING
+         return await self._finish(bb, st, tb)
+
+
  class Match(Node[BB]):
      """
      Pattern-matching dispatch node.
-
+
      Evaluates a key function against the blackboard, then checks each case
      in order. The first matching case's child is executed. If the child
      completes (SUCCESS or FAILURE), that status is returned immediately.
-
+
      Cases can match by:
          - Type: isinstance(value, case_type)
          - Predicate: case_predicate(value) returns True
          - Value: value == case_value
          - Default: always matches (should be last)
-
+
      If no case matches and there's no default, returns FAILURE.
      """

@@ -1003,6 +1096,68 @@ class DoWhile(Node[BB]):
          return await self._finish(bb, Status.FAILURE, tb)


+ class TryCatch(Node[BB]):
+     """
+     Try-catch error handling node.
+
+     Executes the try block first. If it returns SUCCESS, that status is returned.
+     If the try block returns FAILURE, the catch block is executed instead.
+     Returns SUCCESS if either block succeeds, FAILURE if both fail.
+
+     This is semantically equivalent to a selector with two children, but provides
+     clearer intent and better visualization with labeled edges.
+     """
+
+     def __init__(
+         self,
+         try_block: Node[BB],
+         catch_block: Node[BB],
+         name: Optional[str] = None,
+         exception_policy: ExceptionPolicy = ExceptionPolicy.LOG_AND_CONTINUE,
+     ) -> None:
+         super().__init__(
+             name or f"TryCatch(try={_name_of(try_block)}, catch={_name_of(catch_block)})",
+             exception_policy=exception_policy,
+         )
+         self.try_block = try_block
+         self.catch_block = catch_block
+         self.try_block.parent = self
+         self.catch_block.parent = self
+         self._use_catch = False
+
+     def reset(self) -> None:
+         super().reset()
+         self.try_block.reset()
+         self.catch_block.reset()
+         self._use_catch = False
+
+     async def tick(self, bb: BB, tb: Timebase) -> Status:
+         await self._ensure_entered(bb, tb)
+
+         # If we're in catch mode, continue executing catch block
+         if self._use_catch:
+             st = await self.catch_block.tick(bb, tb)
+             if st is Status.RUNNING:
+                 return Status.RUNNING
+             self._use_catch = False
+             return await self._finish(bb, st, tb)
+
+         # Try the try block
+         st = await self.try_block.tick(bb, tb)
+         if st is Status.RUNNING:
+             return Status.RUNNING
+         if st is Status.SUCCESS:
+             return await self._finish(bb, Status.SUCCESS, tb)
+
+         # Try block failed, switch to catch
+         self._use_catch = True
+         st = await self.catch_block.tick(bb, tb)
+         if st is Status.RUNNING:
+             return Status.RUNNING
+         self._use_catch = False
+         return await self._finish(bb, st, tb)
+
+
  # ======================================================================================
  # Authoring DSL (NodeSpec + bt namespace)
  # ======================================================================================
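
Note: as the TryCatch docstring says, the node is semantically a two-child selector. A hedged sketch of the two equivalent spellings, assuming the bt DSL from this diff (do_work and cleanup are illustrative placeholders):

```python
# Same fallback behavior, different intent and visualization:
fallback_form = bt.selector(do_work, cleanup)    # generic two-child selector
try_catch_form = bt.try_catch(do_work)(cleanup)  # labeled "try"/"catch" edges
```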
@@ -1018,6 +1173,7 @@ class NodeSpecKind(Enum):
      SUBTREE = "subtree"
      MATCH = "match"
      DO_WHILE = "do_while"
+     TRY_CATCH = "try_catch"


  class _DefaultCase:
@@ -1128,6 +1284,16 @@ class NodeSpec:
                  exception_policy=exception_policy,
              )

+             case NodeSpecKind.TRY_CATCH:
+                 try_spec = self.payload["try"]
+                 catch_spec = self.payload["catch"]
+                 return TryCatch(
+                     try_spec.to_node(exception_policy),
+                     catch_spec.to_node(exception_policy),
+                     name=self.name,
+                     exception_policy=exception_policy,
+                 )
+
              case _:
                  raise ValueError(f"Unknown spec kind: {self.kind}")

@@ -1271,6 +1437,31 @@ class _WrapperChain:
              lambda ch: Gate(cond_spec.to_node(), ch),
          )

+     def when(
+         self, condition_spec_or_fn: Union["NodeSpec", Callable[[Any], Any]]
+     ) -> "_WrapperChain":
+         """
+         Add a conditional wrapper that executes the child only when condition is true.
+
+         Unlike gate(), when() returns SUCCESS (not FAILURE) when the condition fails.
+         This allows sequences to continue when optional steps are skipped.
+
+         Args:
+             condition_spec_or_fn: A node spec or callable that returns True/False
+
+         Returns:
+             The chain for further wrapping
+
+         Example:
+             yield bt.when(is_enabled)(optional_action)
+             # If is_enabled is False, returns SUCCESS and sequence continues
+         """
+         cond_spec = bt.as_spec(condition_spec_or_fn)
+         return self._append(
+             f"When(cond={_name_of(cond_spec)})",
+             lambda ch: When(cond_spec.to_node(), ch),
+         )
+
      def __call__(self, inner: Union["NodeSpec", Callable[[Any], Any]]) -> "NodeSpec":
          """
          Apply the chain to a child spec → nested decorator NodeSpecs.
@@ -1342,10 +1533,10 @@ class _MatchBuilder:

  class _DoWhileBuilder:
      """Builder for do_while loops."""
-
+
      def __init__(self, condition_spec: NodeSpec) -> None:
          self._condition_spec = condition_spec
-
+
      def __call__(self, child: Union["NodeSpec", Callable[[Any], Any]]) -> NodeSpec:
          child_spec = bt.as_spec(child)
          return NodeSpec(
@@ -1404,6 +1595,33 @@ class _BT:

          3. Direct call with child nodes:
             bt.sequence(action1, action2, action3)
+
+         Memory Parameter:
+             The memory parameter controls whether the sequence remembers its position
+             across ticks:
+
+             - memory=None (default): Use the Runner's memory setting
+             - memory=True: Remember position, allowing incremental progress
+             - memory=False: Restart from beginning each tick (reactive behavior)
+
+             IMPORTANT: If a sequence is inside a do_while loop and needs to execute
+             all its children incrementally, use memory=True. Otherwise, the sequence
+             will restart from the first child on every tick and never complete.
+
+         Example:
+             # CORRECT - sequence progresses through children
+             @bt.sequence(memory=True)
+             def process_samples():
+                 yield move_to_sample
+                 yield capture_image
+             yield bt.do_while(samples_remain)(process_samples)
+
+             # WRONG - sequence restarts at move_to_sample every tick
+             @bt.sequence(memory=False)
+             def process_samples():
+                 yield move_to_sample
+                 yield capture_image  # Never reached!
+             yield bt.do_while(samples_remain)(process_samples)
          """
          # Case 3: Direct call with children - bt.sequence(node1, node2, ...)
          # This is detected when we have multiple args, or a single arg that's not a generator function
@@ -1488,6 +1706,9 @@ class _BT:

          3. Direct call with child nodes:
             bt.selector(option1, option2, option3)
+
+         The memory parameter defaults to None, which means use the Runner's memory setting.
+         Explicitly set to True or False to override the Runner's setting.
          """
          # Case 3: Direct call with children - bt.selector(node1, node2, ...)
          # This is detected when we have multiple args, or a single arg that's not a generator function
@@ -1604,6 +1825,28 @@ class _BT:
          cond_spec = self.as_spec(condition)
          return _WrapperChain().gate(cond_spec)

+     def when(self, condition: Union[NodeSpec, Callable[[Any], Any]]) -> _WrapperChain:
+         """
+         Create a conditional wrapper that executes the child only when condition is true.
+
+         Unlike gate(), when() returns SUCCESS (not FAILURE) when the condition fails.
+         This is useful for optional steps or feature flags in sequences.
+
+         Args:
+             condition: A node spec or callable that returns True/False
+
+         Returns:
+             A wrapper chain that can be applied to a child node
+
+         Example:
+             @bt.sequence
+             def my_sequence():
+                 yield bt.when(feature_enabled)(optional_feature)
+                 yield next_step  # Always reached, even if flag is disabled
+         """
+         cond_spec = self.as_spec(condition)
+         return _WrapperChain().when(cond_spec)
+
      def match(
          self, key_fn: Callable[[Any], Any], name: Optional[str] = None
      ) -> "_MatchBuilder":
@@ -1685,6 +1928,62 @@ class _BT:
          cond_spec = self.as_spec(condition)
          return _DoWhileBuilder(cond_spec)

+     def try_catch(
+         self, try_block: Union[NodeSpec, Callable[[Any], Any]]
+     ) -> Callable[[Union[NodeSpec, Callable[[Any], Any]]], NodeSpec]:
+         """
+         Create a try-catch error handling pattern.
+
+         Usage:
+             # Define try and catch blocks with explicit memory settings
+             @bt.sequence(memory=True)
+             def try_block():
+                 yield action1
+                 yield action2
+
+             @bt.sequence(memory=True)
+             def catch_block():
+                 yield cleanup
+
+             # Use in the tree
+             @bt.root
+             @bt.sequence
+             def root():
+                 yield bt.try_catch(try_block)(catch_block)
+
+         Behavior:
+             1. Execute try block
+             2. If try block returns SUCCESS → return SUCCESS
+             3. If try block returns FAILURE → execute catch block
+             4. Return SUCCESS if either block succeeds, FAILURE if both fail
+
+         This is semantically equivalent to a selector with two children:
+             - First child (try) runs first
+             - Second child (catch) runs only if try fails
+             - Returns SUCCESS if either succeeds
+
+         Args:
+             try_block: The node to try first (pre-defined sequence/selector/etc with explicit memory settings)
+
+         Returns:
+             A callable that accepts a catch block (pre-defined sequence/selector/etc with explicit memory settings)
+         """
+         try_spec = self.as_spec(try_block)
+
+         def catcher(catch_block: Union[NodeSpec, Callable[[Any], Any]]) -> NodeSpec:
+             catch_spec = self.as_spec(catch_block)
+             return NodeSpec(
+                 kind=NodeSpecKind.TRY_CATCH,
+                 name=f"TryCatch(try={_name_of(try_spec)}, catch={_name_of(catch_spec)})",
+                 payload={
+                     "try": try_spec,
+                     "catch": catch_spec,
+                 },
+                 children=[try_spec, catch_spec],
+             )
+
+         return catcher
+
      def subtree(self, tree: SimpleNamespace) -> NodeSpec:
          """
          Mount another tree's root spec as a subtree.
@@ -1813,6 +2112,8 @@ def _generate_mermaid(tree: SimpleNamespace) -> str:
              return f"Match<br/>{spec.name}"
          case NodeSpecKind.DO_WHILE:
              return f"DoWhile<br/>{spec.name}"
+         case NodeSpecKind.TRY_CATCH:
+             return f"TryCatch<br/>{spec.name}"
          case _:
              return spec.name

@@ -1831,6 +2132,9 @@ def _generate_mermaid(tree: SimpleNamespace) -> str:
              # Children already set (just the body), but we also want to show condition
              cond_spec = spec.payload["condition"]
              spec.children = [cond_spec] + spec.children
+         case NodeSpecKind.TRY_CATCH:
+             # Children already set in _TryCatchBuilder
+             pass
      return spec.children

  def walk(spec: NodeSpec) -> None:
@@ -1860,10 +2164,24 @@ def _generate_mermaid(tree: SimpleNamespace) -> str:
          body_id = nid(children[1])
          lines.append(f'    {this_id} -->|"body"| {body_id}')
          walk(children[1])
+     elif spec.kind == NodeSpecKind.TRY_CATCH:
+         # First child is try, second is catch
+         try_id = nid(children[0])
+         lines.append(f'    {this_id} -->|"try"| {try_id}')
+         walk(children[0])
+         if len(children) > 1:
+             catch_id = nid(children[1])
+             lines.append(f'    {this_id} -->|"catch"| {catch_id}')
+             walk(children[1])
      else:
-         for child in children:
+         for i, child in enumerate(children, start=1):
              child_id = nid(child)
-             lines.append(f"    {this_id} --> {child_id}")
+             if spec.kind == NodeSpecKind.PARALLEL:
+                 lines.append(f'    {this_id} -->|"P"| {child_id}')
+             elif spec.kind in (NodeSpecKind.SEQUENCE, NodeSpecKind.SELECTOR):
+                 lines.append(f'    {this_id} -->|"{i}"| {child_id}')
+             else:
+                 lines.append(f"    {this_id} --> {child_id}")
              walk(child)

  walk(tree.root)
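
Note: for reference, a hedged sketch of the edge labels the updated generator emits (node ids are illustrative): ordinal labels for sequence/selector children, "P" for parallel children, and named edges for try_catch:

    n0 -->|"1"| n1
    n0 -->|"2"| n2
    n0 -->|"P"| n3
    n4 -->|"try"| n5
    n4 -->|"catch"| n6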
mycorrhizal/spores/__init__.py CHANGED
@@ -82,10 +82,14 @@ from .core import (
      configure,
      get_config,
      get_object_cache,
+     flush_object_cache,
+     get_cache_metrics,
      get_spore_sync,
      get_spore_async,
      spore,
      SporesConfig,
+     EvictionPolicy,
+     CacheMetrics,
      EventLogger,
      AsyncEventLogger,
      SyncEventLogger,
@@ -132,6 +136,8 @@ __all__ = [
      'configure',
      'get_config',
      'get_object_cache',
+     'flush_object_cache',
+     'get_cache_metrics',
      'get_spore_sync',
      'get_spore_async',
      'EventLogger',
@@ -139,6 +145,8 @@ __all__ = [
      'SyncEventLogger',
      'spore',  # For DSL adapters and @spore.object() decorator
      'SporesConfig',
+     'EvictionPolicy',
+     'CacheMetrics',

      # Models
      'Event',
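
Note: the net effect of these export changes, as a hedged one-liner (paths per the package layout in this diff):

```python
# New names importable from the package root as of 0.2.2:
from mycorrhizal.spores import (
    flush_object_cache, get_cache_metrics, EvictionPolicy, CacheMetrics,
)
```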
mycorrhizal/spores/cache.py CHANGED
@@ -8,11 +8,14 @@ LRU cache for tracking seen objects and logging them on eviction.
  from __future__ import annotations

  from collections import OrderedDict
- from typing import Dict, Callable, Optional, TypeVar, Generic
+ from typing import Dict, Callable, Optional, TypeVar, Generic, TYPE_CHECKING
  from dataclasses import dataclass

  from .models import Object

+ if TYPE_CHECKING:
+     from .core import CacheMetrics
+

  # Generic types for cache
  K = TypeVar('K')  # Key type (object ID)
@@ -28,61 +31,90 @@ class CacheEntry:
          object: The OCEL Object
          sight_count: How many times we've seen this object
          first_sight_time: When we first saw the object
+         attributes_hash: Hash of object attributes for change detection
      """
      object: Object
      sight_count: int = 1
      first_sight_time: Optional[float] = None
+     attributes_hash: Optional[int] = None
+
+
+ def _compute_attributes_hash(obj: Object) -> Optional[int]:
+     """
+     Compute hash of object attributes for change detection.
+
+     Args:
+         obj: The OCEL Object
+
+     Returns:
+         Hash of attributes, or None if object has no attributes
+     """
+     if not obj.attributes:
+         return None
+     # Hash frozenset of (attr_name, attr_value) tuples
+     # ObjectAttributeValue.value holds the actual Python value
+     attrs = {k: v.value for k, v in obj.attributes.items()}
+     return hash(frozenset(attrs.items()))


  class ObjectLRUCache(Generic[K, V]):
      """
-     LRU cache with eviction callback for object tracking.
+     LRU cache with unified callback for object logging.

-     When an object is first seen, it should be logged via on_first_sight.
-     When an object is evicted from the cache, it's logged via on_evict.
+     The cache fires the `needs_logged` callback when an object needs to be logged:
+     - On first sight (new object added to cache)
+     - On eviction (object removed from cache to make room)
+     - When attributes change (detected via hash comparison)
+     - Every N touches (configurable via `touch_resend_n`)

      This ensures OCEL consumers see object evolution:
      - First sight: Initial object state
+     - Attribute changes: Updated state
      - Eviction: Final state before being removed from cache
+     - Periodic resend: Long-lived objects are re-logged periodically

      Example:
          ```python
-         def on_evict(object_id: str, obj: Object):
+         def needs_logged(object_id: str, obj: Object):
              # Send object to transport
              transport.send(LogRecord(object=obj))

-         cache = ObjectLRUCache(maxsize=128, on_evict=on_evict)
+         cache = ObjectLRUCache(maxsize=128, needs_logged=needs_logged, touch_resend_n=100)

          # Check if object exists, add if not
          if not cache.contains_or_add(object_id, object):
-             # Object not in cache, already logged by on_first_sight
+             # Object not in cache, already logged by needs_logged
              pass
          ```

      Args:
          maxsize: Maximum number of objects to cache
-         on_evict: Callback when object is evicted (object_id, object) -> None
-         on_first_sight: Optional callback when object is first seen (object_id, object) -> None
+         needs_logged: Callback when object needs logging (object_id, object) -> None
+         touch_resend_n: Resend object every N touches (0 to disable periodic resend)
      """

      def __init__(
          self,
          maxsize: int = 128,
-         on_evict: Optional[Callable[[K, Object], None]] = None,
-         on_first_sight: Optional[Callable[[K, Object], None]] = None
+         needs_logged: Optional[Callable[[K, Object], None]] = None,
+         touch_resend_n: int = 100,
+         metrics: Optional["CacheMetrics"] = None
      ):
          self.maxsize = maxsize
-         self.on_evict = on_evict
-         self.on_first_sight = on_first_sight
+         self.needs_logged = needs_logged
+         self.touch_resend_n = touch_resend_n
+         self.metrics = metrics
          self._cache: OrderedDict[K, CacheEntry] = OrderedDict()

      def _evict_if_needed(self) -> None:
          """Evict oldest entry if cache is full."""
-         if len(self._cache) >= self.maxsize:
+         while len(self._cache) >= self.maxsize:
              # FIFO from OrderedDict (oldest first)
              object_id, entry = self._cache.popitem(last=False)
-             if self.on_evict:
-                 self.on_evict(object_id, entry.object)
+             if self.metrics is not None:
+                 self.metrics.evictions += 1
+             if self.needs_logged:
+                 self.needs_logged(object_id, entry.object)

      def get(self, key: K) -> Optional[Object]:
@@ -119,8 +151,13 @@ class ObjectLRUCache(Generic[K, V]):
          Check if key exists, add if not.

          This is the primary method for object tracking:
-         - If key exists: mark as recently used, return True
-         - If key doesn't exist: add object, potentially evict, call on_first_sight, return False
+         - If key exists: check for attribute changes, periodic resend, mark as recently used
+         - If key doesn't exist: add object, potentially evict, log first sight
+
+         The `needs_logged` callback is fired when:
+         - First sight: new object added to cache
+         - Attribute change: object attributes changed (hash comparison)
+         - Every N touches: periodic resend for long-lived objects

          Args:
              key: The object ID
@@ -130,27 +167,51 @@ class ObjectLRUCache(Generic[K, V]):
              True if object was already in cache, False if it was just added
          """
          if key in self._cache:
-             # Already seen, mark as recently used
-             self._cache.move_to_end(key)
+             # Object exists - check if we need to log
              entry = self._cache[key]
              entry.sight_count += 1
+
+             # Compute current attribute hash
+             current_hash = _compute_attributes_hash(obj)
+
+             # Check if attributes changed
+             attrs_changed = (entry.attributes_hash is not None and
+                              current_hash != entry.attributes_hash)
+
+             # Update if attributes changed
+             if attrs_changed:
+                 entry.object = obj
+                 entry.attributes_hash = current_hash
+
+             # Fire callback if ANY condition met
+             should_log = any([
+                 attrs_changed,
+                 self.touch_resend_n > 0 and entry.sight_count % self.touch_resend_n == 0,
+             ])
+             if should_log and self.needs_logged:
+                 self.needs_logged(key, entry.object)
+
+             self._cache.move_to_end(key)
              return True
          else:
-             # First sight
+             # First sight - store hash, fire callback
              self._evict_if_needed()
-
-             entry = CacheEntry(object=obj)
+             if self.metrics is not None:
+                 self.metrics.first_sights += 1
+             attr_hash = _compute_attributes_hash(obj)
+             entry = CacheEntry(object=obj, attributes_hash=attr_hash)
              self._cache[key] = entry
-
-             if self.on_first_sight:
-                 self.on_first_sight(key, obj)
-
+             if self.needs_logged:
+                 self.needs_logged(key, obj)
              return False

      def add(self, key: K, obj: Object) -> None:
          """
          Add an object to the cache (or update if exists).

+         Note: This method fires needs_logged for new objects, but not for updates.
+         For attribute change detection and periodic resend, use contains_or_add().
+
          Args:
              key: The object ID
              obj: The OCEL Object
@@ -165,11 +226,12 @@ class ObjectLRUCache(Generic[K, V]):
              # New entry
              self._evict_if_needed()

-             entry = CacheEntry(object=obj)
+             attr_hash = _compute_attributes_hash(obj)
+             entry = CacheEntry(object=obj, attributes_hash=attr_hash)
              self._cache[key] = entry

-             if self.on_first_sight:
-                 self.on_first_sight(key, obj)
+             if self.needs_logged:
+                 self.needs_logged(key, obj)

      def remove(self, key: K) -> Optional[Object]:
          """
mycorrhizal/spores/core.py CHANGED
@@ -13,11 +13,12 @@ import functools
  import logging
  import threading
  from datetime import datetime
+ from enum import Enum
  from typing import (
      Any, Callable, Optional, Union, Dict, List,
      ParamSpec, TypeVar, overload, get_origin, get_args, Annotated
  )
- from dataclasses import dataclass
+ from dataclasses import dataclass, field
  from abc import ABC, abstractmethod

  from .models import (
@@ -44,6 +45,28 @@ R = TypeVar('R')
  # Global Configuration
  # ============================================================================

+ class EvictionPolicy(str, Enum):
+     """
+     Cache eviction policy for object logging.
+
+     Attributes:
+         EVICT_AND_LOG: Evict from cache when full, log immediately via sync/async path
+         EVICT_AND_BUFFER: Evict from cache, buffer for later logging (future)
+         NO_EVICT: Keep in cache until explicit flush (future)
+     """
+     EVICT_AND_LOG = "evict_and_log"
+     EVICT_AND_BUFFER = "evict_and_buffer"
+     NO_EVICT = "no_evict"
+
+
+ @dataclass
+ class CacheMetrics:
+     """Track cache eviction statistics."""
+     evictions: int = 0
+     eviction_failures: int = 0
+     first_sights: int = 0
+
+
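Note: because EvictionPolicy subclasses str, members compare equal to their string values, which is what lets configure() below accept either form. A quick hedged check:

```python
from mycorrhizal.spores import EvictionPolicy

assert EvictionPolicy.EVICT_AND_LOG == "evict_and_log"        # str-enum equality
assert EvictionPolicy("no_evict") is EvictionPolicy.NO_EVICT  # lookup by value
```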
  @dataclass
  class SporesConfig:
      """
@@ -54,11 +77,15 @@ class SporesConfig:
          object_cache_size: Maximum objects in LRU cache
          encoder: Encoder instance to use
          transport: Transport instance to use (required for logging to work)
+         eviction_policy: Policy for handling cache eviction
+         touch_resend_n: Resend object every N touches (0 to disable periodic resend)
      """
      enabled: bool = True
      object_cache_size: int = 128
      encoder: Optional[Encoder] = None
      transport: Optional[Transport] = None
+     eviction_policy: EvictionPolicy = EvictionPolicy.EVICT_AND_LOG
+     touch_resend_n: int = 100

      def __post_init__(self):
          """Set default encoder if not provided."""
@@ -69,24 +96,34 @@
  # Global state
  _config: Optional[SporesConfig] = None
  _object_cache: Optional[ObjectLRUCache[str, Object]] = None
+ _config_lock = threading.RLock()  # Reentrant lock for global state initialization
+ _config_initialized = False  # Track whether configure() has been called
+ _cache_metrics = CacheMetrics()  # Track cache eviction statistics


  def configure(
      enabled: bool = True,
      object_cache_size: int = 128,
      encoder: Optional[Encoder] = None,
-     transport: Optional[Transport] = None
+     transport: Optional[Transport] = None,
+     eviction_policy: Union[EvictionPolicy, str] = EvictionPolicy.EVICT_AND_LOG,
+     touch_resend_n: int = 100
  ) -> None:
      """
-     Configure the Spores logging system.
+     Configure the Spores logging system (thread-safe).

      This should be called once at application startup.
+     If called multiple times, the last call wins.
+
+     Thread-safe: Can be called from multiple threads concurrently.

      Args:
          enabled: Whether spores logging is enabled (default: True)
          object_cache_size: Maximum objects in LRU cache (default: 128)
          encoder: Encoder instance (defaults to JSONEncoder)
          transport: Transport instance (required for logging to work)
+         eviction_policy: Policy for cache eviction (default: evict_and_log)
+         touch_resend_n: Resend object every N touches (default: 100, 0 to disable)

      Example:
          ```python
@@ -94,72 +131,183 @@ def configure(

          spore.configure(
              transport=FileTransport("logs/ocel.jsonl"),
-             object_cache_size=256
+             object_cache_size=256,
+             touch_resend_n=100,
          )
          ```
      """
-     global _config, _object_cache
-
-     _config = SporesConfig(
-         enabled=enabled,
-         object_cache_size=object_cache_size,
-         encoder=encoder,
-         transport=transport
-     )
-
-     # Create object cache with eviction callback
-     def on_evict(object_id: str, obj: Object):
-         """Send object when evicted from cache."""
-         try:
-             loop = asyncio.get_event_loop()
-             if loop.is_running():
-                 asyncio.create_task(_send_log_record(LogRecord(object=obj)))
-             else:
-                 # No running loop, schedule the coroutine
-                 asyncio.ensure_future(_send_log_record(LogRecord(object=obj)))
-         except RuntimeError:
-             # No event loop at all, ignore for now
-             pass
-
-     def on_first_sight(object_id: str, obj: Object):
-         """Send object when first seen."""
-         try:
-             loop = asyncio.get_event_loop()
-             if loop.is_running():
-                 asyncio.create_task(_send_log_record(LogRecord(object=obj)))
-             else:
-                 # No running loop, schedule the coroutine
-                 asyncio.ensure_future(_send_log_record(LogRecord(object=obj)))
-         except RuntimeError:
-             # No event loop at all, ignore for now
-             pass
+     global _config, _object_cache, _config_initialized, _cache_metrics
+
+     # Convert string to EvictionPolicy if needed
+     if isinstance(eviction_policy, str):
+         eviction_policy = EvictionPolicy(eviction_policy)
+
+     with _config_lock:
+         # Warn if already configured (but allow reconfiguration)
+         if _config_initialized:
+             logger.warning(
+                 "Spores already configured. Reconfiguring may cause issues. "
+                 "Ensure configure() is called only once at startup."
+             )
+
+         # Create new config (atomic under lock)
+         _config = SporesConfig(
+             enabled=enabled,
+             object_cache_size=object_cache_size,
+             encoder=encoder,
+             transport=transport,
+             eviction_policy=eviction_policy,
+             touch_resend_n=touch_resend_n
+         )

-     _object_cache = ObjectLRUCache(
-         maxsize=object_cache_size,
-         on_evict=on_evict,
-         on_first_sight=on_first_sight
-     )
+         # Reset metrics on reconfiguration
+         _cache_metrics = CacheMetrics()

-     logger.info(f"Spores configured: enabled={enabled}, cache_size={object_cache_size}")
+         # Create object cache with unified callback
+         def needs_logged(object_id: str, obj: Object):
+             """Log object when it needs to be logged (first sight, eviction, change, or Nth touch)."""
+             global _cache_metrics
+
+             try:
+                 loop = asyncio.get_event_loop()
+                 if loop.is_running():
+                     # Async context: schedule task
+                     asyncio.create_task(_send_log_record(LogRecord(object=obj)))
+                 else:
+                     # No running loop, use sync path
+                     _send_log_record_sync(LogRecord(object=obj))
+             except RuntimeError:
+                 # No event loop at all, use sync path
+                 try:
+                     _send_log_record_sync(LogRecord(object=obj))
+                 except Exception as e:
+                     logger.error(f"Failed to log object {object_id}: {e}")
+
+         _object_cache = ObjectLRUCache(
+             maxsize=object_cache_size,
+             needs_logged=needs_logged,
+             touch_resend_n=touch_resend_n,
+             metrics=_cache_metrics
+         )
+
+         _config_initialized = True
+
+         logger.info(f"Spores configured: enabled={enabled}, cache_size={object_cache_size}, eviction_policy={eviction_policy.value}")


  def get_config() -> SporesConfig:
-     """Get the current spores configuration."""
-     global _config
-     if _config is None:
-         # Use default configuration
-         _config = SporesConfig()
+     """
+     Get the current spores configuration (thread-safe).
+
+     If no configuration exists, creates a default one.
+     Thread-safe: Can be called from multiple threads concurrently.
+     """
+     global _config, _config_initialized
+
+     # Fast path: read without lock (GIL protects single read)
+     if _config is not None:
+         return _config
+
+     # Slow path: needs initialization
+     with _config_lock:
+         # Double-check: another thread may have initialized while we waited
+         if _config is None:
+             # Initialize with defaults
+             _config = SporesConfig()
+             _config_initialized = True
+
      return _config


  def get_object_cache() -> ObjectLRUCache[str, Object]:
-     """Get the object cache."""
+     """
+     Get the object cache (thread-safe).
+
+     If no cache exists, initializes with default configuration.
+     Thread-safe: Can be called from multiple threads concurrently.
+     """
      global _object_cache
-     if _object_cache is None:
-         configure()  # Initialize with defaults
+
+     # Fast path: read without lock (GIL protects single read)
+     if _object_cache is not None:
+         return _object_cache
+
+     # Slow path: needs initialization
+     with _config_lock:
+         # Double-check: another thread may have initialized while we waited
+         if _object_cache is None:
+             # Trigger full initialization
+             configure()  # Will acquire lock again, but that's OK (reentrant in same thread)
+
      return _object_cache


+ def flush_object_cache() -> None:
+     """
+     Flush all objects from the cache to the log.
+
+     This forces all cached objects to be written, even if they haven't
+     been evicted yet. Use this before application shutdown to ensure
+     all objects are logged.
+
+     Thread-safe: Can be called from multiple threads concurrently.
+
+     Example:
+         ```python
+         from mycorrhizal.spores import configure, flush_object_cache
+         from mycorrhizal.spores.transport import FileTransport
+
+         configure(transport=FileTransport("logs/ocel.jsonl"))
+
+         # ... application logic ...
+
+         flush_object_cache()  # Ensure all objects logged
+         ```
+     """
+     cache = get_object_cache()
+     config = get_config()
+
+     if not config.enabled:
+         return
+
+     # Get all objects currently in cache
+     all_objects = list(cache._cache.values())
+
+     # Log each object
+     flushed_count = 0
+     for entry in all_objects:
+         try:
+             # Use sync path to ensure it's written
+             _send_log_record_sync(LogRecord(object=entry.object))
+             flushed_count += 1
+         except Exception as e:
+             logger.error(f"Failed to flush object {entry.object.id}: {e}")
+
+     logger.info(f"Flushed {flushed_count} objects from cache")
+
+
+ def get_cache_metrics() -> CacheMetrics:
+     """
+     Get cache eviction metrics.
+
+     Returns statistics about cache evictions and failures.
+
+     Returns:
+         CacheMetrics with eviction statistics
+
+     Example:
+         ```python
+         from mycorrhizal.spores import get_cache_metrics
+
+         metrics = get_cache_metrics()
+         print(f"Evictions: {metrics.evictions}")
+         print(f"Failures: {metrics.eviction_failures}")
+         ```
+     """
+     global _cache_metrics
+     return _cache_metrics
+
+
  async def _send_log_record(record: LogRecord) -> None:
      """
      Send a log record via the configured transport (async).
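
Note: a hedged end-to-end sketch tying the new pieces together, combining string-coerced eviction_policy, periodic resend, shutdown flush, and metrics (the log path is illustrative; imports follow the docstrings above):

```python
from mycorrhizal.spores import configure, flush_object_cache, get_cache_metrics
from mycorrhizal.spores.transport import FileTransport

configure(
    transport=FileTransport("logs/ocel.jsonl"),
    eviction_policy="evict_and_log",  # str form; coerced to EvictionPolicy
    touch_resend_n=100,               # re-log long-lived objects every 100 touches
)

# ... application logic ...

flush_object_cache()  # write everything still cached before shutdown
m = get_cache_metrics()
print(f"evictions={m.evictions} first_sights={m.first_sights}")
```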
@@ -1236,6 +1384,13 @@ __all__ = [
      'configure',
      'get_config',
      'get_object_cache',
+     'flush_object_cache',
+     'get_cache_metrics',
+
+     # Types
+     'SporesConfig',
+     'EvictionPolicy',
+     'CacheMetrics',

      # Spore getters (explicit)
      'get_spore_sync',
mycorrhizal/spores/encoder/json.py CHANGED
@@ -10,7 +10,7 @@ from __future__ import annotations

  import json
  from datetime import datetime
- from typing import Any, Dict
+ from typing import Any, Dict, TYPE_CHECKING

  from .base import Encoder
  from ..models import LogRecord, Event, Object, Relationship, EventAttributeValue, ObjectAttributeValue
@@ -20,7 +20,7 @@ class JSONEncoder(Encoder):
      """
      JSON encoder for OCEL LogRecords.

-     Produces OCEL-compatible JSON.
+     Produces OCEL-compatible JSON with Unix float64 timestamps.

      Example output:
          {
@@ -28,9 +28,9 @@ class JSONEncoder(Encoder):
              "id": "evt-123",
              "type": "process_item",
              "activity": null,
-             "time": "2025-01-11T12:34:56.789000000Z",
+             "time": 1705689296.123456,
              "attributes": [
-                 {"name": "priority", "value": "high", "type": "string", "time": null}
+                 {"name": "priority", "value": "high", "type": "string"}
              ],
              "relationships": [
                  {"objectId": "obj-456", "qualifier": "input"}
@@ -46,13 +46,21 @@ class JSONEncoder(Encoder):
              "id": "obj-456",
              "type": "WorkItem",
              "attributes": [
-                 {"name": "status", "value": "pending", "type": "string", "time": null}
+                 {"name": "status", "value": "pending", "type": "string", "time": 1705689296.123456}
              ],
              "relationships": []
          }
      }
      """

+     def __init__(self):
+         """
+         Initialize JSONEncoder.
+
+         Timestamps are always encoded as Unix float64 timestamps.
+         """
+         pass
+
      def encode(self, record: LogRecord) -> bytes:
          """
          Encode a LogRecord to JSON bytes.
@@ -136,33 +144,14 @@ class JSONEncoder(Encoder):
                  "qualifier": qualifier
              }

-     def _format_datetime(self, dt: datetime) -> str:
+     def _format_datetime(self, dt: datetime) -> float:
          """
-         Format datetime to RFC3339Nano.
+         Format datetime as Unix float64 timestamp.

          Args:
              dt: The datetime to format

          Returns:
-             ISO format string with nanosecond precision
+             Unix float64 timestamp (seconds since epoch)
          """
-         # Python's isoformat() produces RFC3339-compatible output
-         # For nanosecond precision, we need to ensure it has 9 digits
-         iso = dt.isoformat()
-
-         # Add microseconds if not present (Python 3.11+ has timespec='nanoseconds')
-         if '.' not in iso:
-             iso += '.000000000'
-
-         # Ensure we have 9 digits of fractional seconds
-         if '.' in iso:
-             main, frac = iso.split('.')
-             # Pad or truncate to 9 digits
-             frac = (frac + '0' * 9)[:9]
-             iso = f"{main}.{frac}"
-
-         # Ensure timezone is Z (UTC) or offset
-         if iso.endswith('+00:00'):
-             iso = iso[:-6] + 'Z'
-
-         return iso
+         return dt.timestamp()
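
Note: the new encoding is just datetime.timestamp(). A quick check that reproduces the 1705689296.123456 value used in the docstring example above:

```python
from datetime import datetime, timezone

dt = datetime(2024, 1, 19, 18, 34, 56, 123456, tzinfo=timezone.utc)
print(dt.timestamp())  # 1705689296.123456 — Unix seconds as float64
```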
mycorrhizal-0.2.2.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mycorrhizal
- Version: 0.2.1
+ Version: 0.2.2
  Summary: Utilities and DSLs for modelling and implementing safe, performant, structured systems
  Author-email: Jeff Ciesielski <jeffciesielski@gmail.com>
  Requires-Python: >=3.10
mycorrhizal-0.2.2.dist-info/RECORD CHANGED
@@ -1,5 +1,5 @@
  mycorrhizal/__init__.py,sha256=ajz1GSNU9xYVrFEDSz6Xwg7amWQ_yvW75tQa1ZvRIWc,3
- mycorrhizal/_version.py,sha256=Ui79YHRxRfg2Yk8f7S1RGQcYuTAo3-YJSMGCUb8i3gI,18
+ mycorrhizal/_version.py,sha256=YRbFwheRvZAINxDCCNbF89MTOO3B_-zLEY8eP9-nRbI,18
  mycorrhizal/common/__init__.py,sha256=OwPFyCFW6H6E6EkQIjQfl-h67qfo36geu0ndfXqYEn4,1705
  mycorrhizal/common/cache.py,sha256=D-L9aVG3eXemXKVmhiRONFB0KWobzRHACknMkHgZ3ug,3477
  mycorrhizal/common/compilation.py,sha256=f98aE0Dq6ztgm6SjxYbWKGjhOZ2i9SWLT7bc2Yvvwos,8922
@@ -13,7 +13,7 @@ mycorrhizal/hypha/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU
  mycorrhizal/hypha/util.py,sha256=tpvjWXTdGhtVUycQElcQg4usOy-tSEqwE9KEni8R30s,906
  mycorrhizal/hypha/core/__init__.py,sha256=o6BE3fhRKjVfyIWwtNTO64kPt0udlZGyXr-ZuN_ZzF8,2518
  mycorrhizal/hypha/core/builder.py,sha256=aZ6xvktGVteDBVFf2Uva7-7vsBbpAP3maBfiL6xiJl0,17205
- mycorrhizal/hypha/core/runtime.py,sha256=BTguGT2ku2U70r-5IYVPP3ubrG1QC5_opQ0hbF5162k,40830
+ mycorrhizal/hypha/core/runtime.py,sha256=hwaI1fq1Hz6tKL8K6TLt5yVQKHKmaaDWk2h2BPl5L_8,41311
  mycorrhizal/hypha/core/specs.py,sha256=uWvW9x7-bSoSENIUJkYMIkJ3KxsgE9mB9PyRLPrOEjk,8429
  mycorrhizal/mycelium/__init__.py,sha256=MFgh6eZVjE1zSwgDqD2GXneZv-7MWtRoky5Qtmydqdw,4231
  mycorrhizal/mycelium/core.py,sha256=ly3GtHKHHwqoODk3tFWMKoUT3VYPsQHBYyDNSKCmGMY,19583
@@ -27,16 +27,16 @@ mycorrhizal/mycelium/tree_builder.py,sha256=WOxCZgAPmGmFb_t0FghCNgAVdVLEqdbtWN5K
  mycorrhizal/mycelium/tree_spec.py,sha256=0RyZBOJROxWrYdqtNeawWv1vB5ZgwzGR-lWFN8b2-Ro,6919
  mycorrhizal/rhizomorph/README.md,sha256=UrCYyaMUtqa9U2ejMyg9om-1QhFm3S6sFS9Cjp_4jUs,9367
  mycorrhizal/rhizomorph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- mycorrhizal/rhizomorph/core.py,sha256=LThaeieSyQWcv7lxvdIfbWfHCG238uacleurw75hmus,67413
+ mycorrhizal/rhizomorph/core.py,sha256=KL2qCLh24jfjeWvNuFYhfefqokrlNNouW7n0VkujErY,79711
  mycorrhizal/rhizomorph/util.py,sha256=dUgKAWX5RNtzFK01mGkz10tJyeTMCqyyfnZ8Mi97LkY,1280
  mycorrhizal/septum/TRANSITION_REFERENCE.md,sha256=4mxDeTbeuqIDJCebXlwxT537Tgk2XLpinssam6I6lNw,10987
  mycorrhizal/septum/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mycorrhizal/septum/core.py,sha256=vGGvhP0TDidu5MGJgxiioQXsE3t-G9uvQKJeEud_18o,64043
  mycorrhizal/septum/testing_utils.py,sha256=ONk9hOm7oc8qSgbOcsWaZvlfP_iOUS7uoo3aS7Pfuis,17314
  mycorrhizal/septum/util.py,sha256=J4D0uBzNkVWDBwrOMhG9x5Y4lkjP8pEDkuc2d4UX7eI,9344
- mycorrhizal/spores/__init__.py,sha256=iePLLnGdKVHKrzoy9gQFARcQG0jmDIUavkJwLv9Mx6w,4766
- mycorrhizal/spores/cache.py,sha256=0FuWHKudgyC7vlrlcJ_kmRKr7OAJuC2gxRURReMlqF4,6119
- mycorrhizal/spores/core.py,sha256=fBklI4pQUd78o8qJ6_MXFmDZN612CDqr3dfS0yAmc_U,45109
+ mycorrhizal/spores/__init__.py,sha256=xrp94D3x2pf-qlkK8bcWn6cJkMl4WxetzrbZSB1PtL8,4944
+ mycorrhizal/spores/cache.py,sha256=gIHYcawH1JLmEF3iBSW66kt0nS1GuKhoVlfO-WDTYVs,8728
+ mycorrhizal/spores/core.py,sha256=4JTN5IIJ2yI_6vURzy2lTtGjRlz0x7A52AOSSr5hvMM,50227
  mycorrhizal/spores/extraction.py,sha256=pCc_bm-MUx7SYGM9xI_m4cOL3X5VjGIr3FQ66fkUW5k,15063
  mycorrhizal/spores/models.py,sha256=nwjSSbSvBMuuE0__nDt2loDRJhJWQBu0Y3jRg4p3wfU,9724
  mycorrhizal/spores/dsl/__init__.py,sha256=DDrLqJ4CjtRsepmzsxkdxu_ugYYpAwaxawau-0t4Bm0,1163
@@ -45,10 +45,10 @@ mycorrhizal/spores/dsl/rhizomorph.py,sha256=JXaZmRyiKVOz2I1uf-96u7Zl9h4YlJCaAtaX
  mycorrhizal/spores/dsl/septum.py,sha256=bG6HPTAWAAoSbps-2hs0oGsyTOEq1pPCIsgDvmfUI6Q,14206
  mycorrhizal/spores/encoder/__init__.py,sha256=hpp1W4xwQUG1N1zdrea0coBPtMf-WYSE77Q780TinP8,204
  mycorrhizal/spores/encoder/base.py,sha256=xFKk-f5_W_HNrOgNXyquUwhcrcz28LmeT5Y5enDVrwU,870
- mycorrhizal/spores/encoder/json.py,sha256=OPezVz-5-to-NmZ_QuVGMPSLn_24GbdG6Y3F4_RXx40,5064
+ mycorrhizal/spores/encoder/json.py,sha256=Zz5kNGKLOF8qaJ-iiM2m9lQ-G-TU354iKQ0drLVxUG4,4615
  mycorrhizal/spores/transport/__init__.py,sha256=Tw6glCXsPK2pzsLPbb9vKOCvvT9vh-ql-xUaulKBAw0,407
  mycorrhizal/spores/transport/base.py,sha256=OsyWx1J8Ig7Y7idUoI9oS-LkONwQAxAP47jL_5OVRjA,1668
  mycorrhizal/spores/transport/file.py,sha256=xcMPxAGiv5q-GfcNKsF_3a-2-yxlDkhHusZli1Fsqa0,3833
- mycorrhizal-0.2.1.dist-info/METADATA,sha256=_6Q5ls7HRBVzc0ewoP8V1ARbO7W6TMkY6nGvlabOX1Q,11607
- mycorrhizal-0.2.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- mycorrhizal-0.2.1.dist-info/RECORD,,
+ mycorrhizal-0.2.2.dist-info/METADATA,sha256=xDKBhCtW4-zjFunGZSJ-rU3vGGG79GoCNnLrctX1kwg,11607
+ mycorrhizal-0.2.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ mycorrhizal-0.2.2.dist-info/RECORD,,