empathy-framework 3.5.6-py3-none-any.whl → 3.7.0-py3-none-any.whl
This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- agents/compliance_anticipation_agent.py +113 -118
- agents/compliance_db.py +339 -0
- agents/epic_integration_wizard.py +37 -48
- agents/notifications.py +291 -0
- agents/trust_building_behaviors.py +66 -85
- coach_wizards/__init__.py +11 -12
- coach_wizards/accessibility_wizard.py +12 -12
- coach_wizards/api_wizard.py +12 -12
- coach_wizards/base_wizard.py +26 -20
- coach_wizards/cicd_wizard.py +15 -13
- coach_wizards/compliance_wizard.py +12 -12
- coach_wizards/database_wizard.py +12 -12
- coach_wizards/debugging_wizard.py +12 -12
- coach_wizards/documentation_wizard.py +12 -12
- coach_wizards/generate_wizards.py +1 -2
- coach_wizards/localization_wizard.py +21 -14
- coach_wizards/migration_wizard.py +12 -12
- coach_wizards/monitoring_wizard.py +12 -12
- coach_wizards/observability_wizard.py +12 -12
- coach_wizards/performance_wizard.py +12 -12
- coach_wizards/prompt_engineering_wizard.py +22 -25
- coach_wizards/refactoring_wizard.py +12 -12
- coach_wizards/scaling_wizard.py +12 -12
- coach_wizards/security_wizard.py +12 -12
- coach_wizards/testing_wizard.py +12 -12
- {empathy_framework-3.5.6.dist-info → empathy_framework-3.7.0.dist-info}/METADATA +234 -30
- empathy_framework-3.7.0.dist-info/RECORD +105 -0
- empathy_healthcare_plugin/__init__.py +1 -2
- empathy_llm_toolkit/__init__.py +5 -6
- empathy_llm_toolkit/claude_memory.py +14 -15
- empathy_llm_toolkit/code_health.py +27 -19
- empathy_llm_toolkit/contextual_patterns.py +11 -12
- empathy_llm_toolkit/core.py +43 -49
- empathy_llm_toolkit/git_pattern_extractor.py +16 -12
- empathy_llm_toolkit/levels.py +6 -13
- empathy_llm_toolkit/pattern_confidence.py +14 -18
- empathy_llm_toolkit/pattern_resolver.py +10 -12
- empathy_llm_toolkit/pattern_summary.py +13 -11
- empathy_llm_toolkit/providers.py +27 -38
- empathy_llm_toolkit/session_status.py +18 -20
- empathy_llm_toolkit/state.py +20 -21
- empathy_os/__init__.py +72 -73
- empathy_os/cli.py +193 -98
- empathy_os/cli_unified.py +68 -41
- empathy_os/config.py +31 -31
- empathy_os/coordination.py +48 -54
- empathy_os/core.py +90 -99
- empathy_os/cost_tracker.py +20 -23
- empathy_os/discovery.py +9 -11
- empathy_os/emergence.py +20 -21
- empathy_os/exceptions.py +18 -30
- empathy_os/feedback_loops.py +27 -30
- empathy_os/levels.py +31 -34
- empathy_os/leverage_points.py +27 -28
- empathy_os/logging_config.py +11 -12
- empathy_os/monitoring.py +27 -27
- empathy_os/pattern_library.py +29 -28
- empathy_os/persistence.py +30 -34
- empathy_os/platform_utils.py +46 -47
- empathy_os/redis_config.py +14 -15
- empathy_os/redis_memory.py +53 -56
- empathy_os/templates.py +12 -11
- empathy_os/trust_building.py +44 -36
- empathy_os/workflow_commands.py +123 -31
- empathy_software_plugin/__init__.py +1 -2
- empathy_software_plugin/cli.py +32 -25
- empathy_software_plugin/plugin.py +4 -8
- empathy_framework-3.5.6.dist-info/RECORD +0 -103
- {empathy_framework-3.5.6.dist-info → empathy_framework-3.7.0.dist-info}/WHEEL +0 -0
- {empathy_framework-3.5.6.dist-info → empathy_framework-3.7.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-3.5.6.dist-info → empathy_framework-3.7.0.dist-info}/licenses/LICENSE +0 -0
- {empathy_framework-3.5.6.dist-info → empathy_framework-3.7.0.dist-info}/top_level.txt +0 -0
empathy_os/coordination.py
CHANGED
```diff
@@ -1,5 +1,4 @@
-"""
-Multi-Agent Coordination for Distributed Memory Networks
+"""Multi-Agent Coordination for Distributed Memory Networks
 
 Provides conflict resolution and coordination primitives for multi-agent
 systems sharing pattern libraries.
@@ -63,7 +62,7 @@ class TeamPriorities:
 "performance": 0.7,
 "style": 0.5,
 "warning": 0.6,
-}
+},
 )
 
 # Tag preferences (tags that should be prioritized)
@@ -71,8 +70,7 @@ class TeamPriorities:
 
 
 class ConflictResolver:
-"""
-Resolves conflicts between patterns from different agents.
+"""Resolves conflicts between patterns from different agents.
 
 When multiple agents contribute patterns that address the same issue
 but recommend different approaches, the ConflictResolver determines
@@ -105,6 +103,7 @@ class ConflictResolver:
 ... context={"team_priority": "readability", "code_complexity": "high"}
 ... )
 >>> print(f"Winner: {resolution.winning_pattern.name}")
+
 """
 
 def __init__(
@@ -112,12 +111,12 @@ class ConflictResolver:
 default_strategy: ResolutionStrategy = ResolutionStrategy.WEIGHTED_SCORE,
 team_priorities: TeamPriorities | None = None,
 ):
-"""
-Initialize the ConflictResolver.
+"""Initialize the ConflictResolver.
 
 Args:
 default_strategy: Strategy to use when not specified
 team_priorities: Team-configured priorities for resolution
+
 """
 self.default_strategy = default_strategy
 self.team_priorities = team_priorities or TeamPriorities()
@@ -129,8 +128,7 @@ class ConflictResolver:
 context: dict[str, Any] | None = None,
 strategy: ResolutionStrategy | None = None,
 ) -> ResolutionResult:
-"""
-Resolve conflict between multiple patterns.
+"""Resolve conflict between multiple patterns.
 
 Args:
 patterns: List of conflicting patterns (minimum 2)
@@ -142,6 +140,7 @@ class ConflictResolver:
 
 Raises:
 ValueError: If fewer than 2 patterns provided
+
 """
 if len(patterns) < 2:
 raise ValueError("Need at least 2 patterns to resolve conflict")
@@ -185,7 +184,6 @@ class ConflictResolver:
 strategy: ResolutionStrategy,
 ) -> dict[str, float]:
 """Calculate score for a pattern based on strategy"""
-
 scores: dict[str, float] = {}
 
 # Factor 1: Confidence score (0-1)
@@ -230,7 +228,6 @@ class ConflictResolver:
 context: dict[str, Any],
 ) -> float:
 """Calculate how well a pattern matches the current context"""
-
 if not context or not pattern.context:
 return 0.5  # Neutral if no context available
 
@@ -261,7 +258,6 @@ class ConflictResolver:
 context: dict[str, Any],
 ) -> float:
 """Calculate how well a pattern aligns with team priorities"""
-
 score = 0.5  # Start neutral
 
 # Check team priority in context
@@ -307,13 +303,12 @@ class ConflictResolver:
 strategy: ResolutionStrategy,
 ) -> str:
 """Generate human-readable reasoning for the resolution"""
-
 reasons = []
 
 # Strategy-specific reasoning
 if strategy == ResolutionStrategy.HIGHEST_CONFIDENCE:
 reasons.append(
-f"Selected '{winner.name}' with highest confidence ({winner.confidence:.0%})"
+f"Selected '{winner.name}' with highest confidence ({winner.confidence:.0%})",
 )
 elif strategy == ResolutionStrategy.MOST_RECENT:
 age = (datetime.now() - winner.discovered_at).days
@@ -321,7 +316,7 @@ class ConflictResolver:
 elif strategy == ResolutionStrategy.BEST_CONTEXT_MATCH:
 reasons.append(
 f"Selected '{winner.name}' as best match for current context "
-f"(match score: {scores['context_match']:.0%})"
+f"(match score: {scores['context_match']:.0%})",
 )
 elif strategy == ResolutionStrategy.TEAM_PRIORITY:
 team_priority = context.get("team_priority", "balanced")
@@ -334,7 +329,7 @@ class ConflictResolver:
 )[:2]
 factor_desc = ", ".join(f"{k}: {v:.0%}" for k, v in top_factors)
 reasons.append(
-f"Selected '{winner.name}' based on weighted scoring (top factors: {factor_desc})"
+f"Selected '{winner.name}' based on weighted scoring (top factors: {factor_desc})",
 )
 
 # Add comparison to losers
@@ -346,7 +341,6 @@ class ConflictResolver:
 
 def get_resolution_stats(self) -> dict[str, Any]:
 """Get statistics about resolution history"""
-
 if not self.resolution_history:
 return {
 "total_resolutions": 0,
@@ -397,8 +391,7 @@ class AgentTask:
 
 
 class AgentCoordinator:
-"""
-Redis-backed coordinator for multi-agent teams.
+"""Redis-backed coordinator for multi-agent teams.
 
 Enables real-time coordination between agents using Redis short-term memory:
 - Task distribution and claiming
@@ -425,6 +418,7 @@ class AgentCoordinator:
 >>> if task:
 ...     # Do work...
 ...     coordinator.complete_task(task.task_id, {"issues_found": 3})
+
 """
 
 def __init__(
@@ -433,13 +427,13 @@ class AgentCoordinator:
 team_id: str,
 conflict_resolver: ConflictResolver | None = None,
 ):
-"""
-Initialize the coordinator.
+"""Initialize the coordinator.
 
 Args:
 short_term_memory: RedisShortTermMemory instance
 team_id: Unique identifier for this team
 conflict_resolver: Optional ConflictResolver for pattern conflicts
+
 """
 from .redis_memory import AccessTier, AgentCredentials
 
@@ -457,14 +451,14 @@ class AgentCoordinator:
 self._active_agents: dict[str, datetime] = {}
 
 def add_task(self, task: AgentTask) -> bool:
-"""
-Add a task to the queue for agents to claim.
+"""Add a task to the queue for agents to claim.
 
 Args:
 task: The task to add
 
 Returns:
 True if added successfully
+
 """
 task_data = {
 "task_id": task.task_id,
@@ -485,14 +479,14 @@ class AgentCoordinator:
 return bool(result)
 
 def get_pending_tasks(self, task_type: str | None = None) -> list[AgentTask]:
-"""
-Get all pending tasks, optionally filtered by type.
+"""Get all pending tasks, optionally filtered by type.
 
 Args:
 task_type: Filter by task type
 
 Returns:
 List of pending AgentTask objects
+
 """
 # In a real implementation, we'd scan Redis keys
 # For now, this is a simplified version
@@ -516,7 +510,7 @@ class AgentCoordinator:
 status=task_data.get("status", "pending"),
 priority=task_data.get("priority", 5),
 context=task_data.get("context", {}),
-)
+),
 )
 
 return sorted(tasks, key=lambda t: t.priority, reverse=True)
@@ -526,8 +520,7 @@ class AgentCoordinator:
 agent_id: str,
 task_type: str | None = None,
 ) -> AgentTask | None:
-"""
-Claim a pending task for an agent.
+"""Claim a pending task for an agent.
 
 Uses atomic operations to prevent race conditions.
 
@@ -537,6 +530,7 @@ class AgentCoordinator:
 
 Returns:
 The claimed task, or None if no tasks available
+
 """
 pending = self.get_pending_tasks(task_type)
 
@@ -576,8 +570,7 @@ class AgentCoordinator:
 result: dict[str, Any],
 agent_id: str | None = None,
 ) -> bool:
-"""
-Mark a task as completed with results.
+"""Mark a task as completed with results.
 
 Args:
 task_id: Task to complete
@@ -586,6 +579,7 @@ class AgentCoordinator:
 
 Returns:
 True if completed successfully
+
 """
 task_key = f"task:{self.team_id}:{task_id}"
 current = self.memory.retrieve(task_key, self._credentials)
@@ -619,8 +613,7 @@ class AgentCoordinator:
 return False
 
 def register_agent(self, agent_id: str, capabilities: list[str] | None = None) -> bool:
-"""
-Register an agent with the team.
+"""Register an agent with the team.
 
 Args:
 agent_id: Unique agent identifier
@@ -628,6 +621,7 @@ class AgentCoordinator:
 
 Returns:
 True if registered successfully
+
 """
 self._active_agents[agent_id] = datetime.now()
 
@@ -644,14 +638,14 @@ class AgentCoordinator:
 return bool(result)
 
 def heartbeat(self, agent_id: str) -> bool:
-"""
-Send heartbeat to indicate agent is still active.
+"""Send heartbeat to indicate agent is still active.
 
 Args:
 agent_id: Agent sending heartbeat
 
 Returns:
 True if heartbeat recorded
+
 """
 self._active_agents[agent_id] = datetime.now()
 
@@ -663,14 +657,14 @@ class AgentCoordinator:
 return bool(result)
 
 def get_active_agents(self, timeout_seconds: int = 300) -> list[str]:
-"""
-Get list of recently active agents.
+"""Get list of recently active agents.
 
 Args:
 timeout_seconds: Consider agents inactive after this duration
 
 Returns:
 List of active agent IDs
+
 """
 cutoff = datetime.now()
 active = []
@@ -682,8 +676,7 @@ class AgentCoordinator:
 return active
 
 def broadcast(self, message_type: str, data: dict[str, Any]) -> bool:
-"""
-Broadcast a message to all agents in the team.
+"""Broadcast a message to all agents in the team.
 
 Args:
 message_type: Type of message
@@ -691,6 +684,7 @@ class AgentCoordinator:
 
 Returns:
 True if broadcast sent
+
 """
 result = self.memory.send_signal(
 signal_type=message_type,
@@ -700,14 +694,14 @@ class AgentCoordinator:
 return bool(result)
 
 def aggregate_results(self, task_type: str | None = None) -> dict[str, Any]:
-"""
-Aggregate results from completed tasks.
+"""Aggregate results from completed tasks.
 
 Args:
 task_type: Optional filter by task type
 
 Returns:
 Aggregated results summary
+
 """
 # Get completion signals
 completions = self.memory.receive_signals(
@@ -742,8 +736,7 @@
 
 
 class TeamSession:
-"""
-A collaborative session for multiple agents working together.
+"""A collaborative session for multiple agents working together.
 
 Example:
 >>> from empathy_os import get_redis_memory, TeamSession
@@ -763,6 +756,7 @@ class TeamSession:
 >>>
 >>> # Get context from session
 >>> scope = session.get("analysis_scope")
+
 """
 
 def __init__(
@@ -771,13 +765,13 @@ class TeamSession:
 session_id: str,
 purpose: str = "",
 ):
-"""
-Create or join a team session.
+"""Create or join a team session.
 
 Args:
 short_term_memory: RedisShortTermMemory instance
 session_id: Unique session identifier
 purpose: Description of what this session is for
+
 """
 from .redis_memory import AccessTier, AgentCredentials
 
@@ -810,8 +804,7 @@ class TeamSession:
 return dict(result) if result else None
 
 def share(self, key: str, data: Any) -> bool:
-"""
-Share data with all agents in the session.
+"""Share data with all agents in the session.
 
 Args:
 key: Unique key for this data
@@ -819,24 +812,25 @@ class TeamSession:
 
 Returns:
 True if shared successfully
+
 """
 return bool(
 self.memory.stash(
 f"session:{self.session_id}:{key}",
 data,
 self._credentials,
-)
+),
 )
 
 def get(self, key: str) -> Any | None:
-"""
-Get shared data from the session.
+"""Get shared data from the session.
 
 Args:
 key: Key of the shared data
 
 Returns:
 The data, or None if not found
+
 """
 return self.memory.retrieve(
 f"session:{self.session_id}:{key}",
@@ -844,8 +838,7 @@ class TeamSession:
 )
 
 def signal(self, signal_type: str, data: dict[str, Any]) -> bool:
-"""
-Send a signal to session participants.
+"""Send a signal to session participants.
 
 Args:
 signal_type: Type of signal
@@ -853,24 +846,25 @@ class TeamSession:
 
 Returns:
 True if sent
+
 """
 return bool(
 self.memory.send_signal(
 signal_type=signal_type,
 data={"session_id": self.session_id, **data},
 credentials=self._credentials,
-)
+),
 )
 
 def get_signals(self, signal_type: str | None = None) -> list[dict]:
-"""
-Get signals from the session.
+"""Get signals from the session.
 
 Args:
 signal_type: Optional filter
 
 Returns:
 List of signals
+
 """
 result = self.memory.receive_signals(self._credentials, signal_type=signal_type)
 return list(result) if result else []
```