gobby 0.2.7__py3-none-any.whl → 0.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. gobby/adapters/claude_code.py +96 -35
  2. gobby/adapters/gemini.py +140 -38
  3. gobby/agents/isolation.py +130 -0
  4. gobby/agents/registry.py +11 -0
  5. gobby/agents/session.py +1 -0
  6. gobby/agents/spawn_executor.py +43 -13
  7. gobby/agents/spawners/macos.py +26 -1
  8. gobby/cli/__init__.py +0 -2
  9. gobby/cli/memory.py +185 -0
  10. gobby/clones/git.py +177 -0
  11. gobby/config/skills.py +31 -0
  12. gobby/hooks/event_handlers.py +109 -10
  13. gobby/hooks/hook_manager.py +19 -1
  14. gobby/install/gemini/hooks/hook_dispatcher.py +74 -15
  15. gobby/mcp_proxy/instructions.py +2 -2
  16. gobby/mcp_proxy/registries.py +21 -4
  17. gobby/mcp_proxy/tools/agent_messaging.py +93 -44
  18. gobby/mcp_proxy/tools/agents.py +45 -9
  19. gobby/mcp_proxy/tools/artifacts.py +43 -9
  20. gobby/mcp_proxy/tools/sessions/_commits.py +31 -24
  21. gobby/mcp_proxy/tools/sessions/_crud.py +5 -5
  22. gobby/mcp_proxy/tools/sessions/_handoff.py +45 -41
  23. gobby/mcp_proxy/tools/sessions/_messages.py +35 -7
  24. gobby/mcp_proxy/tools/spawn_agent.py +44 -6
  25. gobby/mcp_proxy/tools/tasks/_context.py +18 -0
  26. gobby/mcp_proxy/tools/tasks/_crud.py +13 -6
  27. gobby/mcp_proxy/tools/tasks/_lifecycle.py +29 -14
  28. gobby/mcp_proxy/tools/tasks/_session.py +22 -7
  29. gobby/mcp_proxy/tools/workflows.py +84 -34
  30. gobby/mcp_proxy/tools/worktrees.py +32 -7
  31. gobby/memory/extractor.py +15 -1
  32. gobby/runner.py +13 -0
  33. gobby/servers/routes/mcp/hooks.py +50 -3
  34. gobby/servers/websocket.py +57 -1
  35. gobby/sessions/analyzer.py +2 -2
  36. gobby/sessions/manager.py +9 -0
  37. gobby/sessions/transcripts/gemini.py +100 -34
  38. gobby/storage/database.py +9 -2
  39. gobby/storage/memories.py +32 -21
  40. gobby/storage/migrations.py +23 -4
  41. gobby/storage/sessions.py +4 -2
  42. gobby/storage/skills.py +43 -3
  43. gobby/workflows/detection_helpers.py +38 -24
  44. gobby/workflows/enforcement/blocking.py +13 -1
  45. gobby/workflows/engine.py +93 -0
  46. gobby/workflows/evaluator.py +110 -0
  47. gobby/workflows/hooks.py +41 -0
  48. gobby/workflows/memory_actions.py +11 -0
  49. gobby/workflows/safe_evaluator.py +8 -0
  50. gobby/workflows/summary_actions.py +123 -50
  51. {gobby-0.2.7.dist-info → gobby-0.2.8.dist-info}/METADATA +1 -1
  52. {gobby-0.2.7.dist-info → gobby-0.2.8.dist-info}/RECORD +56 -80
  53. gobby/cli/tui.py +0 -34
  54. gobby/tui/__init__.py +0 -5
  55. gobby/tui/api_client.py +0 -278
  56. gobby/tui/app.py +0 -329
  57. gobby/tui/screens/__init__.py +0 -25
  58. gobby/tui/screens/agents.py +0 -333
  59. gobby/tui/screens/chat.py +0 -450
  60. gobby/tui/screens/dashboard.py +0 -377
  61. gobby/tui/screens/memory.py +0 -305
  62. gobby/tui/screens/metrics.py +0 -231
  63. gobby/tui/screens/orchestrator.py +0 -903
  64. gobby/tui/screens/sessions.py +0 -412
  65. gobby/tui/screens/tasks.py +0 -440
  66. gobby/tui/screens/workflows.py +0 -289
  67. gobby/tui/screens/worktrees.py +0 -174
  68. gobby/tui/widgets/__init__.py +0 -21
  69. gobby/tui/widgets/chat.py +0 -210
  70. gobby/tui/widgets/conductor.py +0 -104
  71. gobby/tui/widgets/menu.py +0 -132
  72. gobby/tui/widgets/message_panel.py +0 -160
  73. gobby/tui/widgets/review_gate.py +0 -224
  74. gobby/tui/widgets/task_tree.py +0 -99
  75. gobby/tui/widgets/token_budget.py +0 -166
  76. gobby/tui/ws_client.py +0 -258
  77. {gobby-0.2.7.dist-info → gobby-0.2.8.dist-info}/WHEEL +0 -0
  78. {gobby-0.2.7.dist-info → gobby-0.2.8.dist-info}/entry_points.txt +0 -0
  79. {gobby-0.2.7.dist-info → gobby-0.2.8.dist-info}/licenses/LICENSE.md +0 -0
  80. {gobby-0.2.7.dist-info → gobby-0.2.8.dist-info}/top_level.txt +0 -0
gobby/storage/skills.py CHANGED
@@ -52,6 +52,11 @@ class Skill:
52
52
  - source_type: 'local', 'github', 'url', 'zip', 'filesystem'
53
53
  - source_ref: Git ref for updates (branch/tag/commit)
54
54
 
55
+ Hub Tracking:
56
+ - hub_name: Name of the hub the skill originated from
57
+ - hub_slug: Slug of the hub the skill originated from
58
+ - hub_version: Version of the skill as reported by the hub
59
+
55
60
  Gobby-specific:
56
61
  - enabled: Toggle skill on/off without removing
57
62
  - project_id: NULL for global, else project-scoped
@@ -79,6 +84,11 @@ class Skill:
79
84
  source_type: SkillSourceType | None = None
80
85
  source_ref: str | None = None
81
86
 
87
+ # Hub Tracking
88
+ hub_name: str | None = None
89
+ hub_slug: str | None = None
90
+ hub_version: str | None = None
91
+
82
92
  # Gobby-specific
83
93
  enabled: bool = True
84
94
  project_id: str | None = None
@@ -117,6 +127,9 @@ class Skill:
117
127
  source_path=row["source_path"],
118
128
  source_type=row["source_type"],
119
129
  source_ref=row["source_ref"],
130
+ hub_name=row["hub_name"] if "hub_name" in row.keys() else None,
131
+ hub_slug=row["hub_slug"] if "hub_slug" in row.keys() else None,
132
+ hub_version=row["hub_version"] if "hub_version" in row.keys() else None,
120
133
  enabled=bool(row["enabled"]),
121
134
  project_id=row["project_id"],
122
135
  created_at=row["created_at"],
@@ -142,6 +155,9 @@ class Skill:
142
155
  "source_path": self.source_path,
143
156
  "source_type": self.source_type,
144
157
  "source_ref": self.source_ref,
158
+ "hub_name": self.hub_name,
159
+ "hub_slug": self.hub_slug,
160
+ "hub_version": self.hub_version,
145
161
  "enabled": self.enabled,
146
162
  "project_id": self.project_id,
147
163
  "created_at": self.created_at,
@@ -387,6 +403,9 @@ class LocalSkillManager:
387
403
  source_path: str | None = None,
388
404
  source_type: SkillSourceType | None = None,
389
405
  source_ref: str | None = None,
406
+ hub_name: str | None = None,
407
+ hub_slug: str | None = None,
408
+ hub_version: str | None = None,
390
409
  enabled: bool = True,
391
410
  project_id: str | None = None,
392
411
  ) -> Skill:
@@ -404,6 +423,9 @@ class LocalSkillManager:
404
423
  source_path: Original file path or URL
405
424
  source_type: Source type ('local', 'github', 'url', 'zip', 'filesystem')
406
425
  source_ref: Git ref for updates
426
+ hub_name: Optional hub name
427
+ hub_slug: Optional hub slug
428
+ hub_version: Optional hub version
407
429
  enabled: Whether skill is active
408
430
  project_id: Project scope (None for global)
409
431
 
@@ -434,9 +456,9 @@ class LocalSkillManager:
434
456
  INSERT INTO skills (
435
457
  id, name, description, content, version, license,
436
458
  compatibility, allowed_tools, metadata, source_path,
437
- source_type, source_ref, enabled, project_id,
438
- created_at, updated_at
439
- ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
459
+ source_type, source_ref, hub_name, hub_slug, hub_version,
460
+ enabled, project_id, created_at, updated_at
461
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
440
462
  """,
441
463
  (
442
464
  skill_id,
@@ -451,6 +473,9 @@ class LocalSkillManager:
451
473
  source_path,
452
474
  source_type,
453
475
  source_ref,
476
+ hub_name,
477
+ hub_slug,
478
+ hub_version,
454
479
  enabled,
455
480
  project_id,
456
481
  now,
@@ -530,6 +555,9 @@ class LocalSkillManager:
530
555
  source_path: str | None = _UNSET,
531
556
  source_type: SkillSourceType | None = _UNSET,
532
557
  source_ref: str | None = _UNSET,
558
+ hub_name: str | None = _UNSET,
559
+ hub_slug: str | None = _UNSET,
560
+ hub_version: str | None = _UNSET,
533
561
  enabled: bool | None = None,
534
562
  ) -> Skill:
535
563
  """Update an existing skill.
@@ -547,6 +575,9 @@ class LocalSkillManager:
547
575
  source_path: New source path (use _UNSET to leave unchanged, None to clear)
548
576
  source_type: New source type (use _UNSET to leave unchanged, None to clear)
549
577
  source_ref: New source ref (use _UNSET to leave unchanged, None to clear)
578
+ hub_name: New hub name (use _UNSET to leave unchanged, None to clear)
579
+ hub_slug: New hub slug (use _UNSET to leave unchanged, None to clear)
580
+ hub_version: New hub version (use _UNSET to leave unchanged, None to clear)
550
581
  enabled: New enabled state (optional)
551
582
 
552
583
  Returns:
@@ -591,6 +622,15 @@ class LocalSkillManager:
591
622
  if source_ref is not _UNSET:
592
623
  updates.append("source_ref = ?")
593
624
  params.append(source_ref)
625
+ if hub_name is not _UNSET:
626
+ updates.append("hub_name = ?")
627
+ params.append(hub_name)
628
+ if hub_slug is not _UNSET:
629
+ updates.append("hub_slug = ?")
630
+ params.append(hub_slug)
631
+ if hub_version is not _UNSET:
632
+ updates.append("hub_version = ?")
633
+ params.append(hub_version)
594
634
  if enabled is not None:
595
635
  updates.append("enabled = ?")
596
636
  params.append(enabled)
@@ -7,7 +7,7 @@ and update workflow state variables accordingly.
7
7
  """
8
8
 
9
9
  import logging
10
- from typing import TYPE_CHECKING
10
+ from typing import TYPE_CHECKING, Any
11
11
 
12
12
  if TYPE_CHECKING:
13
13
  from gobby.hooks.events import HookEvent
@@ -44,30 +44,24 @@ def detect_task_claim(
44
44
  if not event.data:
45
45
  return
46
46
 
47
- tool_name = event.data.get("tool_name", "")
48
47
  tool_input = event.data.get("tool_input", {}) or {}
49
- # Claude Code sends "tool_result", but we also check "tool_output" for compatibility
50
- tool_output = event.data.get("tool_result") or event.data.get("tool_output") or {}
51
-
52
- # Check if this is a gobby-tasks call via MCP proxy
53
- # Tool name could be "call_tool" (from legacy) or "mcp__gobby__call_tool" (direct)
54
- if tool_name not in ("call_tool", "mcp__gobby__call_tool"):
55
- return
48
+ # Use normalized tool_output (adapters normalize tool_result/tool_response)
49
+ tool_output = event.data.get("tool_output") or {}
56
50
 
57
- # Check server is gobby-tasks
58
- server_name = tool_input.get("server_name", "")
51
+ # Use normalized MCP fields from adapter layer
52
+ # Adapters extract these from CLI-specific formats
53
+ server_name = event.data.get("mcp_server", "")
59
54
  if server_name != "gobby-tasks":
60
55
  return
61
56
 
62
- # Check inner tool name
63
- inner_tool_name = tool_input.get("tool_name", "")
57
+ inner_tool_name = event.data.get("mcp_tool", "")
64
58
 
65
59
  # Handle close_task - clears task_claimed when task is closed
66
60
  # Note: Claude Code doesn't include tool_result in post-tool-use hooks, so for CC
67
61
  # the workflow state is updated directly in the MCP proxy's close_task function.
68
62
  # This detection provides a fallback for CLIs that do report tool results (Gemini/Codex).
69
63
  if inner_tool_name == "close_task":
70
- tool_output = event.data.get("tool_result") or event.data.get("tool_output") or {}
64
+ # tool_output already normalized at top of function
71
65
 
72
66
  # If no tool output, skip - can't verify success
73
67
  # The MCP proxy's close_task handles state clearing for successful closes
@@ -254,6 +248,11 @@ def detect_mcp_call(event: "HookEvent", state: "WorkflowState") -> None:
254
248
  This enables workflow conditions like:
255
249
  when: "mcp_called('gobby-memory', 'recall')"
256
250
 
251
+ Uses normalized fields from adapters:
252
+ - mcp_server: The MCP server name (normalized from both Claude and Gemini formats)
253
+ - mcp_tool: The tool name on the server (normalized from both formats)
254
+ - tool_output: The tool result (normalized from tool_result/tool_response)
255
+
257
256
  Args:
258
257
  event: The AFTER_TOOL hook event
259
258
  state: Current workflow state (modified in place)
@@ -261,21 +260,36 @@ def detect_mcp_call(event: "HookEvent", state: "WorkflowState") -> None:
261
260
  if not event.data:
262
261
  return
263
262
 
264
- tool_name = event.data.get("tool_name", "")
265
- tool_input = event.data.get("tool_input", {}) or {}
266
- # Claude Code sends "tool_result", but we also check "tool_output" for compatibility
267
- tool_output = event.data.get("tool_result") or event.data.get("tool_output") or {}
263
+ # Use normalized fields from adapter layer
264
+ # Adapters extract these from CLI-specific formats:
265
+ # - Claude: tool_input.server_name/tool_name mcp_server/mcp_tool
266
+ # - Gemini: mcp_context.server_name/tool_name mcp_server/mcp_tool
267
+ server_name = event.data.get("mcp_server", "")
268
+ inner_tool = event.data.get("mcp_tool", "")
268
269
 
269
- # Check for MCP proxy call
270
- if tool_name not in ("call_tool", "mcp__gobby__call_tool"):
270
+ if not server_name or not inner_tool:
271
271
  return
272
272
 
273
- server_name = tool_input.get("server_name", "")
274
- inner_tool = tool_input.get("tool_name", "")
273
+ # Use normalized tool_output (adapters normalize tool_result/tool_response)
274
+ tool_output = event.data.get("tool_output") or {}
275
275
 
276
- if not server_name or not inner_tool:
277
- return
276
+ _track_mcp_call(state, server_name, inner_tool, tool_output)
277
+
278
+
279
+ def _track_mcp_call(
280
+ state: "WorkflowState",
281
+ server_name: str,
282
+ inner_tool: str,
283
+ tool_output: dict[str, Any] | Any,
284
+ ) -> None:
285
+ """Track a successful MCP call in workflow state.
278
286
 
287
+ Args:
288
+ state: Current workflow state (modified in place)
289
+ server_name: MCP server name (e.g., "gobby-sessions")
290
+ inner_tool: Tool name on the server (e.g., "get_current_session")
291
+ tool_output: Tool output to check for errors
292
+ """
279
293
  # Check if call succeeded (skip tracking failed calls)
280
294
  if isinstance(tool_output, dict):
281
295
  if tool_output.get("error") or tool_output.get("status") == "error":
@@ -5,6 +5,7 @@ Provides configurable tool blocking based on workflow state and conditions.
5
5
 
6
6
  from __future__ import annotations
7
7
 
8
+ import json
8
9
  import logging
9
10
  from collections.abc import Callable
10
11
  from typing import TYPE_CHECKING, Any
@@ -241,7 +242,18 @@ async def block_tools(
241
242
  if mcp_key in mcp_tools:
242
243
  rule_matches = True
243
244
  # For MCP tools, the actual arguments are in tool_input.arguments
244
- mcp_tool_args = tool_input.get("arguments", {}) or {}
245
+ # Arguments may be a JSON string (Claude Code serialization) or dict
246
+ raw_args = tool_input.get("arguments")
247
+ if isinstance(raw_args, str):
248
+ try:
249
+ parsed = json.loads(raw_args)
250
+ mcp_tool_args = parsed if isinstance(parsed, dict) else {}
251
+ except (json.JSONDecodeError, TypeError):
252
+ mcp_tool_args = {}
253
+ elif isinstance(raw_args, dict):
254
+ mcp_tool_args = raw_args
255
+ else:
256
+ mcp_tool_args = {}
245
257
 
246
258
  if not rule_matches:
247
259
  continue
gobby/workflows/engine.py CHANGED
@@ -494,3 +494,96 @@ class WorkflowEngine:
494
494
  def _detect_mcp_call(self, event: HookEvent, state: WorkflowState) -> None:
495
495
  """Track MCP tool calls by server/tool for workflow conditions."""
496
496
  detect_mcp_call(event, state)
497
+
498
def activate_workflow(
    self,
    workflow_name: str,
    session_id: str,
    project_path: Path | None = None,
    variables: dict[str, Any] | None = None,
) -> dict[str, Any]:
    """Activate a step-based workflow for a session.

    Used internally during session startup for terminal-mode agents that
    have a ``workflow_name`` configured; creates the initial workflow state.

    Args:
        workflow_name: Name of the workflow to activate.
        session_id: Session ID to activate for.
        project_path: Optional project path for workflow discovery.
        variables: Optional initial variables merged over workflow defaults.

    Returns:
        Dict with success status and workflow info.
    """
    # Resolve the workflow definition; bail out early when unknown.
    wf_def = self.loader.load_workflow(workflow_name, project_path)
    if not wf_def:
        logger.warning(f"Workflow '{workflow_name}' not found for auto-activation")
        return {"success": False, "error": f"Workflow '{workflow_name}' not found"}

    # Lifecycle workflows run automatically on events; never activate them here.
    if wf_def.type == "lifecycle":
        logger.debug(f"Skipping auto-activation of lifecycle workflow '{workflow_name}'")
        return {
            "success": False,
            "error": f"Workflow '{workflow_name}' is lifecycle type (auto-runs on events)",
        }

    # Refuse to clobber an already-active step workflow for this session.
    prior = self.state_manager.get_state(session_id)
    if prior and prior.workflow_name != "__lifecycle__":
        prior_def = self.loader.load_workflow(prior.workflow_name, project_path)
        # Only a lifecycle-typed existing workflow may be replaced.
        if not (prior_def and prior_def.type == "lifecycle"):
            logger.warning(
                f"Session {session_id} already has workflow '{prior.workflow_name}' active"
            )
            return {
                "success": False,
                "error": f"Session already has workflow '{prior.workflow_name}' active",
            }

    # A step workflow must define at least one step; fail fast otherwise.
    if not wf_def.steps:
        logger.error(f"Workflow '{workflow_name}' has no steps defined")
        return {
            "success": False,
            "error": f"Workflow '{workflow_name}' has no steps defined",
        }
    first_step = wf_def.steps[0].name

    # Passed-in variables win over the workflow's declared defaults.
    merged_vars = {**wf_def.variables, **(variables or {})}

    fresh_state = WorkflowState(
        session_id=session_id,
        workflow_name=workflow_name,
        step=first_step,
        step_entered_at=datetime.now(UTC),
        step_action_count=0,
        total_action_count=0,
        artifacts={},
        observations=[],
        reflection_pending=False,
        context_injected=False,
        variables=merged_vars,
        task_list=None,
        current_task_index=0,
        files_modified_this_task=0,
    )
    self.state_manager.save_state(fresh_state)
    logger.info(f"Auto-activated workflow '{workflow_name}' for session {session_id}")

    return {
        "success": True,
        "session_id": session_id,
        "workflow": workflow_name,
        "step": first_step,
        "steps": [s.name for s in wf_def.steps],
        "variables": merged_vars,
    }
@@ -348,6 +348,116 @@ class ConditionEvaluator:
348
348
 
349
349
  allowed_globals["mcp_called"] = _mcp_called
350
350
 
351
+ def _mcp_result_is_null(server: str, tool: str) -> bool:
352
+ """Check if MCP tool result is null/missing.
353
+
354
+ Used in workflow conditions like:
355
+ when: "mcp_result_is_null('gobby-tasks', 'suggest_next_task')"
356
+
357
+ Args:
358
+ server: MCP server name
359
+ tool: Tool name
360
+
361
+ Returns:
362
+ True if the result is null/missing, False if result exists.
363
+ """
364
+ variables = context.get("variables", {})
365
+ if isinstance(variables, dict):
366
+ mcp_results = variables.get("mcp_results", {})
367
+ else:
368
+ mcp_results = getattr(variables, "mcp_results", {})
369
+
370
+ if not isinstance(mcp_results, dict):
371
+ return True # No results means null
372
+
373
+ server_results = mcp_results.get(server, {})
374
+ if not isinstance(server_results, dict):
375
+ return True
376
+
377
+ result = server_results.get(tool)
378
+ return result is None
379
+
380
+ allowed_globals["mcp_result_is_null"] = _mcp_result_is_null
381
+
382
+ def _mcp_failed(server: str, tool: str) -> bool:
383
+ """Check if MCP tool call failed.
384
+
385
+ Used in workflow conditions like:
386
+ when: "mcp_failed('gobby-agents', 'spawn_agent')"
387
+
388
+ Args:
389
+ server: MCP server name
390
+ tool: Tool name
391
+
392
+ Returns:
393
+ True if the result exists and indicates failure.
394
+ """
395
+ variables = context.get("variables", {})
396
+ if isinstance(variables, dict):
397
+ mcp_results = variables.get("mcp_results", {})
398
+ else:
399
+ mcp_results = getattr(variables, "mcp_results", {})
400
+
401
+ if not isinstance(mcp_results, dict):
402
+ return False # No results means we can't determine failure
403
+
404
+ server_results = mcp_results.get(server, {})
405
+ if not isinstance(server_results, dict):
406
+ return False
407
+
408
+ result = server_results.get(tool)
409
+ if result is None:
410
+ return False
411
+
412
+ # Check for failure indicators
413
+ if isinstance(result, dict):
414
+ if result.get("success") is False:
415
+ return True
416
+ if result.get("error"):
417
+ return True
418
+ if result.get("status") == "failed":
419
+ return True
420
+ return False
421
+
422
+ allowed_globals["mcp_failed"] = _mcp_failed
423
+
424
+ def _mcp_result_has(server: str, tool: str, field: str, value: Any) -> bool:
425
+ """Check if MCP tool result has a specific field value.
426
+
427
+ Used in workflow conditions like:
428
+ when: "mcp_result_has('gobby-tasks', 'wait_for_task', 'timed_out', True)"
429
+
430
+ Args:
431
+ server: MCP server name
432
+ tool: Tool name
433
+ field: Field name to check
434
+ value: Expected value (supports bool, str, int, float)
435
+
436
+ Returns:
437
+ True if the field equals the expected value.
438
+ """
439
+ variables = context.get("variables", {})
440
+ if isinstance(variables, dict):
441
+ mcp_results = variables.get("mcp_results", {})
442
+ else:
443
+ mcp_results = getattr(variables, "mcp_results", {})
444
+
445
+ if not isinstance(mcp_results, dict):
446
+ return False
447
+
448
+ server_results = mcp_results.get(server, {})
449
+ if not isinstance(server_results, dict):
450
+ return False
451
+
452
+ result = server_results.get(tool)
453
+ if not isinstance(result, dict):
454
+ return False
455
+
456
+ actual_value = result.get(field)
457
+ return bool(actual_value == value)
458
+
459
+ allowed_globals["mcp_result_has"] = _mcp_result_has
460
+
351
461
  # eval used with restricted allowed_globals for workflow conditions
352
462
  # nosec B307: eval is intentional here for DSL evaluation with
353
463
  # restricted globals (__builtins__={}) and controlled workflow conditions
gobby/workflows/hooks.py CHANGED
@@ -167,3 +167,44 @@ class WorkflowHookHandler:
167
167
  except Exception as e:
168
168
  logger.error(f"Error handling lifecycle workflow: {e}", exc_info=True)
169
169
  return HookResponse(decision="allow")
170
+
171
+ def activate_workflow(
172
+ self,
173
+ workflow_name: str,
174
+ session_id: str,
175
+ project_path: str | None = None,
176
+ variables: dict[str, Any] | None = None,
177
+ ) -> dict[str, Any]:
178
+ """
179
+ Activate a step-based workflow for a session.
180
+
181
+ This is used during session startup for terminal-mode agents that have
182
+ a workflow_name set. It's a synchronous wrapper around the engine's
183
+ activate_workflow method.
184
+
185
+ Args:
186
+ workflow_name: Name of the workflow to activate
187
+ session_id: Session ID to activate for
188
+ project_path: Optional project path for workflow discovery
189
+ variables: Optional initial variables to merge with workflow defaults
190
+
191
+ Returns:
192
+ Dict with success status and workflow info
193
+ """
194
+ if not self._enabled:
195
+ return {"success": False, "error": "Workflow engine is disabled"}
196
+
197
+ from pathlib import Path
198
+
199
+ path = Path(project_path) if project_path else None
200
+
201
+ try:
202
+ return self.engine.activate_workflow(
203
+ workflow_name=workflow_name,
204
+ session_id=session_id,
205
+ project_path=path,
206
+ variables=variables,
207
+ )
208
+ except Exception as e:
209
+ logger.error(f"Error activating workflow: {e}", exc_info=True)
210
+ return {"success": False, "error": str(e)}
@@ -205,6 +205,17 @@ async def memory_recall_relevant(
205
205
  # Filter out memories that have already been injected in this session
206
206
  new_memories = [m for m in memories if m.id not in injected_ids]
207
207
 
208
+ # Deduplicate by content to avoid showing same content with different IDs
209
+ # (can happen when same content was stored with different project_ids)
210
+ seen_content: set[str] = set()
211
+ unique_memories = []
212
+ for m in new_memories:
213
+ normalized = m.content.strip()
214
+ if normalized not in seen_content:
215
+ seen_content.add(normalized)
216
+ unique_memories.append(m)
217
+ new_memories = unique_memories
218
+
208
219
  if not new_memories:
209
220
  logger.debug(
210
221
  f"memory_recall_relevant: All {len(memories)} memories already injected, skipping"
@@ -178,6 +178,14 @@ class SafeExpressionEvaluator(ast.NodeVisitor):
178
178
  except (KeyError, IndexError, TypeError) as e:
179
179
  raise ValueError(f"Subscript access failed: {e}") from e
180
180
 
181
def visit_List(self, node: ast.List) -> list[Any]:
    """Evaluate a list display (e.g., ['a', 'b', 'c']) by visiting each element."""
    return list(map(self.visit, node.elts))
184
+
185
def visit_Tuple(self, node: ast.Tuple) -> tuple[Any, ...]:
    """Evaluate a tuple display (e.g., ('a', 'b', 'c')) by visiting each element."""
    return tuple(map(self.visit, node.elts))
188
+
181
189
  def generic_visit(self, node: ast.AST) -> Any:
182
190
  """Reject any unsupported AST nodes."""
183
191
  raise ValueError(f"Unsupported expression type: {type(node).__name__}")