flock_core-0.5.11-py3-none-any.whl → flock_core-0.5.20-py3-none-any.whl

This diff compares publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of flock-core has been flagged by the registry; see the registry page for details.

Files changed (91)
  1. flock/__init__.py +1 -1
  2. flock/agent/__init__.py +30 -0
  3. flock/agent/builder_helpers.py +192 -0
  4. flock/agent/builder_validator.py +169 -0
  5. flock/agent/component_lifecycle.py +325 -0
  6. flock/agent/context_resolver.py +141 -0
  7. flock/agent/mcp_integration.py +212 -0
  8. flock/agent/output_processor.py +304 -0
  9. flock/api/__init__.py +20 -0
  10. flock/{api_models.py → api/models.py} +0 -2
  11. flock/{service.py → api/service.py} +3 -3
  12. flock/cli.py +2 -2
  13. flock/components/__init__.py +41 -0
  14. flock/components/agent/__init__.py +22 -0
  15. flock/{components.py → components/agent/base.py} +4 -3
  16. flock/{utility/output_utility_component.py → components/agent/output_utility.py} +12 -7
  17. flock/components/orchestrator/__init__.py +22 -0
  18. flock/{orchestrator_component.py → components/orchestrator/base.py} +5 -293
  19. flock/components/orchestrator/circuit_breaker.py +95 -0
  20. flock/components/orchestrator/collection.py +143 -0
  21. flock/components/orchestrator/deduplication.py +78 -0
  22. flock/core/__init__.py +30 -0
  23. flock/core/agent.py +953 -0
  24. flock/{artifacts.py → core/artifacts.py} +1 -1
  25. flock/{context_provider.py → core/context_provider.py} +3 -3
  26. flock/core/orchestrator.py +1102 -0
  27. flock/{store.py → core/store.py} +99 -454
  28. flock/{subscription.py → core/subscription.py} +1 -1
  29. flock/dashboard/collector.py +5 -5
  30. flock/dashboard/graph_builder.py +7 -7
  31. flock/dashboard/routes/__init__.py +21 -0
  32. flock/dashboard/routes/control.py +327 -0
  33. flock/dashboard/routes/helpers.py +340 -0
  34. flock/dashboard/routes/themes.py +76 -0
  35. flock/dashboard/routes/traces.py +521 -0
  36. flock/dashboard/routes/websocket.py +108 -0
  37. flock/dashboard/service.py +43 -1316
  38. flock/engines/dspy/__init__.py +20 -0
  39. flock/engines/dspy/artifact_materializer.py +216 -0
  40. flock/engines/dspy/signature_builder.py +474 -0
  41. flock/engines/dspy/streaming_executor.py +858 -0
  42. flock/engines/dspy_engine.py +45 -1330
  43. flock/engines/examples/simple_batch_engine.py +2 -2
  44. flock/examples.py +7 -7
  45. flock/logging/logging.py +1 -16
  46. flock/models/__init__.py +10 -0
  47. flock/orchestrator/__init__.py +45 -0
  48. flock/{artifact_collector.py → orchestrator/artifact_collector.py} +3 -3
  49. flock/orchestrator/artifact_manager.py +168 -0
  50. flock/{batch_accumulator.py → orchestrator/batch_accumulator.py} +2 -2
  51. flock/orchestrator/component_runner.py +389 -0
  52. flock/orchestrator/context_builder.py +167 -0
  53. flock/{correlation_engine.py → orchestrator/correlation_engine.py} +2 -2
  54. flock/orchestrator/event_emitter.py +167 -0
  55. flock/orchestrator/initialization.py +184 -0
  56. flock/orchestrator/lifecycle_manager.py +226 -0
  57. flock/orchestrator/mcp_manager.py +202 -0
  58. flock/orchestrator/scheduler.py +189 -0
  59. flock/orchestrator/server_manager.py +234 -0
  60. flock/orchestrator/tracing.py +147 -0
  61. flock/storage/__init__.py +10 -0
  62. flock/storage/artifact_aggregator.py +158 -0
  63. flock/storage/in_memory/__init__.py +6 -0
  64. flock/storage/in_memory/artifact_filter.py +114 -0
  65. flock/storage/in_memory/history_aggregator.py +115 -0
  66. flock/storage/sqlite/__init__.py +10 -0
  67. flock/storage/sqlite/agent_history_queries.py +154 -0
  68. flock/storage/sqlite/consumption_loader.py +100 -0
  69. flock/storage/sqlite/query_builder.py +112 -0
  70. flock/storage/sqlite/query_params_builder.py +91 -0
  71. flock/storage/sqlite/schema_manager.py +168 -0
  72. flock/storage/sqlite/summary_queries.py +194 -0
  73. flock/utils/__init__.py +14 -0
  74. flock/utils/async_utils.py +67 -0
  75. flock/{runtime.py → utils/runtime.py} +3 -3
  76. flock/utils/time_utils.py +53 -0
  77. flock/utils/type_resolution.py +38 -0
  78. flock/{utilities.py → utils/utilities.py} +2 -2
  79. flock/utils/validation.py +57 -0
  80. flock/utils/visibility.py +79 -0
  81. flock/utils/visibility_utils.py +134 -0
  82. {flock_core-0.5.11.dist-info → flock_core-0.5.20.dist-info}/METADATA +18 -4
  83. {flock_core-0.5.11.dist-info → flock_core-0.5.20.dist-info}/RECORD +89 -33
  84. flock/agent.py +0 -1578
  85. flock/orchestrator.py +0 -1983
  86. /flock/{visibility.py → core/visibility.py} +0 -0
  87. /flock/{system_artifacts.py → models/system_artifacts.py} +0 -0
  88. /flock/{helper → utils}/cli_helper.py +0 -0
  89. {flock_core-0.5.11.dist-info → flock_core-0.5.20.dist-info}/WHEEL +0 -0
  90. {flock_core-0.5.11.dist-info → flock_core-0.5.20.dist-info}/entry_points.txt +0 -0
  91. {flock_core-0.5.11.dist-info → flock_core-0.5.20.dist-info}/licenses/LICENSE +0 -0
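
Most of this release is a package reorganization: top-level modules move under flock.core, flock.components, flock.orchestrator, flock.storage, flock.api, and flock.utils, and the monolithic flock/agent.py and flock/orchestrator.py are deleted in favor of the new packages. As a minimal sketch of what the renames above imply for downstream imports (paths taken from the rename list and the import hunk below; whether 0.5.20 also keeps the old paths working via compatibility shims is not visible in this diff):

    # Hypothetical migration sketch, based solely on the renames listed above.
    # 0.5.11 import paths (now removed):
    #   from flock.artifacts import Artifact
    #   from flock.components import EngineComponent
    #   from flock.runtime import EvalInputs, EvalResult

    # 0.5.20 equivalents:
    from flock.core.artifacts import Artifact
    from flock.components.agent import EngineComponent
    from flock.utils.runtime import EvalInputs, EvalResult

The diff below covers flock/engines/dspy_engine.py (+45 -1330), the largest single change.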
@@ -2,23 +2,21 @@
  
  from __future__ import annotations
  
- import asyncio
  import json
  import os
- from collections import OrderedDict, defaultdict
  from collections.abc import Iterable, Mapping, Sequence
- from contextlib import nullcontext
- from datetime import UTC
  from typing import Any, Literal
  
  from pydantic import BaseModel, Field
  
- from flock.artifacts import Artifact
- from flock.components import EngineComponent
- from flock.dashboard.events import StreamingOutputEvent
+ from flock.components.agent import EngineComponent
+ from flock.core.artifacts import Artifact
+ from flock.engines.dspy.artifact_materializer import DSPyArtifactMaterializer
+ from flock.engines.dspy.signature_builder import DSPySignatureBuilder
+ from flock.engines.dspy.streaming_executor import DSPyStreamingExecutor
  from flock.logging.logging import get_logger
  from flock.registry import type_registry
- from flock.runtime import EvalInputs, EvalResult
+ from flock.utils.runtime import EvalInputs, EvalResult
  
  
  logger = get_logger(__name__)
@@ -157,6 +155,19 @@ class DSPyEngine(EngineComponent):
          description="Enable caching of DSPy program results",
      )
  
+     def model_post_init(self, __context: Any) -> None:
+         """Initialize helper instances after Pydantic model initialization."""
+         super().model_post_init(__context)
+         # Initialize delegated helper classes
+         self._signature_builder = DSPySignatureBuilder()
+         self._streaming_executor = DSPyStreamingExecutor(
+             status_output_field=self.status_output_field,
+             stream_vertical_overflow=self.stream_vertical_overflow,
+             theme=self.theme,
+             no_output=self.no_output,
+         )
+         self._artifact_materializer = DSPyArtifactMaterializer()
+ 
      async def evaluate(
          self, agent, ctx, inputs: EvalInputs, output_group
      ) -> EvalResult:  # type: ignore[override]
@@ -241,14 +252,15 @@ class DSPyEngine(EngineComponent):
  
          has_context = bool(context_history) and self.should_use_context(inputs)
  
-         # Generate signature with semantic field naming
-         signature = self._prepare_signature_for_output_group(
+         # Generate signature with semantic field naming (delegated to SignatureBuilder)
+         signature = self._signature_builder.prepare_signature_for_output_group(
              dspy_mod,
              agent=agent,
              inputs=inputs,
              output_group=output_group,
              has_context=has_context,
              batched=batched,
+             engine_instructions=self.instructions,
          )
  
          sys_desc = self._system_description(self.instructions or agent.description)
@@ -258,14 +270,16 @@ class DSPyEngine(EngineComponent):
  
          pre_generated_artifact_id = uuid4()
  
-         # Build execution payload with semantic field names matching signature
-         execution_payload = self._prepare_execution_payload_for_output_group(
-             inputs,
-             output_group,
-             batched=batched,
-             has_context=has_context,
-             context_history=context_history,
-             sys_desc=sys_desc,
+         # Build execution payload with semantic field names matching signature (delegated to SignatureBuilder)
+         execution_payload = (
+             self._signature_builder.prepare_execution_payload_for_output_group(
+                 inputs,
+                 output_group,
+                 batched=batched,
+                 has_context=has_context,
+                 context_history=context_history,
+                 sys_desc=sys_desc,
+             )
          )
  
          # Merge native tools with MCP tools
@@ -295,7 +309,7 @@ class DSPyEngine(EngineComponent):
          should_stream = self.stream
          # Phase 6+7 Security Fix: Use Agent class variables for streaming coordination
          if ctx:
-             from flock.agent import Agent
+             from flock.core import Agent
  
              # Check if dashboard mode (WebSocket broadcast is set)
              is_dashboard = Agent._websocket_broadcast_global is not None
@@ -315,7 +329,7 @@ class DSPyEngine(EngineComponent):
          if should_stream:
              # Choose streaming method based on dashboard mode
              # Phase 6+7 Security Fix: Check dashboard mode via Agent class variable
-             from flock.agent import Agent
+             from flock.core import Agent
  
              is_dashboard = (
                  Agent._websocket_broadcast_global is not None if ctx else False
@@ -335,7 +349,7 @@ class DSPyEngine(EngineComponent):
                  (
                      raw_result,
                      _stream_final_display_data,
-                 ) = await self._execute_streaming_websocket_only(
+                 ) = await self._streaming_executor.execute_streaming_websocket_only(
                      dspy_mod,
                      program,
                      signature,
@@ -354,7 +368,7 @@ class DSPyEngine(EngineComponent):
                  (
                      raw_result,
                      _stream_final_display_data,
-                 ) = await self._execute_streaming(
+                 ) = await self._streaming_executor.execute_streaming(
                      dspy_mod,
                      program,
                      signature,
@@ -368,28 +382,31 @@ class DSPyEngine(EngineComponent):
              if not self.no_output and ctx:
                  ctx.state["_flock_stream_live_active"] = True
          else:
-             raw_result = await self._execute_standard(
+             raw_result = await self._streaming_executor.execute_standard(
                  dspy_mod,
                  program,
                  description=sys_desc,
                  payload=execution_payload,
              )
          # Phase 6+7 Security Fix: Check streaming state from Agent class variable
-         from flock.agent import Agent
+         from flock.core import Agent
  
          if ctx and Agent._streaming_counter > 0:
              ctx.state["_flock_output_queued"] = True
      finally:
          # Phase 6+7 Security Fix: Decrement counter using Agent class variable
          if should_stream and ctx:
-             from flock.agent import Agent
+             from flock.core import Agent
  
              Agent._streaming_counter = max(0, Agent._streaming_counter - 1)
  
-         # Extract semantic fields from Prediction
-         normalized_output = self._extract_multi_output_payload(raw_result, output_group)
+         # Extract semantic fields from Prediction (delegated to SignatureBuilder)
+         normalized_output = self._signature_builder.extract_multi_output_payload(
+             raw_result, output_group
+         )
  
-         artifacts, errors = self._materialize_artifacts(
+         # Materialize artifacts (delegated to ArtifactMaterializer)
+         artifacts, errors = self._artifact_materializer.materialize_artifacts(
              normalized_output,
              output_group.outputs,
              agent.name,
@@ -461,420 +478,6 @@ class DSPyEngine(EngineComponent):
          except Exception:
              return data
  
-     def _type_to_field_name(self, type_class: type) -> str:
-         """Convert Pydantic model class name to snake_case field name.
- 
-         Examples:
-             Movie → "movie"
-             ResearchQuestion → "research_question"
-             APIResponse → "api_response"
-             UserAuthToken → "user_auth_token"
- 
-         Args:
-             type_class: The Pydantic model class
- 
-         Returns:
-             snake_case field name
-         """
-         import re
- 
-         name = type_class.__name__
-         # Convert CamelCase to snake_case
-         snake_case = re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower()
-         return snake_case
- 
-     def _pluralize(self, field_name: str) -> str:
-         """Convert singular field name to plural for lists.
- 
-         Examples:
-             "idea" → "ideas"
-             "movie" → "movies"
-             "story" → "stories" (y → ies)
-             "analysis" → "analyses" (is → es)
-             "research_question" → "research_questions"
- 
-         Args:
-             field_name: Singular field name in snake_case
- 
-         Returns:
-             Pluralized field name
-         """
-         # Simple English pluralization rules
-         if (
-             field_name.endswith("y")
-             and len(field_name) > 1
-             and field_name[-2] not in "aeiou"
-         ):
-             # story → stories (consonant + y)
-             return field_name[:-1] + "ies"
-         if field_name.endswith(("s", "x", "z", "ch", "sh")):
-             # analysis → analyses, box → boxes
-             return field_name + "es"
-         # idea → ideas, movie → movies
-         return field_name + "s"
- 
-     def _needs_multioutput_signature(self, output_group) -> bool:
-         """Determine if OutputGroup requires multi-output signature generation.
- 
-         Args:
-             output_group: OutputGroup to analyze
- 
-         Returns:
-             True if multi-output signature needed, False for single output (backward compat)
-         """
-         if (
-             not output_group
-             or not hasattr(output_group, "outputs")
-             or not output_group.outputs
-         ):
-             return False
- 
-         # Multiple different types → multi-output
-         if len(output_group.outputs) > 1:
-             return True
- 
-         # Fan-out (single type, count > 1) → multi-output
-         if output_group.outputs[0].count > 1:
-             return True
- 
-         return False
- 
-     def _prepare_signature_with_context(
-         self,
-         dspy_mod,
-         *,
-         description: str | None,
-         input_schema: type[BaseModel] | None,
-         output_schema: type[BaseModel] | None,
-         has_context: bool = False,
-         batched: bool = False,
-     ) -> Any:
-         """Prepare DSPy signature, optionally including context field."""
-         fields = {
-             "description": (str, dspy_mod.InputField()),
-         }
- 
-         # Add context field if we have conversation history
-         if has_context:
-             fields["context"] = (
-                 list,
-                 dspy_mod.InputField(
-                     desc="Previous conversation artifacts providing context for this request"
-                 ),
-             )
- 
-         if batched:
-             if input_schema is not None:
-                 input_type = list[input_schema]
-             else:
-                 input_type = list[dict[str, Any]]
-         else:
-             input_type = input_schema or dict
- 
-         fields["input"] = (input_type, dspy_mod.InputField())
-         fields["output"] = (output_schema or dict, dspy_mod.OutputField())
- 
-         signature = dspy_mod.Signature(fields)
- 
-         instruction = (
-             description or "Produce a valid output that matches the 'output' schema."
-         )
-         if has_context:
-             instruction += (
-                 " Consider the conversation context provided to inform your response."
-             )
-         if batched:
-             instruction += (
-                 " The 'input' field will contain a list of items representing the batch; "
-                 "process the entire collection coherently."
-             )
-         # instruction += " Return only JSON."
- 
-         return signature.with_instructions(instruction)
- 
-     def _prepare_signature_for_output_group(
-         self,
-         dspy_mod,
-         *,
-         agent,
-         inputs: EvalInputs,
-         output_group,
-         has_context: bool = False,
-         batched: bool = False,
-     ) -> Any:
-         """Prepare DSPy signature dynamically based on OutputGroup with semantic field names.
- 
-         This method generates signatures using semantic field naming:
-         - Type names → snake_case field names (Task → "task", ResearchQuestion → "research_question")
-         - Pluralization for fan-out (Idea → "ideas" for lists)
-         - Pluralization for batching (Task → "tasks" for list[Task])
-         - Multi-input support for joins (multiple input artifacts with semantic names)
-         - Collision handling (same input/output type → prefix with "input_" or "output_")
- 
-         Examples:
-             Single output: .consumes(Task).publishes(Report)
-             → {"task": (Task, InputField()), "report": (Report, OutputField())}
- 
-             Multiple inputs (joins): .consumes(Document, Guidelines).publishes(Report)
-             → {"document": (Document, InputField()), "guidelines": (Guidelines, InputField()),
-                "report": (Report, OutputField())}
- 
-             Multiple outputs: .consumes(Task).publishes(Summary, Analysis)
-             → {"task": (Task, InputField()), "summary": (Summary, OutputField()),
-                "analysis": (Analysis, OutputField())}
- 
-             Fan-out: .publishes(Idea, fan_out=5)
-             → {"topic": (Topic, InputField()), "ideas": (list[Idea], OutputField(...))}
- 
-             Batching: evaluate_batch([task1, task2, task3])
-             → {"tasks": (list[Task], InputField()), "reports": (list[Report], OutputField())}
- 
-         Args:
-             dspy_mod: DSPy module
-             agent: Agent instance
-             inputs: EvalInputs with input artifacts
-             output_group: OutputGroup defining what to generate
-             has_context: Whether conversation context should be included
-             batched: Whether this is a batch evaluation (pluralizes input fields)
- 
-         Returns:
-             DSPy Signature with semantic field names
-         """
-         fields = {
-             "description": (str, dspy_mod.InputField()),
-         }
- 
-         # Add context field if we have conversation history
-         if has_context:
-             fields["context"] = (
-                 list,
-                 dspy_mod.InputField(
-                     desc="Previous conversation artifacts providing context for this request"
-                 ),
-             )
- 
-         # Track used field names for collision detection
-         used_field_names: set[str] = {"description", "context"}
- 
-         # 1. Generate INPUT fields with semantic names
-         # Multi-input support: handle all input artifacts for joins
-         # Batching support: pluralize field names and use list[Type] when batched=True
-         if inputs.artifacts:
-             # Collect unique input types (avoid duplicates if multiple artifacts of same type)
-             input_types_seen: dict[type, list[Artifact]] = {}
-             for artifact in inputs.artifacts:
-                 input_model = self._resolve_input_model(artifact)
-                 if input_model is not None:
-                     if input_model not in input_types_seen:
-                         input_types_seen[input_model] = []
-                     input_types_seen[input_model].append(artifact)
- 
-             # Generate fields for each unique input type
-             for input_model, artifacts_of_type in input_types_seen.items():
-                 field_name = self._type_to_field_name(input_model)
- 
-                 # Handle batching: pluralize field name and use list[Type]
-                 if batched:
-                     field_name = self._pluralize(field_name)
-                     input_type = list[input_model]
-                     desc = f"Batch of {input_model.__name__} instances to process"
-                     fields[field_name] = (input_type, dspy_mod.InputField(desc=desc))
-                 else:
-                     # Single input: use singular field name
-                     input_type = input_model
-                     fields[field_name] = (input_type, dspy_mod.InputField())
- 
-                 used_field_names.add(field_name)
- 
-             # Fallback: if we couldn't resolve any types, use generic "input"
-             if not input_types_seen:
-                 fields["input"] = (dict, dspy_mod.InputField())
-                 used_field_names.add("input")
- 
-         # 2. Generate OUTPUT fields with semantic names
-         for output_decl in output_group.outputs:
-             output_schema = output_decl.spec.model
-             type_name = output_decl.spec.type_name
- 
-             # Generate semantic field name
-             field_name = self._type_to_field_name(output_schema)
- 
-             # Handle fan-out: pluralize field name and use list[Type]
-             if output_decl.count > 1:
-                 field_name = self._pluralize(field_name)
-                 output_type = list[output_schema]
- 
-                 # Create description with count hint
-                 desc = f"Generate exactly {output_decl.count} {type_name} instances"
-                 if output_decl.group_description:
-                     desc = f"{desc}. {output_decl.group_description}"
- 
-                 fields[field_name] = (output_type, dspy_mod.OutputField(desc=desc))
-             else:
-                 # Single output
-                 output_type = output_schema
- 
-                 # Handle collision: if field name already used, prefix with "output_"
-                 if field_name in used_field_names:
-                     field_name = f"output_{field_name}"
- 
-                 desc = f"{type_name} output"
-                 if output_decl.group_description:
-                     desc = output_decl.group_description
- 
-                 fields[field_name] = (output_type, dspy_mod.OutputField(desc=desc))
- 
-             used_field_names.add(field_name)
- 
-         # 3. Create signature
-         signature = dspy_mod.Signature(fields)
- 
-         # 4. Build instruction
-         description = self.instructions or agent.description
-         instruction = (
-             description
-             or f"Process input and generate {len(output_group.outputs)} outputs."
-         )
- 
-         if has_context:
-             instruction += (
-                 " Consider the conversation context provided to inform your response."
-             )
- 
-         # Add batching hint
-         if batched:
-             instruction += " Process the batch of inputs coherently, generating outputs for each item."
- 
-         # Add semantic field names to instruction for clarity
-         output_field_names = [
-             name for name in fields.keys() if name not in {"description", "context"}
-         ]
-         if len(output_field_names) > 2:  # Multiple outputs
-             instruction += f" Generate ALL output fields as specified: {', '.join(output_field_names[1:])}."
- 
-         # instruction += " Return only valid JSON."
- 
-         return signature.with_instructions(instruction)
- 
-     def _prepare_execution_payload_for_output_group(
-         self,
-         inputs: EvalInputs,
-         output_group,
-         *,
-         batched: bool,
-         has_context: bool,
-         context_history: list | None,
-         sys_desc: str,
-     ) -> dict[str, Any]:
-         """Prepare execution payload with semantic field names matching signature.
- 
-         This method builds a payload dict with semantic field names that match the signature
-         generated by `_prepare_signature_for_output_group()`.
- 
-         Args:
-             inputs: EvalInputs with input artifacts
-             output_group: OutputGroup (not used here but kept for symmetry)
-             batched: Whether this is a batch evaluation
-             has_context: Whether conversation context should be included
-             context_history: Optional conversation history
-             sys_desc: System description for the "description" field
- 
-         Returns:
-             Dict with semantic field names ready for DSPy program execution
- 
-         Examples:
-             Single input: {"description": desc, "task": {...}}
-             Multi-input: {"description": desc, "task": {...}, "topic": {...}}
-             Batched: {"description": desc, "tasks": [{...}, {...}, {...}]}
-         """
-         payload = {"description": sys_desc}
- 
-         # Add context if present
-         if has_context and context_history:
-             payload["context"] = context_history
- 
-         # Build semantic input fields
-         if inputs.artifacts:
-             # Collect unique input types (same logic as signature generation)
-             input_types_seen: dict[type, list[Artifact]] = {}
-             for artifact in inputs.artifacts:
-                 input_model = self._resolve_input_model(artifact)
-                 if input_model is not None:
-                     if input_model not in input_types_seen:
-                         input_types_seen[input_model] = []
-                     input_types_seen[input_model].append(artifact)
- 
-             # Generate payload fields for each unique input type
-             for input_model, artifacts_of_type in input_types_seen.items():
-                 field_name = self._type_to_field_name(input_model)
- 
-                 # Validate and prepare payloads
-                 validated_payloads = [
-                     self._validate_input_payload(input_model, art.payload)
-                     for art in artifacts_of_type
-                 ]
- 
-                 if batched:
-                     # Batch mode: pluralize field name and use list
-                     field_name = self._pluralize(field_name)
-                     payload[field_name] = validated_payloads
-                 else:
-                     # Single mode: use first (or only) artifact
-                     # For multi-input joins, we have one artifact per type
-                     payload[field_name] = (
-                         validated_payloads[0] if validated_payloads else {}
-                     )
- 
-         return payload
- 
-     def _extract_multi_output_payload(self, prediction, output_group) -> dict[str, Any]:
-         """Extract semantic fields from DSPy Prediction for multi-output scenarios.
- 
-         Maps semantic field names (e.g., "movie", "ideas") back to type names (e.g., "Movie", "Idea")
-         for artifact materialization compatibility.
- 
-         Args:
-             prediction: DSPy Prediction object with semantic field names
-             output_group: OutputGroup defining expected outputs
- 
-         Returns:
-             Dict mapping type names to extracted values
- 
-         Examples:
-             Prediction(movie={...}, summary={...})
-             → {"Movie": {...}, "Summary": {...}}
- 
-             Prediction(ideas=[{...}, {...}, {...}])
-             → {"Idea": [{...}, {...}, {...}]}
-         """
-         payload = {}
- 
-         for output_decl in output_group.outputs:
-             output_schema = output_decl.spec.model
-             type_name = output_decl.spec.type_name
- 
-             # Generate the same semantic field name used in signature
-             field_name = self._type_to_field_name(output_schema)
- 
-             # Handle fan-out: field name is pluralized
-             if output_decl.count > 1:
-                 field_name = self._pluralize(field_name)
- 
-             # Extract value from Prediction
-             if hasattr(prediction, field_name):
-                 value = getattr(prediction, field_name)
- 
-                 # Store using type_name as key (for _select_output_payload compatibility)
-                 payload[type_name] = value
-             else:
-                 # Fallback: try with "output_" prefix (collision handling)
-                 prefixed_name = f"output_{field_name}"
-                 if hasattr(prediction, prefixed_name):
-                     value = getattr(prediction, prefixed_name)
-                     payload[type_name] = value
- 
-         return payload
- 
      def _choose_program(self, dspy_mod, signature, tools: Iterable[Any]):
          tools_list = list(tools or [])
          try:
@@ -891,894 +494,6 @@ class DSPyEngine(EngineComponent):
              return description
          return "Produce a valid output that matches the 'output' schema."  # Return only JSON.
  
-     def _normalize_output_payload(self, raw: Any) -> dict[str, Any]:
-         if isinstance(raw, BaseModel):
-             return raw.model_dump()
-         if isinstance(raw, str):
-             text = raw.strip()
-             candidates: list[str] = []
- 
-             # Primary attempt - full string
-             if text:
-                 candidates.append(text)
- 
-             # Handle DSPy streaming markers like `[[ ## output ## ]]`
-             if text.startswith("[[") and "]]" in text:
-                 _, remainder = text.split("]]", 1)
-                 remainder = remainder.strip()
-                 if remainder:
-                     candidates.append(remainder)
- 
-             # Handle Markdown-style fenced blocks
-             if text.startswith("```") and text.endswith("```"):
-                 fenced = text.strip("`").strip()
-                 if fenced:
-                     candidates.append(fenced)
- 
-             # Extract first JSON-looking segment if present
-             for opener, closer in (("{", "}"), ("[", "]")):
-                 start = text.find(opener)
-                 end = text.rfind(closer)
-                 if start != -1 and end != -1 and end > start:
-                     segment = text[start : end + 1].strip()
-                     if segment:
-                         candidates.append(segment)
- 
-             seen: set[str] = set()
-             for candidate in candidates:
-                 if candidate in seen:
-                     continue
-                 seen.add(candidate)
-                 try:
-                     return json.loads(candidate)
-                 except json.JSONDecodeError:
-                     continue
- 
-             return {"text": text}
-         if isinstance(raw, Mapping):
-             return dict(raw)
-         return {"value": raw}
- 
-     def _materialize_artifacts(
-         self,
-         payload: dict[str, Any],
-         outputs: Iterable[Any],
-         produced_by: str,
-         pre_generated_id: Any = None,
-     ):
-         """Materialize artifacts from payload, handling fan-out (count > 1).
- 
-         For fan-out outputs (count > 1), splits the list into individual artifacts.
-         For single outputs (count = 1), creates one artifact from dict.
- 
-         Args:
-             payload: Normalized output dict from DSPy
-             outputs: AgentOutput declarations defining what to create
-             produced_by: Agent name
-             pre_generated_id: Pre-generated ID for streaming (only used for single outputs)
- 
-         Returns:
-             Tuple of (artifacts list, errors list)
-         """
-         artifacts: list[Artifact] = []
-         errors: list[str] = []
-         for output in outputs or []:
-             model_cls = output.spec.model
-             data = self._select_output_payload(
-                 payload, model_cls, output.spec.type_name
-             )
- 
-             # FAN-OUT: If count > 1, data should be a list and we create multiple artifacts
-             if output.count > 1:
-                 if not isinstance(data, list):
-                     errors.append(
-                         f"Fan-out expected list for {output.spec.type_name} (count={output.count}), "
-                         f"got {type(data).__name__}"
-                     )
-                     continue
- 
-                 # Create one artifact for each item in the list
-                 for item_data in data:
-                     try:
-                         instance = model_cls(**item_data)
-                     except Exception as exc:  # noqa: BLE001 - collect validation errors for logs
-                         errors.append(f"{output.spec.type_name}: {exc!s}")
-                         continue
- 
-                     # Fan-out artifacts auto-generate their IDs (can't reuse pre_generated_id)
-                     artifact_kwargs = {
-                         "type": output.spec.type_name,
-                         "payload": instance.model_dump(),
-                         "produced_by": produced_by,
-                     }
-                     artifacts.append(Artifact(**artifact_kwargs))
-             else:
-                 # SINGLE OUTPUT: Create one artifact from dict
-                 try:
-                     instance = model_cls(**data)
-                 except Exception as exc:  # noqa: BLE001 - collect validation errors for logs
-                     errors.append(str(exc))
-                     continue
- 
-                 # Use the pre-generated ID if provided (for streaming), otherwise let Artifact auto-generate
-                 artifact_kwargs = {
-                     "type": output.spec.type_name,
-                     "payload": instance.model_dump(),
-                     "produced_by": produced_by,
-                 }
-                 if pre_generated_id is not None:
-                     artifact_kwargs["id"] = pre_generated_id
- 
-                 artifacts.append(Artifact(**artifact_kwargs))
-         return artifacts, errors
- 
-     def _select_output_payload(
-         self,
-         payload: Mapping[str, Any],
-         model_cls: type[BaseModel],
-         type_name: str,
-     ) -> dict[str, Any] | list[dict[str, Any]]:
-         """Select the correct output payload from the normalized output dict.
- 
-         Handles both simple type names and fully qualified names (with module prefix).
-         Returns either a dict (single output) or list[dict] (fan-out/batch).
-         """
-         candidates = [
-             payload.get(type_name),  # Try exact type_name (may be "__main__.Movie")
-             payload.get(model_cls.__name__),  # Try simple class name ("Movie")
-             payload.get(model_cls.__name__.lower()),  # Try lowercase ("movie")
-         ]
- 
-         # Extract value based on type
-         for candidate in candidates:
-             if candidate is not None:
-                 # Handle lists (fan-out and batching)
-                 if isinstance(candidate, list):
-                     # Convert Pydantic instances to dicts
-                     return [
-                         item.model_dump() if isinstance(item, BaseModel) else item
-                         for item in candidate
-                     ]
-                 # Handle single Pydantic instance
-                 if isinstance(candidate, BaseModel):
-                     return candidate.model_dump()
-                 # Handle dict
-                 if isinstance(candidate, Mapping):
-                     return dict(candidate)
- 
-         # Fallback: return entire payload (will likely fail validation)
-         if isinstance(payload, Mapping):
-             return dict(payload)
-         return {}
- 
-     async def _execute_standard(
-         self, dspy_mod, program, *, description: str, payload: dict[str, Any]
-     ) -> Any:
-         """Execute DSPy program in standard mode (no streaming)."""
-         # Handle semantic fields format: {"description": ..., "task": ..., "report": ...}
-         if isinstance(payload, dict) and "description" in payload:
-             # Semantic fields: pass all fields as kwargs
-             return program(**payload)
- 
-         # Handle legacy format: {"input": ..., "context": ...}
-         if isinstance(payload, dict) and "input" in payload:
-             return program(
-                 description=description,
-                 input=payload["input"],
-                 context=payload.get("context", []),
-             )
- 
-         # Handle old format: direct payload (backwards compatible)
-         return program(description=description, input=payload, context=[])
- 
-     async def _execute_streaming_websocket_only(
-         self,
-         dspy_mod,
-         program,
-         signature,
-         *,
-         description: str,
-         payload: dict[str, Any],
-         agent: Any,
-         ctx: Any = None,
-         pre_generated_artifact_id: Any = None,
-         output_group=None,
-     ) -> tuple[Any, None]:
-         """Execute streaming for WebSocket only (no Rich display).
- 
-         Optimized path for dashboard mode that skips all Rich formatting overhead.
-         Used when multiple agents stream in parallel to avoid terminal conflicts
-         and deadlocks with MCP tools.
- 
-         This method eliminates the Rich Live context that can cause deadlocks when
-         combined with MCP tool execution and parallel agent streaming.
-         """
-         logger.info(
-             f"Agent {agent.name}: Starting WebSocket-only streaming (dashboard mode)"
-         )
- 
-         # Get WebSocket broadcast function (security: wrapper prevents object traversal)
-         # Phase 6+7 Security Fix: Use broadcast wrapper from Agent class variable (prevents GOD MODE restoration)
-         from flock.agent import Agent
- 
-         ws_broadcast = Agent._websocket_broadcast_global
- 
-         if not ws_broadcast:
-             logger.warning(
-                 f"Agent {agent.name}: No WebSocket manager, falling back to standard execution"
-             )
-             result = await self._execute_standard(
-                 dspy_mod, program, description=description, payload=payload
-             )
-             return result, None
- 
-         # Get artifact type name for WebSocket events
-         artifact_type_name = "output"
-         # Use output_group.outputs (current group) if available, otherwise fallback to agent.outputs (all groups)
-         outputs_to_display = (
-             output_group.outputs
-             if output_group and hasattr(output_group, "outputs")
-             else agent.outputs
-             if hasattr(agent, "outputs")
-             else []
-         )
- 
-         if outputs_to_display:
-             artifact_type_name = outputs_to_display[0].spec.type_name
- 
-         # Prepare stream listeners
-         listeners = []
-         try:
-             streaming_mod = getattr(dspy_mod, "streaming", None)
-             if streaming_mod and hasattr(streaming_mod, "StreamListener"):
-                 for name, field in signature.output_fields.items():
-                     if field.annotation is str:
-                         listeners.append(
-                             streaming_mod.StreamListener(signature_field_name=name)
-                         )
-         except Exception:
-             listeners = []
- 
-         # Create streaming task
-         streaming_task = dspy_mod.streamify(
-             program,
-             is_async_program=True,
-             stream_listeners=listeners if listeners else None,
-         )
- 
-         # Execute with appropriate payload format
-         if isinstance(payload, dict) and "description" in payload:
-             # Semantic fields: pass all fields as kwargs
-             stream_generator = streaming_task(**payload)
-         elif isinstance(payload, dict) and "input" in payload:
-             # Legacy format: {"input": ..., "context": ...}
-             stream_generator = streaming_task(
-                 description=description,
-                 input=payload["input"],
-                 context=payload.get("context", []),
-             )
-         else:
-             # Old format: direct payload
-             stream_generator = streaming_task(
-                 description=description, input=payload, context=[]
-             )
- 
-         # Process stream (WebSocket only, no Rich display)
-         final_result = None
-         stream_sequence = 0
- 
-         # Track background WebSocket broadcast tasks to prevent garbage collection
-         # Using fire-and-forget pattern to avoid blocking DSPy's streaming loop
-         ws_broadcast_tasks: set[asyncio.Task] = set()
- 
-         async for value in stream_generator:
-             try:
-                 from dspy.streaming import StatusMessage, StreamResponse
-                 from litellm import ModelResponseStream
-             except Exception:
-                 StatusMessage = object  # type: ignore
-                 StreamResponse = object  # type: ignore
-                 ModelResponseStream = object  # type: ignore
- 
-             if isinstance(value, StatusMessage):
-                 token = getattr(value, "message", "")
-                 if token:
-                     try:
-                         event = StreamingOutputEvent(
-                             correlation_id=str(ctx.correlation_id)
-                             if ctx and ctx.correlation_id
-                             else "",
-                             agent_name=agent.name,
-                             run_id=ctx.task_id if ctx else "",
-                             output_type="log",
-                             content=str(token + "\n"),
-                             sequence=stream_sequence,
-                             is_final=False,
-                             artifact_id=str(pre_generated_artifact_id),
-                             artifact_type=artifact_type_name,
-                         )
-                         # Fire-and-forget to avoid blocking DSPy's streaming loop
-                         task = asyncio.create_task(ws_broadcast(event))
-                         ws_broadcast_tasks.add(task)
-                         task.add_done_callback(ws_broadcast_tasks.discard)
-                         stream_sequence += 1
-                     except Exception as e:
-                         logger.warning(f"Failed to emit streaming event: {e}")
- 
-             elif isinstance(value, StreamResponse):
-                 token = getattr(value, "chunk", None)
-                 if token:
-                     try:
-                         event = StreamingOutputEvent(
-                             correlation_id=str(ctx.correlation_id)
-                             if ctx and ctx.correlation_id
-                             else "",
-                             agent_name=agent.name,
-                             run_id=ctx.task_id if ctx else "",
-                             output_type="llm_token",
-                             content=str(token),
-                             sequence=stream_sequence,
-                             is_final=False,
-                             artifact_id=str(pre_generated_artifact_id),
-                             artifact_type=artifact_type_name,
-                         )
-                         # Fire-and-forget to avoid blocking DSPy's streaming loop
-                         task = asyncio.create_task(ws_broadcast(event))
-                         ws_broadcast_tasks.add(task)
-                         task.add_done_callback(ws_broadcast_tasks.discard)
-                         stream_sequence += 1
-                     except Exception as e:
-                         logger.warning(f"Failed to emit streaming event: {e}")
- 
-             elif isinstance(value, ModelResponseStream):
-                 chunk = value
-                 token = chunk.choices[0].delta.content or ""
-                 if token:
-                     try:
-                         event = StreamingOutputEvent(
-                             correlation_id=str(ctx.correlation_id)
-                             if ctx and ctx.correlation_id
-                             else "",
-                             agent_name=agent.name,
-                             run_id=ctx.task_id if ctx else "",
-                             output_type="llm_token",
-                             content=str(token),
-                             sequence=stream_sequence,
-                             is_final=False,
-                             artifact_id=str(pre_generated_artifact_id),
-                             artifact_type=artifact_type_name,
-                         )
-                         # Fire-and-forget to avoid blocking DSPy's streaming loop
-                         task = asyncio.create_task(ws_broadcast(event))
-                         ws_broadcast_tasks.add(task)
-                         task.add_done_callback(ws_broadcast_tasks.discard)
-                         stream_sequence += 1
-                     except Exception as e:
-                         logger.warning(f"Failed to emit streaming event: {e}")
- 
-             elif isinstance(value, dspy_mod.Prediction):
-                 final_result = value
-                 # Send final events
-                 try:
-                     event = StreamingOutputEvent(
-                         correlation_id=str(ctx.correlation_id)
-                         if ctx and ctx.correlation_id
-                         else "",
-                         agent_name=agent.name,
-                         run_id=ctx.task_id if ctx else "",
-                         output_type="log",
-                         content=f"\nAmount of output tokens: {stream_sequence}",
-                         sequence=stream_sequence,
-                         is_final=True,
-                         artifact_id=str(pre_generated_artifact_id),
-                         artifact_type=artifact_type_name,
-                     )
-                     # Fire-and-forget to avoid blocking DSPy's streaming loop
-                     task = asyncio.create_task(ws_broadcast(event))
-                     ws_broadcast_tasks.add(task)
-                     task.add_done_callback(ws_broadcast_tasks.discard)
- 
-                     event = StreamingOutputEvent(
-                         correlation_id=str(ctx.correlation_id)
-                         if ctx and ctx.correlation_id
-                         else "",
-                         agent_name=agent.name,
-                         run_id=ctx.task_id if ctx else "",
-                         output_type="log",
-                         content="--- End of output ---",
-                         sequence=stream_sequence + 1,
-                         is_final=True,
-                         artifact_id=str(pre_generated_artifact_id),
-                         artifact_type=artifact_type_name,
-                     )
-                     # Fire-and-forget to avoid blocking DSPy's streaming loop
-                     task = asyncio.create_task(ws_broadcast(event))
-                     ws_broadcast_tasks.add(task)
-                     task.add_done_callback(ws_broadcast_tasks.discard)
-                 except Exception as e:
-                     logger.warning(f"Failed to emit final streaming event: {e}")
- 
-         if final_result is None:
-             raise RuntimeError(
-                 f"Agent {agent.name}: Streaming did not yield a final prediction"
-             )
- 
-         logger.info(
-             f"Agent {agent.name}: WebSocket streaming completed ({stream_sequence} tokens)"
-         )
-         return final_result, None
- 
-     async def _execute_streaming(
-         self,
-         dspy_mod,
-         program,
-         signature,
-         *,
-         description: str,
-         payload: dict[str, Any],
-         agent: Any,
-         ctx: Any = None,
-         pre_generated_artifact_id: Any = None,
-         output_group=None,
-     ) -> Any:
-         """Execute DSPy program in streaming mode with Rich table updates."""
-         from rich.console import Console
-         from rich.live import Live
- 
-         console = Console()
- 
-         # Get WebSocket broadcast function (security: wrapper prevents object traversal)
-         # Phase 6+7 Security Fix: Use broadcast wrapper from Agent class variable (prevents GOD MODE restoration)
-         from flock.agent import Agent
- 
-         ws_broadcast = Agent._websocket_broadcast_global
- 
-         # Prepare stream listeners for output field
-         listeners = []
-         try:
-             streaming_mod = getattr(dspy_mod, "streaming", None)
-             if streaming_mod and hasattr(streaming_mod, "StreamListener"):
-                 for name, field in signature.output_fields.items():
-                     if field.annotation is str:
-                         listeners.append(
-                             streaming_mod.StreamListener(signature_field_name=name)
-                         )
-         except Exception:
-             listeners = []
- 
-         streaming_task = dspy_mod.streamify(
-             program,
-             is_async_program=True,
-             stream_listeners=listeners if listeners else None,
-         )
- 
-         # Execute with appropriate payload format
-         if isinstance(payload, dict) and "description" in payload:
-             # Semantic fields: pass all fields as kwargs
-             stream_generator = streaming_task(**payload)
-         elif isinstance(payload, dict) and "input" in payload:
-             # Legacy format: {"input": ..., "context": ...}
-             stream_generator = streaming_task(
-                 description=description,
-                 input=payload["input"],
-                 context=payload.get("context", []),
-             )
-         else:
-             # Old format: direct payload
-             stream_generator = streaming_task(
-                 description=description, input=payload, context=[]
-             )
- 
-         signature_order = []
-         status_field = self.status_output_field
-         try:
-             signature_order = list(signature.output_fields.keys())
-         except Exception:
-             signature_order = []
- 
-         # Initialize display data in full artifact format (matching OutputUtilityComponent display)
-         display_data: OrderedDict[str, Any] = OrderedDict()
- 
-         # Use the pre-generated artifact ID that was created before execution started
-         display_data["id"] = str(pre_generated_artifact_id)
- 
-         # Get the artifact type name from agent configuration
-         artifact_type_name = "output"
-         # Use output_group.outputs (current group) if available, otherwise fallback to agent.outputs (all groups)
-         outputs_to_display = (
-             output_group.outputs
-             if output_group and hasattr(output_group, "outputs")
-             else agent.outputs
-             if hasattr(agent, "outputs")
-             else []
-         )
- 
-         if outputs_to_display:
-             artifact_type_name = outputs_to_display[0].spec.type_name
-             for output in outputs_to_display:
-                 if output.spec.type_name not in artifact_type_name:
-                     artifact_type_name += ", " + output.spec.type_name
- 
-         display_data["type"] = artifact_type_name
-         display_data["payload"] = OrderedDict()
- 
-         # Add output fields to payload section
-         for field_name in signature_order:
-             if field_name != "description":  # Skip description field
-                 display_data["payload"][field_name] = ""
- 
-         display_data["produced_by"] = agent.name
-         display_data["correlation_id"] = (
-             str(ctx.correlation_id) if ctx and ctx.correlation_id else None
-         )
-         display_data["partition_key"] = None
-         display_data["tags"] = "set()"
-         display_data["visibility"] = OrderedDict([("kind", "Public")])
-         display_data["created_at"] = "streaming..."
-         display_data["version"] = 1
-         display_data["status"] = status_field
- 
-         stream_buffers: defaultdict[str, list[str]] = defaultdict(list)
-         stream_buffers[status_field] = []
-         stream_sequence = 0  # Monotonic sequence for ordering
- 
-         # Track background WebSocket broadcast tasks to prevent garbage collection
-         ws_broadcast_tasks: set[asyncio.Task] = set()
- 
-         formatter = theme_dict = styles = agent_label = None
-         live_cm = nullcontext()
-         overflow_mode = self.stream_vertical_overflow
- 
-         if not self.no_output:
-             _ensure_live_crop_above()
-             (
-                 formatter,
-                 theme_dict,
-                 styles,
-                 agent_label,
-             ) = self._prepare_stream_formatter(agent)
-             initial_panel = formatter.format_result(
-                 display_data, agent_label, theme_dict, styles
-             )
-             live_cm = Live(
-                 initial_panel,
-                 console=console,
-                 refresh_per_second=4,
-                 transient=False,
-                 vertical_overflow=overflow_mode,
-             )
- 
-         final_result: Any = None
- 
-         with live_cm as live:
- 
-             def _refresh_panel() -> None:
-                 if formatter is None or live is None:
-                     return
-                 live.update(
-                     formatter.format_result(
-                         display_data, agent_label, theme_dict, styles
-                     )
-                 )
- 
-             async for value in stream_generator:
-                 try:
-                     from dspy.streaming import StatusMessage, StreamResponse
-                     from litellm import ModelResponseStream
-                 except Exception:
-                     StatusMessage = object  # type: ignore
-                     StreamResponse = object  # type: ignore
-                     ModelResponseStream = object  # type: ignore
- 
-                 if isinstance(value, StatusMessage):
-                     token = getattr(value, "message", "")
-                     if token:
-                         stream_buffers[status_field].append(str(token) + "\n")
-                         display_data["status"] = "".join(stream_buffers[status_field])
- 
-                         # Emit to WebSocket (non-blocking to prevent deadlock)
-                         if ws_broadcast and token:
-                             try:
-                                 event = StreamingOutputEvent(
-                                     correlation_id=str(ctx.correlation_id)
-                                     if ctx and ctx.correlation_id
-                                     else "",
-                                     agent_name=agent.name,
-                                     run_id=ctx.task_id if ctx else "",
-                                     output_type="llm_token",
-                                     content=str(token + "\n"),
-                                     sequence=stream_sequence,
-                                     is_final=False,
-                                     artifact_id=str(
-                                         pre_generated_artifact_id
-                                     ),  # Phase 6: Track artifact for message streaming
-                                     artifact_type=artifact_type_name,  # Phase 6: Artifact type name
-                                 )
-                                 # Use create_task to avoid blocking the streaming loop
-                                 task = asyncio.create_task(ws_broadcast(event))
-                                 ws_broadcast_tasks.add(task)
-                                 task.add_done_callback(ws_broadcast_tasks.discard)
-                                 stream_sequence += 1
-                             except Exception as e:
-                                 logger.warning(f"Failed to emit streaming event: {e}")
-                         else:
-                             logger.debug(
-                                 "No WebSocket manager present for streaming event."
-                             )
- 
-                     if formatter is not None:
-                         _refresh_panel()
-                     continue
- 
-                 if isinstance(value, StreamResponse):
-                     token = getattr(value, "chunk", None)
-                     signature_field = getattr(value, "signature_field_name", None)
-                     if signature_field and signature_field != "description":
-                         # Update payload section - accumulate in "output" buffer
-                         buffer_key = f"_stream_{signature_field}"
-                         if token:
-                             stream_buffers[buffer_key].append(str(token))
-                             # Show streaming text in payload
-                             display_data["payload"]["_streaming"] = "".join(
-                                 stream_buffers[buffer_key]
-                             )
- 
-                             # Emit to WebSocket (non-blocking to prevent deadlock)
-                             if ws_broadcast:
-                                 logger.info(
-                                     f"[STREAMING] Emitting StreamResponse token='{token}', sequence={stream_sequence}"
-                                 )
-                                 try:
-                                     event = StreamingOutputEvent(
-                                         correlation_id=str(ctx.correlation_id)
-                                         if ctx and ctx.correlation_id
-                                         else "",
-                                         agent_name=agent.name,
-                                         run_id=ctx.task_id if ctx else "",
-                                         output_type="llm_token",
-                                         content=str(token),
-                                         sequence=stream_sequence,
-                                         is_final=False,
-                                         artifact_id=str(
-                                             pre_generated_artifact_id
-                                         ),  # Phase 6: Track artifact for message streaming
-                                         artifact_type=artifact_type_name,  # Phase 6: Artifact type name
-                                     )
-                                     # Use create_task to avoid blocking the streaming loop
-                                     task = asyncio.create_task(ws_broadcast(event))
-                                     ws_broadcast_tasks.add(task)
-                                     task.add_done_callback(ws_broadcast_tasks.discard)
-                                     stream_sequence += 1
-                                 except Exception as e:
-                                     logger.warning(
-                                         f"Failed to emit streaming event: {e}"
-                                     )
- 
-                     if formatter is not None:
-                         _refresh_panel()
-                     continue
- 
-                 if isinstance(value, ModelResponseStream):
-                     chunk = value
-                     token = chunk.choices[0].delta.content or ""
-                     signature_field = getattr(value, "signature_field_name", None)
- 
-                     if signature_field and signature_field != "description":
-                         # Update payload section - accumulate in buffer
-                         buffer_key = f"_stream_{signature_field}"
-                         if token:
-                             stream_buffers[buffer_key].append(str(token))
-                             # Show streaming text in payload
-                             display_data["payload"]["_streaming"] = "".join(
-                                 stream_buffers[buffer_key]
-                             )
-                     elif token:
-                         stream_buffers[status_field].append(str(token))
-                         display_data["status"] = "".join(stream_buffers[status_field])
- 
-                     # Emit to WebSocket (non-blocking to prevent deadlock)
-                     if ws_broadcast and token:
-                         try:
-                             event = StreamingOutputEvent(
-                                 correlation_id=str(ctx.correlation_id)
-                                 if ctx and ctx.correlation_id
-                                 else "",
-                                 agent_name=agent.name,
-                                 run_id=ctx.task_id if ctx else "",
-                                 output_type="llm_token",
-                                 content=str(token),
-                                 sequence=stream_sequence,
-                                 is_final=False,
-                                 artifact_id=str(
-                                     pre_generated_artifact_id
-                                 ),  # Phase 6: Track artifact for message streaming
-                                 artifact_type=display_data[
-                                     "type"
-                                 ],  # Phase 6: Artifact type name from display_data
-                             )
-                             # Use create_task to avoid blocking the streaming loop
-                             task = asyncio.create_task(ws_broadcast(event))
-                             ws_broadcast_tasks.add(task)
-                             task.add_done_callback(ws_broadcast_tasks.discard)
-                             stream_sequence += 1
-                         except Exception as e:
-                             logger.warning(f"Failed to emit streaming event: {e}")
- 
-                     if formatter is not None:
-                         _refresh_panel()
-                     continue
- 
-                 if isinstance(value, dspy_mod.Prediction):
-                     final_result = value
- 
-                     # Emit final streaming event (non-blocking to prevent deadlock)
-                     if ws_broadcast:
-                         try:
-                             event = StreamingOutputEvent(
-                                 correlation_id=str(ctx.correlation_id)
-                                 if ctx and ctx.correlation_id
-                                 else "",
-                                 agent_name=agent.name,
-                                 run_id=ctx.task_id if ctx else "",
-                                 output_type="log",
-                                 content="\nAmount of output tokens: "
-                                 + str(stream_sequence),
-                                 sequence=stream_sequence,
-                                 is_final=True,  # Mark as final
-                                 artifact_id=str(
-                                     pre_generated_artifact_id
-                                 ),  # Phase 6: Track artifact for message streaming
-                                 artifact_type=display_data[
-                                     "type"
-                                 ],  # Phase 6: Artifact type name
-                             )
-                             # Use create_task to avoid blocking the streaming loop
-                             task = asyncio.create_task(ws_broadcast(event))
-                             ws_broadcast_tasks.add(task)
-                             task.add_done_callback(ws_broadcast_tasks.discard)
-                             event = StreamingOutputEvent(
-                                 correlation_id=str(ctx.correlation_id)
-                                 if ctx and ctx.correlation_id
-                                 else "",
-                                 agent_name=agent.name,
-                                 run_id=ctx.task_id if ctx else "",
-                                 output_type="log",
-                                 content="--- End of output ---",
-                                 sequence=stream_sequence,
-                                 is_final=True,  # Mark as final
-                                 artifact_id=str(
-                                     pre_generated_artifact_id
-                                 ),  # Phase 6: Track artifact for message streaming
-                                 artifact_type=display_data[
-                                     "type"
-                                 ],  # Phase 6: Artifact type name
-                             )
-                             # Use create_task to avoid blocking the streaming loop
-                             task = asyncio.create_task(ws_broadcast(event))
-                             ws_broadcast_tasks.add(task)
-                             task.add_done_callback(ws_broadcast_tasks.discard)
-                         except Exception as e:
-                             logger.warning(f"Failed to emit final streaming event: {e}")
- 
-                     if formatter is not None:
-                         # Update payload section with final values
-                         payload_data = OrderedDict()
-                         for field_name in signature_order:
-                             if field_name != "description" and hasattr(
-                                 final_result, field_name
-                             ):
-                                 field_value = getattr(final_result, field_name)
- 
-                                 # Convert BaseModel instances to dicts for proper table rendering
-                                 if isinstance(field_value, list):
-                                     # Handle lists of BaseModel instances (fan-out/batch)
-                                     payload_data[field_name] = [
-                                         item.model_dump()
-                                         if isinstance(item, BaseModel)
-                                         else item
-                                         for item in field_value
-                                     ]
-                                 elif isinstance(field_value, BaseModel):
-                                     # Handle single BaseModel instance
-                                     payload_data[field_name] = field_value.model_dump()
-                                 else:
-                                     # Handle primitive types
-                                     payload_data[field_name] = field_value
- 
-                         # Update all fields with actual values
-                         display_data["payload"].clear()
-                         display_data["payload"].update(payload_data)
- 
-                         # Update timestamp
-                         from datetime import datetime
- 
-                         display_data["created_at"] = datetime.now(UTC).isoformat()
- 
-                         # Remove status field from display
-                         display_data.pop("status", None)
-                         _refresh_panel()
- 
-         if final_result is None:
-             raise RuntimeError("Streaming did not yield a final prediction.")
- 
-         # Return both the result and the display data for final ID update
-         return final_result, (formatter, display_data, theme_dict, styles, agent_label)
- 
-     def _prepare_stream_formatter(
-         self, agent: Any
-     ) -> tuple[Any, dict[str, Any], dict[str, Any], str]:
-         """Build formatter + theme metadata for streaming tables."""
-         import pathlib
- 
-         from flock.logging.formatters.themed_formatter import (
-             ThemedAgentResultFormatter,
-             create_pygments_syntax_theme,
-             get_default_styles,
-             load_syntax_theme_from_file,
-             load_theme_from_file,
-         )
- 
-         themes_dir = pathlib.Path(__file__).resolve().parents[1] / "themes"
-         theme_filename = self.theme
-         if not theme_filename.endswith(".toml"):
-             theme_filename = f"{theme_filename}.toml"
-         theme_path = themes_dir / theme_filename
- 
-         try:
-             theme_dict = load_theme_from_file(theme_path)
-         except Exception:
-             fallback_path = themes_dir / "afterglow.toml"
-             theme_dict = load_theme_from_file(fallback_path)
-             theme_path = fallback_path
- 
-         from flock.logging.formatters.themes import OutputTheme
- 
-         formatter = ThemedAgentResultFormatter(theme=OutputTheme.afterglow)
-         styles = get_default_styles(theme_dict)
-         formatter.styles = styles
- 
-         try:
-             syntax_theme = load_syntax_theme_from_file(theme_path)
-             formatter.syntax_style = create_pygments_syntax_theme(syntax_theme)
-         except Exception:
-             formatter.syntax_style = None
- 
-         model_label = self.model or ""
-         agent_label = agent.name if not model_label else f"{agent.name} - {model_label}"
- 
-         return formatter, theme_dict, styles, agent_label
- 
-     def _print_final_stream_display(
-         self,
-         stream_display_data: tuple[Any, OrderedDict, dict, dict, str],
-         artifact_id: str,
-         artifact: Artifact,
-     ) -> None:
-         """Print the final streaming display with the real artifact ID."""
-         from rich.console import Console
- 
-         formatter, display_data, theme_dict, styles, agent_label = stream_display_data
- 
-         # Update display_data with the real artifact information
-         display_data["id"] = artifact_id
-         display_data["created_at"] = artifact.created_at.isoformat()
- 
-         # Update all artifact metadata
-         display_data["correlation_id"] = (
-             str(artifact.correlation_id) if artifact.correlation_id else None
-         )
-         display_data["partition_key"] = artifact.partition_key
-         display_data["tags"] = (
-             "set()" if not artifact.tags else f"set({list(artifact.tags)})"
-         )
- 
-         # Print the final panel
-         console = Console()
-         final_panel = formatter.format_result(
-             display_data, agent_label, theme_dict, styles
-         )
-         console.print(final_panel)
- 
  
  __all__ = ["DSPyEngine"]
  
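
The net effect of the two large removal hunks is that DSPyEngine keeps only orchestration logic, while signature construction, payload extraction, streaming, and artifact materialization move to the helpers instantiated in model_post_init. The semantic field-naming rules driving the new DSPySignatureBuilder are fully visible in the removed code; a self-contained sketch of just those two rules follows (standalone functions here, presumably methods on the builder in 0.5.20):

    import re

    def type_to_field_name(type_class: type) -> str:
        """CamelCase class name -> snake_case field name (as in the removed _type_to_field_name)."""
        return re.sub(r"(?<!^)(?=[A-Z])", "_", type_class.__name__).lower()

    def pluralize(field_name: str) -> str:
        """Simple English pluralization (as in the removed _pluralize)."""
        if field_name.endswith("y") and len(field_name) > 1 and field_name[-2] not in "aeiou":
            return field_name[:-1] + "ies"  # story -> stories
        if field_name.endswith(("s", "x", "z", "ch", "sh")):
            return field_name + "es"  # box -> boxes
        return field_name + "s"  # idea -> ideas

    class ResearchQuestion:  # stand-in for a Pydantic output model
        pass

    assert type_to_field_name(ResearchQuestion) == "research_question"
    assert pluralize("story") == "stories"
    assert pluralize(type_to_field_name(ResearchQuestion)) == "research_questions"

These names are what the generated DSPy signatures use for input and output fields (with fan-out and batching pluralizing them), so downstream code inspecting Prediction objects sees fields like "report" or "ideas" rather than a generic "output".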