jaf-py 2.4.5__py3-none-any.whl → 2.4.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
jaf/core/types.py CHANGED
@@ -288,6 +288,7 @@ class Agent(Generic[Ctx, Out]):
     output_codec: Optional[Any] = None  # Type that can validate Out (like Pydantic model or Zod equivalent)
     handoffs: Optional[List[str]] = None
     model_config: Optional[ModelConfig] = None
+    advanced_config: Optional['AdvancedConfig'] = None
 
     def as_tool(
         self,
@@ -331,6 +332,74 @@ class Agent(Generic[Ctx, Out]):
 # Guardrail type
 Guardrail = Callable[[Any], Union[ValidationResult, Awaitable[ValidationResult]]]
 
+@dataclass(frozen=True)
+class AdvancedGuardrailsConfig:
+    """Configuration for advanced guardrails with LLM-based validation."""
+    input_prompt: Optional[str] = None
+    output_prompt: Optional[str] = None
+    require_citations: bool = False
+    fast_model: Optional[str] = None
+    fail_safe: Literal['allow', 'block'] = 'allow'
+    execution_mode: Literal['parallel', 'sequential'] = 'parallel'
+    timeout_ms: int = 30000
+
+    def __post_init__(self):
+        """Validate configuration."""
+        if self.timeout_ms < 1000:
+            object.__setattr__(self, 'timeout_ms', 1000)
+
+@dataclass(frozen=True)
+class AdvancedConfig:
+    """Advanced agent configuration including guardrails."""
+    guardrails: Optional[AdvancedGuardrailsConfig] = None
+
+def validate_guardrails_config(config: Optional[AdvancedGuardrailsConfig]) -> AdvancedGuardrailsConfig:
+    """Validate and provide defaults for guardrails configuration."""
+    if config is None:
+        return AdvancedGuardrailsConfig()
+
+    return AdvancedGuardrailsConfig(
+        input_prompt=config.input_prompt.strip() if isinstance(config.input_prompt, str) and config.input_prompt else None,
+        output_prompt=config.output_prompt.strip() if isinstance(config.output_prompt, str) and config.output_prompt else None,
+        require_citations=config.require_citations,
+        fast_model=config.fast_model.strip() if isinstance(config.fast_model, str) and config.fast_model else None,
+        fail_safe=config.fail_safe,
+        execution_mode=config.execution_mode,
+        timeout_ms=max(1000, config.timeout_ms)
+    )
+
+def json_parse_llm_output(text: str) -> Optional[Dict[str, Any]]:
+    """Parse JSON from LLM output, handling common formatting issues."""
+    import json
+    import re
+
+    if not text:
+        return None
+
+    # Try direct parsing first
+    try:
+        return json.loads(text)
+    except json.JSONDecodeError:
+        pass
+
+    # Try to extract JSON from markdown code blocks
+    json_match = re.search(r'```(?:json)?\s*(\{.*?\})\s*```', text, re.DOTALL)
+    if json_match:
+        try:
+            return json.loads(json_match.group(1))
+        except json.JSONDecodeError:
+            pass
+
+    # Try to find the first JSON object in the text
+    json_match = re.search(r'\{.*?\}', text, re.DOTALL)
+    if json_match:
+        try:
+            return json.loads(json_match.group(0))
+        except json.JSONDecodeError:
+            pass
+
+    return None
+
 @dataclass(frozen=True)
 class ApprovalValue:
     """Represents an approval decision with context."""
@@ -600,6 +669,17 @@ class GuardrailEvent:
     type: Literal['guardrail_check'] = 'guardrail_check'
     data: GuardrailEventData = field(default_factory=lambda: GuardrailEventData(""))
 
+@dataclass(frozen=True)
+class GuardrailViolationEventData:
+    """Data for guardrail violation events."""
+    stage: Literal['input', 'output']
+    reason: str
+
+@dataclass(frozen=True)
+class GuardrailViolationEvent:
+    type: Literal['guardrail_violation'] = 'guardrail_violation'
+    data: GuardrailViolationEventData = field(default_factory=lambda: GuardrailViolationEventData("input", ""))
+
 @dataclass(frozen=True)
 class MemoryEventData:
     """Data for memory operation events."""
@@ -632,6 +712,7 @@ class OutputParseEvent:
 TraceEvent = Union[
     RunStartEvent,
     GuardrailEvent,
+    GuardrailViolationEvent,
     MemoryEvent,
     OutputParseEvent,
     LLMCallStartEvent,
@@ -710,7 +791,8 @@ class RunConfig(Generic[Ctx]):
     initial_input_guardrails: Optional[List[Guardrail]] = None
     final_output_guardrails: Optional[List[Guardrail]] = None
     on_event: Optional[Callable[[TraceEvent], None]] = None
-    memory: Optional['MemoryConfig'] = None
+    memory: Optional[Any] = None  # MemoryConfig - avoiding circular import
     conversation_id: Optional[str] = None
+    default_fast_model: Optional[str] = None  # Default model for fast operations like guardrails
     default_tool_timeout: Optional[float] = 300.0  # Default timeout for tool execution in seconds
     approval_storage: Optional['ApprovalStorage'] = None  # Storage for approval decisions
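default_fast_model gives guardrail checks a cheap default model, with AdvancedGuardrailsConfig.fast_model available as a per-agent override. A sketch of the relevant fields; only on_event, default_fast_model, and default_tool_timeout appear in this diff, and the registry/provider arguments are hypothetical placeholders:

# Sketch: RunConfig fields touched by this release (other fields keep defaults).
config = RunConfig(
    agent_registry=registry,            # hypothetical: your agents, built elsewhere
    model_provider=provider,            # hypothetical: e.g. make_litellm_sdk_provider(...)
    on_event=on_trace_event,            # handler sketched above
    default_fast_model="gpt-4o-mini",   # example model for fast guardrail calls
    default_tool_timeout=300.0,
)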
jaf/providers/__init__.py CHANGED
@@ -7,7 +7,7 @@ from .mcp import (
     create_mcp_sse_tools,
     create_mcp_http_tools,
 )
-from .model import make_litellm_provider
+from .model import make_litellm_provider, make_litellm_sdk_provider
 
 # Back-compat for renamed/removed exports (do not add to __all__)
 import warnings as _warnings
@@ -57,4 +57,5 @@ __all__ = [
     "create_mcp_sse_tools",
     "create_mcp_http_tools",
     "make_litellm_provider",
+    "make_litellm_sdk_provider",
 ]
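Both import paths now resolve to the new factory:

# New in 2.4.7: the SDK-backed factory is exported at package level.
from jaf.providers import make_litellm_sdk_provider
from jaf.providers.model import make_litellm_sdk_provider as sdk_provider  # equivalent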
jaf/providers/model.py CHANGED
@@ -14,6 +14,7 @@ import base64
 
 from openai import OpenAI
 from pydantic import BaseModel
+import litellm
 
 from ..core.types import (
     Agent, ContentRole, Message, ModelProvider, RunConfig, RunState,
@@ -445,9 +446,327 @@ def make_litellm_provider(
 
     return LiteLLMProvider()
 
+def make_litellm_sdk_provider(
+    api_key: Optional[str] = None,
+    model: str = "gpt-3.5-turbo",
+    base_url: Optional[str] = None,
+    default_timeout: Optional[float] = None,
+    **litellm_kwargs: Any
+) -> ModelProvider[Ctx]:
+    """
+    Create a LiteLLM SDK-based model provider with universal provider support.
+
+    LiteLLM automatically detects the provider from the model name and handles
+    API key management through environment variables or direct parameters.
+
+    Args:
+        api_key: API key for the provider (optional, can use env vars)
+        model: Model name (e.g., "gpt-4", "claude-3-sonnet", "gemini-pro", "llama2", etc.)
+        base_url: Optional base URL for custom endpoints
+        default_timeout: Default timeout for model API calls in seconds
+        **litellm_kwargs: Additional arguments passed to litellm.completion()
+            Common examples:
+            - vertex_project: "your-project" (for Google models)
+            - vertex_location: "us-central1" (for Google models)
+            - azure_deployment: "your-deployment" (for Azure OpenAI)
+            - api_base: "https://your-endpoint.com" (custom endpoint)
+            - custom_llm_provider: "custom_provider_name"
+
+    Returns:
+        ModelProvider instance
+
+    Examples:
+        # OpenAI
+        make_litellm_sdk_provider(api_key="sk-...", model="gpt-4")
+
+        # Anthropic Claude
+        make_litellm_sdk_provider(api_key="sk-ant-...", model="claude-3-sonnet-20240229")
+
+        # Google Gemini
+        make_litellm_sdk_provider(model="gemini-pro", vertex_project="my-project")
+
+        # Ollama (local)
+        make_litellm_sdk_provider(model="ollama/llama2", base_url="http://localhost:11434")
+
+        # Azure OpenAI
+        make_litellm_sdk_provider(
+            model="azure/gpt-4",
+            api_key="your-azure-key",
+            azure_deployment="gpt-4-deployment",
+            api_base="https://your-resource.openai.azure.com"
+        )
+
+        # Hugging Face
+        make_litellm_sdk_provider(
+            model="huggingface/microsoft/DialoGPT-medium",
+            api_key="hf_..."
+        )
+
+        # Any custom provider
+        make_litellm_sdk_provider(
+            model="custom_provider/model-name",
+            api_key="your-key",
+            custom_llm_provider="your_provider"
+        )
+    """
+
+    class LiteLLMSDKProvider:
+        def __init__(self):
+            self.api_key = api_key
+            self.model = model
+            self.base_url = base_url
+            self.default_timeout = default_timeout
+            self.litellm_kwargs = litellm_kwargs
+
+        async def get_completion(
+            self,
+            state: RunState[Ctx],
+            agent: Agent[Ctx, Any],
+            config: RunConfig[Ctx]
+        ) -> Dict[str, Any]:
+            """Get completion from the model using LiteLLM SDK."""
+
+            # Determine model to use
+            model_name = config.model_override or self.model
+
+            # Create system message
+            system_message = {
+                "role": "system",
+                "content": agent.instructions(state)
+            }
+
+            # Convert messages to OpenAI format
+            messages = [system_message]
+            for msg in state.messages:
+                converted_msg = await _convert_message(msg)
+                messages.append(converted_msg)
+
+            # Convert tools to OpenAI format
+            tools = None
+            if agent.tools:
+                tools = [
+                    {
+                        "type": "function",
+                        "function": {
+                            "name": tool.schema.name,
+                            "description": tool.schema.description,
+                            "parameters": _pydantic_to_json_schema(tool.schema.parameters),
+                        }
+                    }
+                    for tool in agent.tools
+                ]
+
+            # Prepare request parameters for LiteLLM
+            request_params = {
+                "model": model_name,
+                "messages": messages,
+                **self.litellm_kwargs
+            }
+
+            # Add API key if provided
+            if self.api_key:
+                request_params["api_key"] = self.api_key
+
+            # Add optional parameters
+            if agent.model_config:
+                if agent.model_config.temperature is not None:
+                    request_params["temperature"] = agent.model_config.temperature
+                if agent.model_config.max_tokens is not None:
+                    request_params["max_tokens"] = agent.model_config.max_tokens
+
+            if tools:
+                request_params["tools"] = tools
+                request_params["tool_choice"] = "auto"
+
+            if agent.output_codec:
+                request_params["response_format"] = {"type": "json_object"}
+
+            # LiteLLM will use api_base from kwargs or base_url parameter
+            if self.base_url:
+                request_params["api_base"] = self.base_url
+
+            # Make the API call using litellm
+            response = await litellm.acompletion(**request_params)
+
+            # Return in the expected format that the engine expects
+            choice = response.choices[0]
+
+            # Convert tool_calls to dict format if present
+            tool_calls = None
+            if choice.message.tool_calls:
+                tool_calls = [
+                    {
+                        'id': tc.id,
+                        'type': tc.type,
+                        'function': {
+                            'name': tc.function.name,
+                            'arguments': tc.function.arguments
+                        }
+                    }
+                    for tc in choice.message.tool_calls
+                ]
+
+            # Extract usage data
+            usage_data = None
+            if response.usage:
+                usage_data = {
+                    "prompt_tokens": response.usage.prompt_tokens,
+                    "completion_tokens": response.usage.completion_tokens,
+                    "total_tokens": response.usage.total_tokens,
+                }
+
+            return {
+                'id': response.id,
+                'created': response.created,
+                'model': response.model,
+                'system_fingerprint': getattr(response, 'system_fingerprint', None),
+                'message': {
+                    'content': choice.message.content,
+                    'tool_calls': tool_calls
+                },
+                'usage': usage_data,
+                'prompt': messages
+            }
+
+        async def get_completion_stream(
+            self,
+            state: RunState[Ctx],
+            agent: Agent[Ctx, Any],
+            config: RunConfig[Ctx]
+        ) -> AsyncIterator[CompletionStreamChunk]:
+            """
+            Stream completion chunks from the model provider using LiteLLM SDK.
+            """
+            # Determine model to use
+            model_name = config.model_override or self.model
+
+            # Create system message
+            system_message = {
+                "role": "system",
+                "content": agent.instructions(state)
+            }
+
+            # Convert messages to OpenAI format
+            messages = [system_message]
+            for msg in state.messages:
+                converted_msg = await _convert_message(msg)
+                messages.append(converted_msg)
+
+            # Convert tools to OpenAI format
+            tools = None
+            if agent.tools:
+                tools = [
+                    {
+                        "type": "function",
+                        "function": {
+                            "name": tool.schema.name,
+                            "description": tool.schema.description,
+                            "parameters": _pydantic_to_json_schema(tool.schema.parameters),
+                        }
+                    }
+                    for tool in agent.tools
+                ]
+
+            # Prepare request parameters for LiteLLM streaming
+            request_params: Dict[str, Any] = {
+                "model": model_name,
+                "messages": messages,
+                "stream": True,
+                **self.litellm_kwargs
+            }
+
+            # Add API key if provided
+            if self.api_key:
+                request_params["api_key"] = self.api_key
+
+            # Add optional parameters
+            if agent.model_config:
+                if agent.model_config.temperature is not None:
+                    request_params["temperature"] = agent.model_config.temperature
+                if agent.model_config.max_tokens is not None:
+                    request_params["max_tokens"] = agent.model_config.max_tokens
+
+            if tools:
+                request_params["tools"] = tools
+                request_params["tool_choice"] = "auto"
+
+            if agent.output_codec:
+                request_params["response_format"] = {"type": "json_object"}
+
+            # LiteLLM will use api_base from kwargs or base_url parameter
+            if self.base_url:
+                request_params["api_base"] = self.base_url
+
+            # Stream using litellm
+            stream = await litellm.acompletion(**request_params)
+
+            async for chunk in stream:
+                try:
+                    # Best-effort extraction of raw for debugging
+                    try:
+                        raw_obj = chunk.model_dump() if hasattr(chunk, 'model_dump') else None
+                    except Exception:
+                        raw_obj = None
+
+                    choice = None
+                    if getattr(chunk, "choices", None):
+                        choice = chunk.choices[0]
+
+                    if choice is None:
+                        continue
+
+                    delta = getattr(choice, "delta", None)
+                    finish_reason = getattr(choice, "finish_reason", None)
+
+                    # Text content delta
+                    if delta is not None:
+                        content_delta = getattr(delta, "content", None)
+                        if content_delta:
+                            yield CompletionStreamChunk(delta=content_delta, raw=raw_obj)
+
+                        # Tool call deltas
+                        tool_calls = getattr(delta, "tool_calls", None)
+                        if isinstance(tool_calls, list):
+                            for tc in tool_calls:
+                                try:
+                                    idx = getattr(tc, "index", 0) or 0
+                                    tc_id = getattr(tc, "id", None)
+                                    fn = getattr(tc, "function", None)
+                                    fn_name = getattr(fn, "name", None) if fn is not None else None
+                                    args_delta = getattr(fn, "arguments", None) if fn is not None else None
+
+                                    yield CompletionStreamChunk(
+                                        tool_call_delta=ToolCallDelta(
+                                            index=idx,
+                                            id=tc_id,
+                                            type='function',
+                                            function=ToolCallFunctionDelta(
+                                                name=fn_name,
+                                                arguments_delta=args_delta
+                                            )
+                                        ),
+                                        raw=raw_obj
+                                    )
+                                except Exception:
+                                    continue
+
+                    # Completion ended
+                    if finish_reason:
+                        yield CompletionStreamChunk(is_done=True, finish_reason=finish_reason, raw=raw_obj)
+                except Exception:
+                    continue
+
+    return LiteLLMSDKProvider()
+
 async def _convert_message(msg: Message) -> Dict[str, Any]:
-    """Convert JAF Message to OpenAI message format with attachment support."""
-    if msg.role == 'user':
+    """
+    Handles all possible role types (string and enum) and content formats.
+    """
+    # Normalize role to handle both string and enum values
+    role_value = msg.role.value if hasattr(msg.role, 'value') else str(msg.role).lower()
+
+    # Handle user messages
+    if role_value in ('user', ContentRole.USER.value if hasattr(ContentRole, 'USER') else 'user'):
         if isinstance(msg.content, list):
             # Multi-part content
             return {
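The docstring above covers construction per backend; for completeness, a sketch of how the factory slots into a run (model strings are examples, and passing the result as a run's model provider is assumed to mirror make_litellm_provider usage):

# Sketch: the SDK-backed provider is a drop-in ModelProvider.
from jaf.providers import make_litellm_sdk_provider

# LiteLLM infers the backend from the model string; keys may also come from env vars.
provider = make_litellm_sdk_provider(model="gpt-4o", api_key="sk-...")
local = make_litellm_sdk_provider(model="ollama/llama3", base_url="http://localhost:11434")

# Either instance can be handed to the engine wherever a ModelProvider is expected.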
@@ -457,12 +776,16 @@ async def _convert_message(msg: Message) -> Dict[str, Any]:
         else:
             # Build message with attachments if available
             return await _build_chat_message_with_attachments('user', msg)
-    elif msg.role == 'assistant':
+
+    # Handle assistant messages
+    elif role_value in ('assistant', ContentRole.ASSISTANT.value if hasattr(ContentRole, 'ASSISTANT') else 'assistant'):
         result = {
             "role": "assistant",
-            "content": get_text_content(msg.content),
+            "content": get_text_content(msg.content) or "",  # Ensure content is never None
         }
-        if msg.tool_calls:
+
+        # Add tool calls if present
+        if msg.tool_calls and len(msg.tool_calls) > 0:
             result["tool_calls"] = [
                 {
                     "id": tc.id,
@@ -473,16 +796,48 @@ async def _convert_message(msg: Message) -> Dict[str, Any]:
                 }
             }
             for tc in msg.tool_calls
+            if tc.id and tc.function and tc.function.name  # Validate tool call structure
         ]
+
         return result
-    elif msg.role == ContentRole.TOOL:
+
+    # Handle system messages
+    elif role_value in ('system', ContentRole.SYSTEM.value if hasattr(ContentRole, 'SYSTEM') else 'system'):
+        return {
+            "role": "system",
+            "content": get_text_content(msg.content) or ""
+        }
+
+    # Handle tool messages
+    elif role_value in ('tool', ContentRole.TOOL.value if hasattr(ContentRole, 'TOOL') else 'tool'):
+        if not msg.tool_call_id:
+            raise ValueError(f"Tool message must have tool_call_id. Message: {msg}")
+
         return {
             "role": "tool",
-            "content": get_text_content(msg.content),
+            "content": get_text_content(msg.content) or "",
             "tool_call_id": msg.tool_call_id
         }
+
+    # Handle function messages (legacy support)
+    elif role_value == 'function':
+        if not msg.tool_call_id:
+            raise ValueError(f"Function message must have tool_call_id. Message: {msg}")
+
+        return {
+            "role": "function",
+            "content": get_text_content(msg.content) or "",
+            "name": getattr(msg, 'name', 'unknown_function')
+        }
+
+    # Unknown role - provide helpful error message
     else:
-        raise ValueError(f"Unknown message role: {msg.role}")
+        available_roles = ['user', 'assistant', 'system', 'tool', 'function']
+        raise ValueError(
+            f"Unknown message role: {msg.role} (type: {type(msg.role)}). "
+            f"Supported roles: {available_roles}. "
+            f"Message content: {get_text_content(msg.content)[:100] if msg.content else 'None'}"
+        )
 
 
 def _convert_content_part(part: MessageContentPart) -> Dict[str, Any]:
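The heart of the _convert_message rewrite is the role_value normalization, which makes enum and string roles interchangeable before dispatch. A standalone illustration of that one expression (this ContentRole is a stand-in for the real enum in jaf.core.types):

# Sketch: the role normalization used by the rewritten _convert_message.
from enum import Enum

class ContentRole(str, Enum):  # stand-in for jaf.core.types.ContentRole
    USER = 'user'
    ASSISTANT = 'assistant'
    TOOL = 'tool'

def normalize_role(role) -> str:
    # Mirrors: msg.role.value if hasattr(msg.role, 'value') else str(msg.role).lower()
    return role.value if hasattr(role, 'value') else str(role).lower()

assert normalize_role(ContentRole.USER) == 'user'   # enum -> its value
assert normalize_role('ASSISTANT') == 'assistant'   # mixed-case string -> lowercased
assert normalize_role('tool') == 'tool'             # plain string passes through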
jaf_py-2.4.7.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: jaf-py
-Version: 2.4.5
+Version: 2.4.7
 Summary: A purely functional agent framework with immutable state and composable tools - Python implementation
 Author: JAF Contributors
 Maintainer: JAF Contributors
@@ -43,6 +43,7 @@ Requires-Dist: opentelemetry-api>=1.22.0
 Requires-Dist: opentelemetry-sdk>=1.22.0
 Requires-Dist: opentelemetry-exporter-otlp>=1.22.0
 Requires-Dist: langfuse<3.0.0
+Requires-Dist: litellm>=1.76.3
 Provides-Extra: tracing
 Requires-Dist: opentelemetry-api>=1.22.0; extra == "tracing"
 Requires-Dist: opentelemetry-sdk>=1.22.0; extra == "tracing"
jaf_py-2.4.7.dist-info/RECORD CHANGED
@@ -42,8 +42,9 @@ jaf/core/__init__.py,sha256=PIGKm8n6OQ8jcXRS0Hn3_Zsl8m2qX91N80YJoLCJ4eU,1762
 jaf/core/agent_tool.py,sha256=tfLNaTIcOZ0dR9GBP1AHLPkLExm_dLbURnVIN4R84FQ,11806
 jaf/core/analytics.py,sha256=zFHIWqWal0bbEFCmJDc4DKeM0Ja7b_D19PqVaBI12pA,23338
 jaf/core/composition.py,sha256=IVxRO1Q9nK7JRH32qQ4p8WMIUu66BhqPNrlTNMGFVwE,26317
-jaf/core/engine.py,sha256=bjHNn8MoE3o0BAuBCMBY3EjtpMckjWlBotJ-oinfSZ0,52111
+jaf/core/engine.py,sha256=7j8LRf52inRKN4gcCPNuXzoBKMr19S9VMyjrrb3Xlek,57406
 jaf/core/errors.py,sha256=5fwTNhkojKRQ4wZj3lZlgDnAsrYyjYOwXJkIr5EGNUc,5539
+jaf/core/guardrails.py,sha256=nv7pQuCx7-9DDZrecWO1DsDqFoujL81FBDrafOsXgcI,26179
 jaf/core/parallel_agents.py,sha256=ahwYoTnkrF4xQgV-hjc5sUaWhQWQFENMZG5riNa_Ieg,12165
 jaf/core/performance.py,sha256=jedQmTEkrKMD6_Aw1h8PdG-5TsdYSFFT7Or6k5dmN2g,9974
 jaf/core/proxy.py,sha256=_WM3cpRlSQLYpgSBrnY30UPMe2iZtlqDQ65kppE-WY0,4609
@@ -53,7 +54,7 @@ jaf/core/streaming.py,sha256=h_lYHQA9ee_D5QsDO9-Vhevgi7rFXPslPzd9605AJGo,17034
 jaf/core/tool_results.py,sha256=-bTOqOX02lMyslp5Z4Dmuhx0cLd5o7kgR88qK2HO_sw,11323
 jaf/core/tools.py,sha256=84N9A7QQ3xxcOs2eUUot3nmCnt5i7iZT9VwkuzuFBxQ,16274
 jaf/core/tracing.py,sha256=iuVgykFUSkoBjem1k6jdVLrhRZzJn-avyxc_6W9BXPI,40159
-jaf/core/types.py,sha256=8TA5cyNAc_rp5Tn-zmvt6rMi_0atRXbewpaiB5Ss7-g,23186
+jaf/core/types.py,sha256=FCc9uWTUS6P1iU-_RxJM7k-HNorsHM-0XHqwwaUGLkE,26267
 jaf/core/workflows.py,sha256=Ul-82gzjIXtkhnSMSPv-8igikjkMtW1EBo9yrfodtvI,26294
 jaf/memory/__init__.py,sha256=-L98xlvihurGAzF0DnXtkueDVvO_wV2XxxEwAWdAj50,1400
 jaf/memory/approval_storage.py,sha256=HHZ_b57kIthdR53QE5XNSII9xy1Cg-1cFUCSAZ8A4Rk,11083
@@ -69,9 +70,9 @@ jaf/plugins/base.py,sha256=clQohPxT19zj-AjRtDoevE26xS5-cm7qdUOW2VX2InY,12575
 jaf/policies/__init__.py,sha256=tfbUgIPMwMKvZsyDItP_zvbhGXGY5scKSo78LIomgDU,416
 jaf/policies/handoff.py,sha256=KJYYuL9T6v6DECRhnsS2Je6q4Aj9_zC5d_KBnvEnZNE,8318
 jaf/policies/validation.py,sha256=wn-7ynH10E5nk-_r1_kHIYHrBGmLX0EFr-FUTHrsxvc,10903
-jaf/providers/__init__.py,sha256=j_o-Rubr8d9tNYlFWb6fvzkxIBl3JKK_iabj9wTFia0,2114
+jaf/providers/__init__.py,sha256=lIbl1JvGrDhI9CzEk79N8yJNhf7ww_aWD-F40MnG3vY,2174
 jaf/providers/mcp.py,sha256=WxcC8gUFpDBBYyhorMcc1jHq3xMDMBtnwyRPthfL0S0,13074
-jaf/providers/model.py,sha256=NJTa-1k0EruDdLf2HS1ZdDpFJhHXzzfQyXAbJx9kZVc,25468
+jaf/providers/model.py,sha256=bN2Hhr0N3soZzMrCdJ1pJa4rvo80oedCphDPcNgrVMY,39336
 jaf/server/__init__.py,sha256=fMPnLZBRm6t3yQrr7-PnoHAQ8qj9o6Z1AJLM1M6bIS0,392
 jaf/server/main.py,sha256=CTb0ywbPIq9ELfay5MKChVR7BpIQOoEbPjPfpzo2aBQ,2152
 jaf/server/server.py,sha256=K8XKNyadP_YqkCRSK9jCVZh52d2_IbHp_jHkKzBeB9Q,38786
@@ -85,9 +86,9 @@ jaf/visualization/functional_core.py,sha256=zedMDZbvjuOugWwnh6SJ2stvRNQX1Hlkb9Ab
 jaf/visualization/graphviz.py,sha256=WTOM6UP72-lVKwI4_SAr5-GCC3ouckxHv88ypCDQWJ0,12056
 jaf/visualization/imperative_shell.py,sha256=GpMrAlMnLo2IQgyB2nardCz09vMvAzaYI46MyrvJ0i4,2593
 jaf/visualization/types.py,sha256=QQcbVeQJLuAOXk8ynd08DXIS-PVCnv3R-XVE9iAcglw,1389
-jaf_py-2.4.5.dist-info/licenses/LICENSE,sha256=LXUQBJxdyr-7C4bk9cQBwvsF_xwA-UVstDTKabpcjlI,1063
-jaf_py-2.4.5.dist-info/METADATA,sha256=9oUGQOOBTFoMdCVPfnyC9ucxrBXWzvporO5hDuDXkjA,27712
-jaf_py-2.4.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-jaf_py-2.4.5.dist-info/entry_points.txt,sha256=OtIJeNJpb24kgGrqRx9szGgDx1vL9ayq8uHErmu7U5w,41
-jaf_py-2.4.5.dist-info/top_level.txt,sha256=Xu1RZbGaM4_yQX7bpalo881hg7N_dybaOW282F15ruE,4
-jaf_py-2.4.5.dist-info/RECORD,,
+jaf_py-2.4.7.dist-info/licenses/LICENSE,sha256=LXUQBJxdyr-7C4bk9cQBwvsF_xwA-UVstDTKabpcjlI,1063
+jaf_py-2.4.7.dist-info/METADATA,sha256=fBAZFR5TR6J-fDS-Rufo3G7Xw4vEr0_0dPRX-riLT0Q,27743
+jaf_py-2.4.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+jaf_py-2.4.7.dist-info/entry_points.txt,sha256=OtIJeNJpb24kgGrqRx9szGgDx1vL9ayq8uHErmu7U5w,41
+jaf_py-2.4.7.dist-info/top_level.txt,sha256=Xu1RZbGaM4_yQX7bpalo881hg7N_dybaOW282F15ruE,4
+jaf_py-2.4.7.dist-info/RECORD,,