agnt5-0.2.5-cp39-abi3-macosx_11_0_arm64.whl → agnt5-0.2.7-cp39-abi3-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of agnt5 might be problematic. See the registry's advisory page for this release for more details.

agnt5/__init__.py CHANGED
@@ -6,7 +6,7 @@ with built-in durability guarantees and state management.
6
6
  """
7
7
 
8
8
  from ._compat import _import_error, _rust_available
9
- from .agent import Agent, AgentRegistry, AgentResult, Handoff, agent, handoff
9
+ from .agent import Agent, AgentContext, AgentRegistry, AgentResult, Handoff, agent, handoff
10
10
  from .client import Client, RunError
11
11
  from .context import Context
12
12
  from .function import FunctionContext
@@ -32,7 +32,7 @@ from .tool import Tool, ToolRegistry, tool
32
32
  from .types import BackoffPolicy, BackoffType, FunctionConfig, RetryPolicy, WorkflowConfig
33
33
  from .version import _get_version
34
34
  from .worker import Worker
35
- from .workflow import WorkflowRegistry, chatflow, workflow
35
+ from .workflow import WorkflowRegistry, workflow
36
36
 
37
37
  # Expose simplified language model API (recommended)
38
38
  from . import lm
@@ -46,6 +46,7 @@ __all__ = [
46
46
  "Context",
47
47
  "FunctionContext",
48
48
  "WorkflowContext",
49
+ "AgentContext",
49
50
  "Client",
50
51
  "Worker",
51
52
  "function",
@@ -57,7 +58,6 @@ __all__ = [
57
58
  "with_entity_context",
58
59
  "create_entity_context",
59
60
  "workflow",
60
- "chatflow",
61
61
  "WorkflowRegistry",
62
62
  "tool",
63
63
  "Tool",
agnt5/_core.abi3.so CHANGED
Binary file
agnt5/agent.py CHANGED
@@ -13,7 +13,7 @@ from typing import Any, Callable, Dict, List, Optional
13
13
 
14
14
  from .context import Context
15
15
  from . import lm
16
- from .lm import GenerateRequest, GenerateResponse, Message, ModelConfig, ToolDefinition
16
+ from .lm import GenerateRequest, GenerateResponse, LanguageModel, Message, ModelConfig, ToolDefinition
17
17
  from .tool import Tool, ToolRegistry
18
18
  from ._telemetry import setup_module_logger
19
19
 
@@ -23,6 +23,175 @@ logger = setup_module_logger(__name__)
23
23
  _AGENT_REGISTRY: Dict[str, "Agent"] = {}
24
24
 
25
25
 
26
+ class AgentContext(Context):
27
+ """
28
+ Context for agent execution with conversation state management.
29
+
30
+ Extends base Context with:
31
+ - State management via EntityStateManager
32
+ - Conversation history persistence
33
+ - Context inheritance (child agents share parent's state)
34
+
35
+ Three initialization modes:
36
+ 1. Standalone: Creates own state manager (playground testing)
37
+ 2. Inherit WorkflowContext: Shares parent's state manager
38
+ 3. Inherit parent AgentContext: Shares parent's state manager
39
+
40
+ Example:
41
+ ```python
42
+ # Standalone agent with conversation history
43
+ ctx = AgentContext(run_id="session-1", agent_name="tutor")
44
+ result = await agent.run("Hello", context=ctx)
45
+ result = await agent.run("Continue", context=ctx) # Remembers previous message
46
+
47
+ # Agent in workflow - shares workflow state
48
+ @workflow
49
+ async def research_workflow(ctx: WorkflowContext):
50
+ agent_result = await research_agent.run("Find AI trends", context=ctx)
51
+ # Agent has access to workflow state via inherited context
52
+ ```
53
+ """
54
+
55
+ def __init__(
56
+ self,
57
+ run_id: str,
58
+ agent_name: str,
59
+ session_id: Optional[str] = None,
60
+ state_manager: Optional[Any] = None,
61
+ parent_context: Optional[Context] = None,
62
+ attempt: int = 0,
63
+ runtime_context: Optional[Any] = None,
64
+ ):
65
+ """
66
+ Initialize agent context.
67
+
68
+ Args:
69
+ run_id: Unique execution identifier
70
+ agent_name: Name of the agent
71
+ session_id: Session identifier for conversation history (default: run_id)
72
+ state_manager: Optional state manager (for context inheritance)
73
+ parent_context: Parent context to inherit state from
74
+ attempt: Retry attempt number
75
+ runtime_context: RuntimeContext for trace correlation
76
+ """
77
+ super().__init__(run_id, attempt, runtime_context)
78
+
79
+ self._agent_name = agent_name
80
+ self._session_id = session_id or run_id
81
+
82
+ # Determine state manager based on parent context
83
+ from .entity import EntityStateManager, _get_state_manager
84
+
85
+ if state_manager:
86
+ # Explicit state manager provided
87
+ self._state_manager = state_manager
88
+ logger.debug(f"AgentContext using provided state manager")
89
+ elif parent_context:
90
+ # Try to inherit state manager from parent
91
+ try:
92
+ # Check if parent is WorkflowContext or AgentContext
93
+ if hasattr(parent_context, '_workflow_entity'):
94
+ # WorkflowContext - get state manager from worker context
95
+ self._state_manager = _get_state_manager()
96
+ logger.debug(f"AgentContext inheriting state from WorkflowContext")
97
+ elif hasattr(parent_context, '_state_manager'):
98
+ # Parent AgentContext - share state manager
99
+ self._state_manager = parent_context._state_manager
100
+ logger.debug(f"AgentContext inheriting state from parent AgentContext")
101
+ else:
102
+ # FunctionContext or base Context - create new state manager
103
+ self._state_manager = EntityStateManager()
104
+ logger.debug(f"AgentContext created new state manager (parent has no state)")
105
+ except RuntimeError:
106
+ # _get_state_manager() failed (not in worker context) - create standalone
107
+ self._state_manager = EntityStateManager()
108
+ logger.debug(f"AgentContext created standalone state manager")
109
+ else:
110
+ # Standalone - create new state manager
111
+ self._state_manager = EntityStateManager()
112
+ logger.debug(f"AgentContext created standalone state manager")
113
+
114
+ # Conversation key for state storage
115
+ self._conversation_key = f"agent:{agent_name}:{self._session_id}:messages"
116
+
117
+ @property
118
+ def state(self):
119
+ """
120
+ Get state interface for agent state management.
121
+
122
+ Returns:
123
+ EntityState instance for state operations
124
+
125
+ Example:
126
+ # Store conversation history
127
+ messages = ctx.state.get(f"agent:{agent_name}:{session_id}:messages", [])
128
+ messages.append({"role": "user", "content": "Hello"})
129
+ ctx.state.set(f"agent:{agent_name}:{session_id}:messages", messages)
130
+
131
+ # Store agent-specific data
132
+ ctx.state.set("research_results", data)
133
+ """
134
+ from .entity import EntityState
135
+
136
+ # Use agent's conversation key as the state key
137
+ state_key = ("agent", self._conversation_key)
138
+ state_dict = self._state_manager.get_or_create_state(state_key)
139
+ return EntityState(state_dict)
140
+
141
+ @property
142
+ def session_id(self) -> str:
143
+ """Get session identifier for this agent context."""
144
+ return self._session_id
145
+
146
+ def get_conversation_history(self) -> List[Message]:
147
+ """
148
+ Retrieve conversation history from state.
149
+
150
+ Returns:
151
+ List of Message objects from conversation history
152
+ """
153
+ messages_data = self.state.get(self._conversation_key, [])
154
+
155
+ # Convert dict representations back to Message objects
156
+ messages = []
157
+ for msg_dict in messages_data:
158
+ if isinstance(msg_dict, dict):
159
+ role = msg_dict.get("role", "user")
160
+ content = msg_dict.get("content", "")
161
+ if role == "user":
162
+ messages.append(Message.user(content))
163
+ elif role == "assistant":
164
+ messages.append(Message.assistant(content))
165
+ else:
166
+ # Generic message - create with MessageRole enum
167
+ from .lm import MessageRole
168
+ msg_role = MessageRole(role) if role in ("user", "assistant", "system") else MessageRole.USER
169
+ msg = Message(role=msg_role, content=content)
170
+ messages.append(msg)
171
+ else:
172
+ # Already a Message object
173
+ messages.append(msg_dict)
174
+
175
+ return messages
176
+
177
+ def save_conversation_history(self, messages: List[Message]) -> None:
178
+ """
179
+ Save conversation history to state.
180
+
181
+ Args:
182
+ messages: List of Message objects to persist
183
+ """
184
+ # Convert Message objects to dict for JSON serialization
185
+ messages_data = []
186
+ for msg in messages:
187
+ messages_data.append({
188
+ "role": msg.role.value if hasattr(msg.role, 'value') else str(msg.role),
189
+ "content": msg.content
190
+ })
191
+
192
+ self.state.set(self._conversation_key, messages_data)
193
+
194
+
26
195
  class Handoff:
27
196
  """Configuration for agent-to-agent handoff.
28
197
 
@@ -201,7 +370,7 @@ class Agent:
201
370
  def __init__(
202
371
  self,
203
372
  name: str,
204
- model: str,
373
+ model: Any, # Can be string like "openai/gpt-4o-mini" OR LanguageModel instance
205
374
  instructions: str,
206
375
  tools: Optional[List[Any]] = None,
207
376
  handoffs: Optional[List[Handoff]] = None,
@@ -210,12 +379,13 @@ class Agent:
210
379
  top_p: Optional[float] = None,
211
380
  model_config: Optional[ModelConfig] = None,
212
381
  max_iterations: int = 10,
382
+ model_name: Optional[str] = None, # For backwards compatibility with tests
213
383
  ):
214
384
  """Initialize agent.
215
385
 
216
386
  Args:
217
387
  name: Agent name/identifier
218
- model: Model string with provider prefix (e.g., "openai/gpt-4o-mini")
388
+ model: Model string with provider prefix (e.g., "openai/gpt-4o-mini") OR LanguageModel instance
219
389
  instructions: System instructions for the agent
220
390
  tools: List of tools available to the agent (functions, Tool instances, or Agent instances)
221
391
  handoffs: List of Handoff configurations for agent-to-agent delegation
@@ -224,9 +394,9 @@ class Agent:
224
394
  top_p: Nucleus sampling parameter
225
395
  model_config: Optional advanced configuration (custom endpoints, headers, etc.)
226
396
  max_iterations: Maximum reasoning iterations
397
+ model_name: Optional model name (for backwards compatibility, used when model is a LanguageModel instance)
227
398
  """
228
399
  self.name = name
229
- self.model = model
230
400
  self.instructions = instructions
231
401
  self.temperature = temperature
232
402
  self.max_tokens = max_tokens
@@ -234,6 +404,20 @@ class Agent:
234
404
  self.model_config = model_config
235
405
  self.max_iterations = max_iterations
236
406
 
407
+ # Support both string model names and LanguageModel instances
408
+ if isinstance(model, str):
409
+ # New API: model is a string like "openai/gpt-4o-mini"
410
+ self.model = model
411
+ self.model_name = model_name or model
412
+ self._language_model = None # Will create on demand
413
+ elif isinstance(model, LanguageModel):
414
+ # Old API (for tests): model is a LanguageModel instance
415
+ self._language_model = model
416
+ self.model = model # Keep for backwards compatibility
417
+ self.model_name = model_name or "mock-model"
418
+ else:
419
+ raise TypeError(f"model must be a string or LanguageModel instance, got {type(model)}")
420
+
237
421
  # Store handoffs for building handoff tools
238
422
  self.handoffs = handoffs or []
239
423
 
@@ -448,13 +632,47 @@ class Agent:
448
632
  print(result.output)
449
633
  ```
450
634
  """
451
- # Create context if not provided
635
+ # Create or adapt context
452
636
  if context is None:
637
+ # Standalone execution - create AgentContext
453
638
  import uuid
454
-
455
- context = Context(
456
- run_id=f"agent-{self.name}-{uuid.uuid4().hex[:8]}",
639
+ run_id = f"agent-{self.name}-{uuid.uuid4().hex[:8]}"
640
+ context = AgentContext(
641
+ run_id=run_id,
642
+ agent_name=self.name,
643
+ )
644
+ elif isinstance(context, AgentContext):
645
+ # Already AgentContext - use as-is
646
+ pass
647
+ elif hasattr(context, '_workflow_entity'):
648
+ # WorkflowContext - create AgentContext that inherits state
649
+ import uuid
650
+ run_id = f"{context.run_id}:agent:{self.name}"
651
+ context = AgentContext(
652
+ run_id=run_id,
653
+ agent_name=self.name,
654
+ session_id=context.run_id, # Share workflow's session
655
+ parent_context=context,
457
656
  )
657
+ else:
658
+ # FunctionContext or other - create new AgentContext
659
+ import uuid
660
+ run_id = f"{context.run_id}:agent:{self.name}"
661
+ context = AgentContext(
662
+ run_id=run_id,
663
+ agent_name=self.name,
664
+ )
665
+
666
+ # Load conversation history from state (if AgentContext)
667
+ if isinstance(context, AgentContext):
668
+ messages: List[Message] = context.get_conversation_history()
669
+ # Add new user message
670
+ messages.append(Message.user(user_message))
671
+ # Save updated conversation
672
+ context.save_conversation_history(messages)
673
+ else:
674
+ # Fallback for non-AgentContext (shouldn't happen with code above)
675
+ messages = [Message.user(user_message)]
458
676
 
459
677
  # Create span for agent execution with trace linking
460
678
  from ._core import create_span
@@ -465,12 +683,10 @@ class Agent:
465
683
  context._runtime_context if hasattr(context, "_runtime_context") else None,
466
684
  {
467
685
  "agent.name": self.name,
468
- "agent.model": self.model,
686
+ "agent.model": self.model_name, # Use model_name (always a string)
469
687
  "agent.max_iterations": str(self.max_iterations),
470
688
  },
471
689
  ) as span:
472
- # Initialize conversation
473
- messages: List[Message] = [Message.user(user_message)]
474
690
  all_tool_calls: List[Dict[str, Any]] = []
475
691
 
476
692
  # Reasoning loop
@@ -493,26 +709,42 @@ class Agent:
493
709
  "content": msg.content
494
710
  })
495
711
 
496
- # Call LLM using simplified API
497
- # TODO: Support tools in lm.generate() - for now using GenerateRequest internally
498
- request = GenerateRequest(
499
- model=self.model,
500
- system_prompt=self.instructions,
501
- messages=messages,
502
- tools=tool_defs if tool_defs else [],
503
- )
504
- request.config.temperature = self.temperature
505
- if self.max_tokens:
506
- request.config.max_tokens = self.max_tokens
507
- if self.top_p:
508
- request.config.top_p = self.top_p
509
-
510
- # Create internal LM instance for generation
511
- # TODO: Use model_config when provided
512
- from .lm import _LanguageModel
513
- provider, model_name = self.model.split('/', 1)
514
- internal_lm = _LanguageModel(provider=provider.lower(), default_model=None)
515
- response = await internal_lm.generate(request)
712
+ # Call LLM
713
+ # Check if we have a legacy LanguageModel instance or need to create one
714
+ if self._language_model is not None:
715
+ # Legacy API: use provided LanguageModel instance
716
+ request = GenerateRequest(
717
+ model="mock-model", # Not used by MockLanguageModel
718
+ system_prompt=self.instructions,
719
+ messages=messages,
720
+ tools=tool_defs if tool_defs else [],
721
+ )
722
+ request.config.temperature = self.temperature
723
+ if self.max_tokens:
724
+ request.config.max_tokens = self.max_tokens
725
+ if self.top_p:
726
+ request.config.top_p = self.top_p
727
+ response = await self._language_model.generate(request)
728
+ else:
729
+ # New API: model is a string, create internal LM instance
730
+ request = GenerateRequest(
731
+ model=self.model,
732
+ system_prompt=self.instructions,
733
+ messages=messages,
734
+ tools=tool_defs if tool_defs else [],
735
+ )
736
+ request.config.temperature = self.temperature
737
+ if self.max_tokens:
738
+ request.config.max_tokens = self.max_tokens
739
+ if self.top_p:
740
+ request.config.top_p = self.top_p
741
+
742
+ # Create internal LM instance for generation
743
+ # TODO: Use model_config when provided
744
+ from .lm import _LanguageModel
745
+ provider, model_name = self.model.split('/', 1)
746
+ internal_lm = _LanguageModel(provider=provider.lower(), default_model=None)
747
+ response = await internal_lm.generate(request)
516
748
 
517
749
  # Add assistant response to messages
518
750
  messages.append(Message.assistant(response.text))
@@ -561,6 +793,9 @@ class Agent:
561
793
  f"Handoff detected to '{result['to_agent']}', "
562
794
  f"terminating current agent"
563
795
  )
796
+ # Save conversation before returning
797
+ if isinstance(context, AgentContext):
798
+ context.save_conversation_history(messages)
564
799
  # Return immediately with handoff result
565
800
  return AgentResult(
566
801
  output=result["output"],
@@ -598,6 +833,9 @@ class Agent:
598
833
  else:
599
834
  # No tool calls - agent is done
600
835
  self.logger.debug(f"Agent completed after {iteration + 1} iterations")
836
+ # Save conversation before returning
837
+ if isinstance(context, AgentContext):
838
+ context.save_conversation_history(messages)
601
839
  return AgentResult(
602
840
  output=response.text,
603
841
  tool_calls=all_tool_calls,
@@ -607,68 +845,15 @@ class Agent:
607
845
  # Max iterations reached
608
846
  self.logger.warning(f"Agent reached max iterations ({self.max_iterations})")
609
847
  final_output = messages[-1].content if messages else "No output generated"
848
+ # Save conversation before returning
849
+ if isinstance(context, AgentContext):
850
+ context.save_conversation_history(messages)
610
851
  return AgentResult(
611
852
  output=final_output,
612
853
  tool_calls=all_tool_calls,
613
854
  context=context,
614
855
  )
615
856
 
616
- async def chat(
617
- self,
618
- user_message: str,
619
- messages: List[Message],
620
- context: Optional[Context] = None,
621
- ) -> tuple[str, List[Message]]:
622
- """Continue multi-turn conversation.
623
-
624
- Args:
625
- user_message: New user message
626
- messages: Previous conversation messages
627
- context: Optional context
628
-
629
- Returns:
630
- Tuple of (assistant_response, updated_messages)
631
-
632
- Example:
633
- ```python
634
- messages = []
635
- response, messages = await agent.chat("Hello", messages)
636
- response, messages = await agent.chat("Tell me more", messages)
637
- ```
638
- """
639
- if context is None:
640
- import uuid
641
-
642
- context = Context(
643
- run_id=f"agent-chat-{self.name}-{uuid.uuid4().hex[:8]}",
644
- )
645
-
646
- # Add user message
647
- conversation = messages + [Message.user(user_message)]
648
-
649
- # Build request (no tools for simple chat)
650
- request = GenerateRequest(
651
- model=self.model,
652
- system_prompt=self.instructions,
653
- messages=conversation,
654
- )
655
- request.config.temperature = self.temperature
656
- if self.max_tokens:
657
- request.config.max_tokens = self.max_tokens
658
- if self.top_p:
659
- request.config.top_p = self.top_p
660
-
661
- # Create internal LM instance for generation
662
- from .lm import _LanguageModel
663
- provider, model_name = self.model.split('/', 1)
664
- internal_lm = _LanguageModel(provider=provider.lower(), default_model=None)
665
- response = await internal_lm.generate(request)
666
-
667
- # Add assistant response
668
- conversation.append(Message.assistant(response.text))
669
-
670
- return response.text, conversation
671
-
672
857
 
673
858
  def agent(
674
859
  _func: Optional[Callable] = None,