agnt5-0.2.2-cp39-abi3-macosx_11_0_arm64.whl → agnt5-0.2.4-cp39-abi3-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release has been flagged as potentially problematic.


This version of agnt5 might be problematic; see the release's advisory page for more details.

agnt5/agent.py CHANGED
@@ -1,7 +1,7 @@
1
1
  """Agent component implementation for AGNT5 SDK.
2
2
 
3
- Phase 1: Simple agent with external LLM integration and tool orchestration.
4
- Phase 2: Platform-backed agents with durable execution and multi-agent coordination.
3
+ Provides simple agent with external LLM integration and tool orchestration.
4
+ Future: Platform-backed agents with durable execution and multi-agent coordination.
5
5
  """
6
6
 
7
7
  from __future__ import annotations
@@ -163,13 +163,13 @@ class AgentResult:
163
163
  class Agent:
164
164
  """Autonomous LLM-driven agent with tool orchestration.
165
165
 
166
- Phase 1: Simple agent with:
166
+ Current features:
167
167
  - LLM integration (OpenAI, Anthropic, etc.)
168
168
  - Tool selection and execution
169
169
  - Multi-turn reasoning
170
170
  - Context and state management
171
171
 
172
- Phase 2 will add:
172
+ Future enhancements:
173
173
  - Durable execution with checkpointing
174
174
  - Multi-agent coordination
175
175
  - Platform-backed tool execution
@@ -452,148 +452,161 @@ class Agent:
452
452
  component_type="agent",
453
453
  )
454
454
 
455
- # Initialize conversation
456
- messages: List[Message] = [Message.user(user_message)]
457
- all_tool_calls: List[Dict[str, Any]] = []
458
-
459
- # Reasoning loop
460
- for iteration in range(self.max_iterations):
461
- self.logger.info(f"Agent iteration {iteration + 1}/{self.max_iterations}")
462
-
463
- # Build tool definitions for LLM
464
- tool_defs = [
465
- ToolDefinition(
466
- name=tool.name,
467
- description=tool.description,
468
- parameters=tool.input_schema,
469
- )
470
- for tool in self.tools.values()
471
- ]
472
-
473
- # Convert messages to dict format for lm.generate()
474
- messages_dict = []
475
- for msg in messages:
476
- messages_dict.append({
477
- "role": msg.role.value,
478
- "content": msg.content
479
- })
480
-
481
- # Call LLM using simplified API
482
- # TODO: Support tools in lm.generate() - for now using GenerateRequest internally
483
- request = GenerateRequest(
484
- model=self.model,
485
- system_prompt=self.instructions,
486
- messages=messages,
487
- tools=tool_defs if tool_defs else [],
488
- )
489
- request.config.temperature = self.temperature
490
- if self.max_tokens:
491
- request.config.max_tokens = self.max_tokens
492
- if self.top_p:
493
- request.config.top_p = self.top_p
494
-
495
- # Create internal LM instance for generation
496
- # TODO: Use model_config when provided
497
- from .lm import _LanguageModel
498
- provider, model_name = self.model.split('/', 1)
499
- internal_lm = _LanguageModel(provider=provider.lower(), default_model=None)
500
- response = await internal_lm.generate(request)
501
-
502
- # Add assistant response to messages
503
- messages.append(Message.assistant(response.text))
504
-
505
- # Check if LLM wants to use tools
506
- if response.tool_calls:
507
- self.logger.info(f"Agent calling {len(response.tool_calls)} tool(s)")
508
-
509
- # Store current conversation in context for potential handoffs
510
- context.set("_current_conversation", messages)
511
-
512
- # Execute tool calls
513
- tool_results = []
514
- for tool_call in response.tool_calls:
515
- tool_name = tool_call["name"]
516
- tool_args_str = tool_call["arguments"]
517
-
518
- # Track tool call
519
- all_tool_calls.append(
520
- {
521
- "name": tool_name,
522
- "arguments": tool_args_str,
523
- "iteration": iteration + 1,
524
- }
455
+ # Create span for agent execution with trace linking
456
+ from ._core import create_span
457
+
458
+ with create_span(
459
+ self.name,
460
+ "agent",
461
+ context._runtime_context if hasattr(context, "_runtime_context") else None,
462
+ {
463
+ "agent.name": self.name,
464
+ "agent.model": self.model,
465
+ "agent.max_iterations": str(self.max_iterations),
466
+ },
467
+ ) as span:
468
+ # Initialize conversation
469
+ messages: List[Message] = [Message.user(user_message)]
470
+ all_tool_calls: List[Dict[str, Any]] = []
471
+
472
+ # Reasoning loop
473
+ for iteration in range(self.max_iterations):
474
+ self.logger.info(f"Agent iteration {iteration + 1}/{self.max_iterations}")
475
+
476
+ # Build tool definitions for LLM
477
+ tool_defs = [
478
+ ToolDefinition(
479
+ name=tool.name,
480
+ description=tool.description,
481
+ parameters=tool.input_schema,
525
482
  )
526
-
527
- # Execute tool
528
- try:
529
- # Parse arguments
530
- tool_args = json.loads(tool_args_str)
531
-
532
- # Get tool
533
- tool = self.tools.get(tool_name)
534
- if not tool:
535
- result_text = f"Error: Tool '{tool_name}' not found"
536
- else:
537
- # Execute tool
538
- result = await tool.invoke(context, **tool_args)
539
-
540
- # Check if this was a handoff
541
- if isinstance(result, dict) and result.get("_handoff"):
542
- self.logger.info(
543
- f"Handoff detected to '{result['to_agent']}', "
544
- f"terminating current agent"
545
- )
546
- # Return immediately with handoff result
547
- return AgentResult(
548
- output=result["output"],
549
- tool_calls=all_tool_calls + result.get("tool_calls", []),
550
- context=context,
551
- handoff_to=result["to_agent"],
552
- handoff_metadata=result,
553
- )
554
-
555
- result_text = json.dumps(result) if result else "null"
556
-
557
- tool_results.append(
558
- {"tool": tool_name, "result": result_text, "error": None}
559
- )
560
-
561
- except Exception as e:
562
- self.logger.error(f"Tool execution error: {e}")
563
- tool_results.append(
564
- {"tool": tool_name, "result": None, "error": str(e)}
483
+ for tool in self.tools.values()
484
+ ]
485
+
486
+ # Convert messages to dict format for lm.generate()
487
+ messages_dict = []
488
+ for msg in messages:
489
+ messages_dict.append({
490
+ "role": msg.role.value,
491
+ "content": msg.content
492
+ })
493
+
494
+ # Call LLM using simplified API
495
+ # TODO: Support tools in lm.generate() - for now using GenerateRequest internally
496
+ request = GenerateRequest(
497
+ model=self.model,
498
+ system_prompt=self.instructions,
499
+ messages=messages,
500
+ tools=tool_defs if tool_defs else [],
501
+ )
502
+ request.config.temperature = self.temperature
503
+ if self.max_tokens:
504
+ request.config.max_tokens = self.max_tokens
505
+ if self.top_p:
506
+ request.config.top_p = self.top_p
507
+
508
+ # Create internal LM instance for generation
509
+ # TODO: Use model_config when provided
510
+ from .lm import _LanguageModel
511
+ provider, model_name = self.model.split('/', 1)
512
+ internal_lm = _LanguageModel(provider=provider.lower(), default_model=None)
513
+ response = await internal_lm.generate(request)
514
+
515
+ # Add assistant response to messages
516
+ messages.append(Message.assistant(response.text))
517
+
518
+ # Check if LLM wants to use tools
519
+ if response.tool_calls:
520
+ self.logger.info(f"Agent calling {len(response.tool_calls)} tool(s)")
521
+
522
+ # Store current conversation in context for potential handoffs
523
+ context.set("_current_conversation", messages)
524
+
525
+ # Execute tool calls
526
+ tool_results = []
527
+ for tool_call in response.tool_calls:
528
+ tool_name = tool_call["name"]
529
+ tool_args_str = tool_call["arguments"]
530
+
531
+ # Track tool call
532
+ all_tool_calls.append(
533
+ {
534
+ "name": tool_name,
535
+ "arguments": tool_args_str,
536
+ "iteration": iteration + 1,
537
+ }
565
538
  )
566
539
 
567
- # Add tool results to conversation
568
- results_text = "\n".join(
569
- [
570
- f"Tool: {tr['tool']}\nResult: {tr['result']}"
571
- if tr["error"] is None
572
- else f"Tool: {tr['tool']}\nError: {tr['error']}"
573
- for tr in tool_results
574
- ]
575
- )
576
- messages.append(Message.user(f"Tool results:\n{results_text}"))
540
+ # Execute tool
541
+ try:
542
+ # Parse arguments
543
+ tool_args = json.loads(tool_args_str)
544
+
545
+ # Get tool
546
+ tool = self.tools.get(tool_name)
547
+ if not tool:
548
+ result_text = f"Error: Tool '{tool_name}' not found"
549
+ else:
550
+ # Execute tool
551
+ result = await tool.invoke(context, **tool_args)
552
+
553
+ # Check if this was a handoff
554
+ if isinstance(result, dict) and result.get("_handoff"):
555
+ self.logger.info(
556
+ f"Handoff detected to '{result['to_agent']}', "
557
+ f"terminating current agent"
558
+ )
559
+ # Return immediately with handoff result
560
+ return AgentResult(
561
+ output=result["output"],
562
+ tool_calls=all_tool_calls + result.get("tool_calls", []),
563
+ context=context,
564
+ handoff_to=result["to_agent"],
565
+ handoff_metadata=result,
566
+ )
567
+
568
+ result_text = json.dumps(result) if result else "null"
569
+
570
+ tool_results.append(
571
+ {"tool": tool_name, "result": result_text, "error": None}
572
+ )
573
+
574
+ except Exception as e:
575
+ self.logger.error(f"Tool execution error: {e}")
576
+ tool_results.append(
577
+ {"tool": tool_name, "result": None, "error": str(e)}
578
+ )
579
+
580
+ # Add tool results to conversation
581
+ results_text = "\n".join(
582
+ [
583
+ f"Tool: {tr['tool']}\nResult: {tr['result']}"
584
+ if tr["error"] is None
585
+ else f"Tool: {tr['tool']}\nError: {tr['error']}"
586
+ for tr in tool_results
587
+ ]
588
+ )
589
+ messages.append(Message.user(f"Tool results:\n{results_text}"))
577
590
 
578
- # Continue loop for agent to process results
591
+ # Continue loop for agent to process results
579
592
 
580
- else:
581
- # No tool calls - agent is done
582
- self.logger.info(f"Agent completed after {iteration + 1} iterations")
583
- return AgentResult(
584
- output=response.text,
585
- tool_calls=all_tool_calls,
586
- context=context,
587
- )
593
+ else:
594
+ # No tool calls - agent is done
595
+ self.logger.info(f"Agent completed after {iteration + 1} iterations")
596
+ return AgentResult(
597
+ output=response.text,
598
+ tool_calls=all_tool_calls,
599
+ context=context,
600
+ )
588
601
 
589
- # Max iterations reached
590
- self.logger.warning(f"Agent reached max iterations ({self.max_iterations})")
591
- final_output = messages[-1].content if messages else "No output generated"
592
- return AgentResult(
593
- output=final_output,
594
- tool_calls=all_tool_calls,
595
- context=context,
596
- )
602
+ # Max iterations reached
603
+ self.logger.warning(f"Agent reached max iterations ({self.max_iterations})")
604
+ final_output = messages[-1].content if messages else "No output generated"
605
+ return AgentResult(
606
+ output=final_output,
607
+ tool_calls=all_tool_calls,
608
+ context=context,
609
+ )
597
610
 
598
611
  async def chat(
599
612
  self,
agnt5/client.py CHANGED
@@ -42,6 +42,7 @@ class Client:
42
42
  self,
43
43
  component: str,
44
44
  input_data: Optional[Dict[str, Any]] = None,
45
+ component_type: str = "function",
45
46
  ) -> Dict[str, Any]:
46
47
  """Execute a component synchronously and wait for the result.
47
48
 
@@ -50,6 +51,7 @@ class Client:
50
51
  Args:
51
52
  component: Name of the component to execute
52
53
  input_data: Input data for the component (will be sent as JSON body)
54
+ component_type: Type of component - "function", "workflow", "agent", "tool" (default: "function")
53
55
 
54
56
  Returns:
55
57
  Dictionary containing the component's output
@@ -60,9 +62,12 @@ class Client:
60
62
 
61
63
  Example:
62
64
  ```python
63
- # Simple function call
65
+ # Simple function call (default)
64
66
  result = client.run("greet", {"name": "Alice"})
65
67
 
68
+ # Workflow execution (explicit)
69
+ result = client.run("order_fulfillment", {"order_id": "123"}, component_type="workflow")
70
+
66
71
  # No input data
67
72
  result = client.run("get_status")
68
73
  ```
@@ -70,8 +75,8 @@ class Client:
70
75
  if input_data is None:
71
76
  input_data = {}
72
77
 
73
- # Build URL
74
- url = urljoin(self.gateway_url + "/", f"run/{component}")
78
+ # Build URL with component type
79
+ url = urljoin(self.gateway_url + "/", f"v1/run/{component_type}/{component}")
75
80
 
76
81
  # Make request
77
82
  response = self._client.post(
@@ -81,6 +86,17 @@ class Client:
81
86
  )
82
87
 
83
88
  # Handle errors
89
+ if response.status_code == 404:
90
+ try:
91
+ error_data = response.json()
92
+ raise RunError(
93
+ error_data.get("error", "Component not found"),
94
+ run_id=error_data.get("runId"),
95
+ )
96
+ except ValueError:
97
+ # JSON parsing failed
98
+ raise RunError(f"Component '{component}' not found")
99
+
84
100
  if response.status_code == 503:
85
101
  error_data = response.json()
86
102
  raise RunError(
@@ -127,6 +143,7 @@ class Client:
127
143
  self,
128
144
  component: str,
129
145
  input_data: Optional[Dict[str, Any]] = None,
146
+ component_type: str = "function",
130
147
  ) -> str:
131
148
  """Submit a component for async execution and return immediately.
132
149
 
@@ -136,6 +153,7 @@ class Client:
136
153
  Args:
137
154
  component: Name of the component to execute
138
155
  input_data: Input data for the component (will be sent as JSON body)
156
+ component_type: Type of component - "function", "workflow", "agent", "tool" (default: "function")
139
157
 
140
158
  Returns:
141
159
  String containing the run ID
@@ -145,10 +163,13 @@ class Client:
145
163
 
146
164
  Example:
147
165
  ```python
148
- # Submit async task
166
+ # Submit async function (default)
149
167
  run_id = client.submit("process_video", {"url": "https://..."})
150
168
  print(f"Submitted: {run_id}")
151
169
 
170
+ # Submit workflow
171
+ run_id = client.submit("order_fulfillment", {"order_id": "123"}, component_type="workflow")
172
+
152
173
  # Check status later
153
174
  status = client.get_status(run_id)
154
175
  if status["status"] == "completed":
@@ -158,8 +179,8 @@ class Client:
158
179
  if input_data is None:
159
180
  input_data = {}
160
181
 
161
- # Build URL
162
- url = urljoin(self.gateway_url + "/", f"submit/{component}")
182
+ # Build URL with component type
183
+ url = urljoin(self.gateway_url + "/", f"v1/submit/{component_type}/{component}")
163
184
 
164
185
  # Make request
165
186
  response = self._client.post(
@@ -200,7 +221,7 @@ class Client:
200
221
  print(f"Status: {status['status']}")
201
222
  ```
202
223
  """
203
- url = urljoin(self.gateway_url + "/", f"status/{run_id}")
224
+ url = urljoin(self.gateway_url + "/", f"v1/status/{run_id}")
204
225
 
205
226
  response = self._client.get(url)
206
227
  response.raise_for_status()
@@ -234,7 +255,7 @@ class Client:
234
255
  print(f"Run failed: {e}")
235
256
  ```
236
257
  """
237
- url = urljoin(self.gateway_url + "/", f"result/{run_id}")
258
+ url = urljoin(self.gateway_url + "/", f"v1/result/{run_id}")
238
259
 
239
260
  response = self._client.get(url)
240
261
 
@@ -351,7 +372,7 @@ class Client:
351
372
  input_data = {}
352
373
 
353
374
  # Build URL
354
- url = urljoin(self.gateway_url + "/", f"stream/{component}")
375
+ url = urljoin(self.gateway_url + "/", f"v1/stream/{component}")
355
376
 
356
377
  # Use streaming request
357
378
  with self._client.stream(
@@ -511,10 +532,11 @@ class EntityProxy:
511
532
  Callable that executes the entity method
512
533
  """
513
534
 
514
- def method_caller(**kwargs) -> Any:
535
+ def method_caller(*args, **kwargs) -> Any:
515
536
  """Call an entity method with the given parameters.
516
537
 
517
538
  Args:
539
+ *args: Positional arguments (not recommended, use kwargs)
518
540
  **kwargs: Method parameters as keyword arguments
519
541
 
520
542
  Returns:
@@ -522,11 +544,27 @@ class EntityProxy:
522
544
 
523
545
  Raises:
524
546
  RunError: If the method execution fails
547
+ ValueError: If both positional and keyword arguments are provided
525
548
  """
526
- # Build URL: /entity/:entityType/:key/:method
549
+ # Convert positional args to kwargs if provided
550
+ if args and kwargs:
551
+ raise ValueError(
552
+ f"Cannot mix positional and keyword arguments when calling entity method '{method_name}'. "
553
+ "Please use keyword arguments only."
554
+ )
555
+
556
+ # If positional args provided, we can't convert them without knowing parameter names
557
+ # Raise helpful error
558
+ if args:
559
+ raise ValueError(
560
+ f"Entity method '{method_name}' requires keyword arguments, but got {len(args)} positional arguments. "
561
+ f"Example: .{method_name}(param1=value1, param2=value2)"
562
+ )
563
+
564
+ # Build URL: /v1/entity/:entityType/:key/:method
527
565
  url = urljoin(
528
566
  self._client.gateway_url + "/",
529
- f"entity/{self._entity_type}/{self._key}/{method_name}",
567
+ f"v1/entity/{self._entity_type}/{self._key}/{method_name}",
530
568
  )
531
569
 
532
570
  # Make request with method parameters as JSON body