agnt5-0.2.2-cp39-abi3-manylinux_2_34_aarch64.whl → agnt5-0.2.6-cp39-abi3-manylinux_2_34_aarch64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of agnt5 might be problematic.
- agnt5/__init__.py +12 -12
- agnt5/_core.abi3.so +0 -0
- agnt5/_retry_utils.py +169 -0
- agnt5/_schema_utils.py +312 -0
- agnt5/_telemetry.py +28 -7
- agnt5/agent.py +181 -149
- agnt5/client.py +50 -12
- agnt5/context.py +36 -756
- agnt5/entity.py +368 -1160
- agnt5/function.py +208 -235
- agnt5/lm.py +149 -26
- agnt5/tool.py +25 -11
- agnt5/tracing.py +196 -0
- agnt5/worker.py +568 -238
- agnt5/workflow.py +474 -21
- {agnt5-0.2.2.dist-info → agnt5-0.2.6.dist-info}/METADATA +2 -1
- agnt5-0.2.6.dist-info/RECORD +22 -0
- agnt5-0.2.2.dist-info/RECORD +0 -19
- {agnt5-0.2.2.dist-info → agnt5-0.2.6.dist-info}/WHEEL +0 -0
agnt5/agent.py
CHANGED
@@ -1,7 +1,7 @@
 """Agent component implementation for AGNT5 SDK.
 
-…
-…
+Provides simple agent with external LLM integration and tool orchestration.
+Future: Platform-backed agents with durable execution and multi-agent coordination.
 """
 
 from __future__ import annotations
@@ -13,7 +13,7 @@ from typing import Any, Callable, Dict, List, Optional
 
 from .context import Context
 from . import lm
-from .lm import GenerateRequest, GenerateResponse, Message, ModelConfig, ToolDefinition
+from .lm import GenerateRequest, GenerateResponse, LanguageModel, Message, ModelConfig, ToolDefinition
 from .tool import Tool, ToolRegistry
 from ._telemetry import setup_module_logger
 
@@ -163,13 +163,13 @@ class AgentResult:
 class Agent:
     """Autonomous LLM-driven agent with tool orchestration.
 
-    …
+    Current features:
     - LLM integration (OpenAI, Anthropic, etc.)
     - Tool selection and execution
     - Multi-turn reasoning
     - Context and state management
 
-    …
+    Future enhancements:
     - Durable execution with checkpointing
     - Multi-agent coordination
     - Platform-backed tool execution
@@ -201,7 +201,7 @@ class Agent:
     def __init__(
         self,
         name: str,
-        model: str,
+        model: Any,  # Can be string like "openai/gpt-4o-mini" OR LanguageModel instance
         instructions: str,
         tools: Optional[List[Any]] = None,
         handoffs: Optional[List[Handoff]] = None,
@@ -210,12 +210,13 @@ class Agent:
         top_p: Optional[float] = None,
         model_config: Optional[ModelConfig] = None,
         max_iterations: int = 10,
+        model_name: Optional[str] = None,  # For backwards compatibility with tests
     ):
         """Initialize agent.
 
         Args:
             name: Agent name/identifier
-            model: Model string with provider prefix (e.g., "openai/gpt-4o-mini")
+            model: Model string with provider prefix (e.g., "openai/gpt-4o-mini") OR LanguageModel instance
             instructions: System instructions for the agent
             tools: List of tools available to the agent (functions, Tool instances, or Agent instances)
             handoffs: List of Handoff configurations for agent-to-agent delegation
@@ -224,9 +225,9 @@ class Agent:
             top_p: Nucleus sampling parameter
             model_config: Optional advanced configuration (custom endpoints, headers, etc.)
             max_iterations: Maximum reasoning iterations
+            model_name: Optional model name (for backwards compatibility, used when model is a LanguageModel instance)
         """
         self.name = name
-        self.model = model
         self.instructions = instructions
         self.temperature = temperature
         self.max_tokens = max_tokens
@@ -234,6 +235,20 @@ class Agent:
         self.model_config = model_config
         self.max_iterations = max_iterations
 
+        # Support both string model names and LanguageModel instances
+        if isinstance(model, str):
+            # New API: model is a string like "openai/gpt-4o-mini"
+            self.model = model
+            self.model_name = model_name or model
+            self._language_model = None  # Will create on demand
+        elif isinstance(model, LanguageModel):
+            # Old API (for tests): model is a LanguageModel instance
+            self._language_model = model
+            self.model = model  # Keep for backwards compatibility
+            self.model_name = model_name or "mock-model"
+        else:
+            raise TypeError(f"model must be a string or LanguageModel instance, got {type(model)}")
+
         # Store handoffs for building handoff tools
         self.handoffs = handoffs or []
 
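The constructor hunk above makes `model` polymorphic: a provider-prefixed string creates the language model lazily, while a `LanguageModel` instance is used as-is. A minimal sketch of the two call styles (import paths and the `LanguageModel` construction are assumptions, and the instruction strings are invented):

```python
from agnt5.agent import Agent        # import path assumed
from agnt5.lm import LanguageModel   # confirmed by the import diff above

# New API: provider-prefixed model string; the internal LM is created on demand
bot = Agent(
    name="support",
    model="openai/gpt-4o-mini",
    instructions="Answer billing questions concisely.",  # illustrative
)
assert bot.model_name == "openai/gpt-4o-mini"  # model_name falls back to the string

# Old API, kept for tests: pass a LanguageModel instance directly
lm_instance = LanguageModel()  # hypothetical construction; args not shown in this diff
bot = Agent(
    name="support",
    model=lm_instance,
    instructions="Answer billing questions concisely.",
    model_name="mock-model",   # optional; defaults to "mock-model" for instances
)

# Anything else fails fast:
# Agent(name="x", model=123, instructions="...")  -> TypeError
```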
@@ -377,13 +392,16 @@ class Agent:
         if handoff_config.pass_full_history:
             # Get current conversation from the agent's run loop
             # (This will be set when we detect the handoff in run())
-            conversation_history = ctx.get("_current_conversation", [])
+            conversation_history = getattr(ctx, '_agent_data', {}).get("_current_conversation", [])
+
             if conversation_history:
                 ctx.logger.info(
                     f"Passing {len(conversation_history)} messages to target agent"
                 )
                 # Store in context for target agent to optionally use
-                ctx.set("_handoff_conversation_history", conversation_history)
+                if not hasattr(ctx, '_agent_data'):
+                    ctx._agent_data = {}
+                ctx._agent_data["_handoff_conversation_history"] = conversation_history
 
         # Execute target agent with the message and shared context
         result = await target_agent.run(message, context=ctx)
@@ -398,7 +416,9 @@ class Agent:
             "tool_calls": result.tool_calls,
         }
 
-        ctx.set("_handoff_result", handoff_data)
+        if not hasattr(ctx, '_agent_data'):
+            ctx._agent_data = {}
+        ctx._agent_data["_handoff_result"] = handoff_data
 
         # Return the handoff data (will be detected in run() loop)
         return handoff_data
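Both hunks above replace the old `ctx.set(...)` state API with an ad-hoc `_agent_data` dict hung off the context object; per the in-diff comment, full state persistence is not needed for this bookkeeping. A standalone sketch of the pattern, using a bare stand-in instead of the real `Context`:

```python
class Ctx:
    """Stand-in for agnt5.context.Context, for illustration only."""

def stash(ctx, key, value):
    # Mirrors the hasattr guard in the diff: create the scratch dict lazily
    if not hasattr(ctx, "_agent_data"):
        ctx._agent_data = {}
    ctx._agent_data[key] = value

def fetch(ctx, key, default=None):
    # getattr(..., {}) tolerates contexts that never stored anything
    return getattr(ctx, "_agent_data", {}).get(key, default)

ctx = Ctx()
assert fetch(ctx, "_current_conversation", []) == []
stash(ctx, "_handoff_result", {"to_agent": "billing"})
assert fetch(ctx, "_handoff_result")["to_agent"] == "billing"
```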
@@ -449,151 +469,164 @@ class Agent:
 
         context = Context(
             run_id=f"agent-{self.name}-{uuid.uuid4().hex[:8]}",
-            component_type="agent",
         )
 
-        … (removed lines not rendered in the diff export)
-        # Call LLM using simplified API
-        # TODO: Support tools in lm.generate() - for now using GenerateRequest internally
-        request = GenerateRequest(
-            model=self.model,
-            system_prompt=self.instructions,
-            messages=messages,
-            tools=tool_defs if tool_defs else [],
-        )
-        request.config.temperature = self.temperature
-        if self.max_tokens:
-            request.config.max_tokens = self.max_tokens
-        if self.top_p:
-            request.config.top_p = self.top_p
-
-        # Create internal LM instance for generation
-        # TODO: Use model_config when provided
-        from .lm import _LanguageModel
-        provider, model_name = self.model.split('/', 1)
-        internal_lm = _LanguageModel(provider=provider.lower(), default_model=None)
-        response = await internal_lm.generate(request)
-
-        # Add assistant response to messages
-        messages.append(Message.assistant(response.text))
-
-        # Check if LLM wants to use tools
-        if response.tool_calls:
-            self.logger.info(f"Agent calling {len(response.tool_calls)} tool(s)")
-
-            # Store current conversation in context for potential handoffs
-            context.set("_current_conversation", messages)
-
-            # Execute tool calls
-            tool_results = []
-            for tool_call in response.tool_calls:
-                tool_name = tool_call["name"]
-                tool_args_str = tool_call["arguments"]
-
-                # Track tool call
-                all_tool_calls.append(
-                    {
-                        "name": tool_name,
-                        "arguments": tool_args_str,
-                        "iteration": iteration + 1,
-                    }
+        # Create span for agent execution with trace linking
+        from ._core import create_span
+
+        with create_span(
+            self.name,
+            "agent",
+            context._runtime_context if hasattr(context, "_runtime_context") else None,
+            {
+                "agent.name": self.name,
+                "agent.model": self.model,
+                "agent.max_iterations": str(self.max_iterations),
+            },
+        ) as span:
+            # Initialize conversation
+            messages: List[Message] = [Message.user(user_message)]
+            all_tool_calls: List[Dict[str, Any]] = []
+
+            # Reasoning loop
+            for iteration in range(self.max_iterations):
+                # Build tool definitions for LLM
+                tool_defs = [
+                    ToolDefinition(
+                        name=tool.name,
+                        description=tool.description,
+                        parameters=tool.input_schema,
                     )
-        …
+                    for tool in self.tools.values()
+                ]
+
+                # Convert messages to dict format for lm.generate()
+                messages_dict = []
+                for msg in messages:
+                    messages_dict.append({
+                        "role": msg.role.value,
+                        "content": msg.content
+                    })
+
+                # Call LLM using simplified API
+                # TODO: Support tools in lm.generate() - for now using GenerateRequest internally
+                request = GenerateRequest(
+                    model=self.model,
+                    system_prompt=self.instructions,
+                    messages=messages,
+                    tools=tool_defs if tool_defs else [],
+                )
+                request.config.temperature = self.temperature
+                if self.max_tokens:
+                    request.config.max_tokens = self.max_tokens
+                if self.top_p:
+                    request.config.top_p = self.top_p
+
+                # Create internal LM instance for generation
+                # TODO: Use model_config when provided
+                from .lm import _LanguageModel
+                provider, model_name = self.model.split('/', 1)
+                internal_lm = _LanguageModel(provider=provider.lower(), default_model=None)
+                response = await internal_lm.generate(request)
+
+                # Add assistant response to messages
+                messages.append(Message.assistant(response.text))
+
+                # Check if LLM wants to use tools
+                if response.tool_calls:
+                    self.logger.debug(f"Agent calling {len(response.tool_calls)} tool(s)")
+
+                    # Store current conversation in context for potential handoffs
+                    # Use a simple dict attribute since we don't need full state persistence for this
+                    if not hasattr(context, '_agent_data'):
+                        context._agent_data = {}
+                    context._agent_data["_current_conversation"] = messages
+
+                    # Execute tool calls
+                    tool_results = []
+                    for tool_call in response.tool_calls:
+                        tool_name = tool_call["name"]
+                        tool_args_str = tool_call["arguments"]
+
+                        # Track tool call
+                        all_tool_calls.append(
+                            {
+                                "name": tool_name,
+                                "arguments": tool_args_str,
+                                "iteration": iteration + 1,
+                            }
                         )
 
-        …
+                        # Execute tool
+                        try:
+                            # Parse arguments
+                            tool_args = json.loads(tool_args_str)
+
+                            # Get tool
+                            tool = self.tools.get(tool_name)
+                            if not tool:
+                                result_text = f"Error: Tool '{tool_name}' not found"
+                            else:
+                                # Execute tool
+                                result = await tool.invoke(context, **tool_args)
+
+                                # Check if this was a handoff
+                                if isinstance(result, dict) and result.get("_handoff"):
+                                    self.logger.info(
+                                        f"Handoff detected to '{result['to_agent']}', "
+                                        f"terminating current agent"
+                                    )
+                                    # Return immediately with handoff result
+                                    return AgentResult(
+                                        output=result["output"],
+                                        tool_calls=all_tool_calls + result.get("tool_calls", []),
+                                        context=context,
+                                        handoff_to=result["to_agent"],
+                                        handoff_metadata=result,
+                                    )
+
+                                result_text = json.dumps(result) if result else "null"
+
+                            tool_results.append(
+                                {"tool": tool_name, "result": result_text, "error": None}
+                            )
+
+                        except Exception as e:
+                            self.logger.error(f"Tool execution error: {e}")
+                            tool_results.append(
+                                {"tool": tool_name, "result": None, "error": str(e)}
+                            )
+
+                    # Add tool results to conversation
+                    results_text = "\n".join(
+                        [
+                            f"Tool: {tr['tool']}\nResult: {tr['result']}"
+                            if tr["error"] is None
+                            else f"Tool: {tr['tool']}\nError: {tr['error']}"
+                            for tr in tool_results
+                        ]
+                    )
+                    messages.append(Message.user(f"Tool results:\n{results_text}\n\nPlease provide your final answer based on these results."))
 
-        …
+                    # Continue loop for agent to process results
 
-        …
+                else:
+                    # No tool calls - agent is done
+                    self.logger.debug(f"Agent completed after {iteration + 1} iterations")
+                    return AgentResult(
+                        output=response.text,
+                        tool_calls=all_tool_calls,
+                        context=context,
+                    )
 
-        …
+            # Max iterations reached
+            self.logger.warning(f"Agent reached max iterations ({self.max_iterations})")
+            final_output = messages[-1].content if messages else "No output generated"
+            return AgentResult(
+                output=final_output,
+                tool_calls=all_tool_calls,
+                context=context,
+            )
 
     async def chat(
         self,
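Taken together, the rewritten `run()` wraps the whole reasoning loop in a trace span and keeps calling tools until the model answers without one. A hedged usage sketch (the tool, instructions, and prompt are invented; `output` and `tool_calls` follow the `AgentResult` returns above, and provider credentials are assumed to be configured):

```python
import asyncio
from agnt5.agent import Agent  # import path assumed

def get_weather(city: str) -> str:
    """Toy tool for illustration; real tools may also be Tool instances."""
    return f"Sunny in {city}"

agent = Agent(
    name="weather-bot",
    model="openai/gpt-4o-mini",
    instructions="Use tools to answer weather questions.",
    tools=[get_weather],
    max_iterations=5,
)

async def main() -> None:
    result = await agent.run("What's the weather in Paris?")
    print(result.output)      # final text once no more tool calls are made
    print(result.tool_calls)  # e.g. [{"name": "get_weather", "arguments": "...", "iteration": 1}]

asyncio.run(main())
```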
@@ -623,7 +656,6 @@ class Agent:
 
         context = Context(
             run_id=f"agent-chat-{self.name}-{uuid.uuid4().hex[:8]}",
-            component_type="agent",
         )
 
         # Add user message
agnt5/client.py
CHANGED
@@ -42,6 +42,7 @@ class Client:
         self,
         component: str,
         input_data: Optional[Dict[str, Any]] = None,
+        component_type: str = "function",
     ) -> Dict[str, Any]:
         """Execute a component synchronously and wait for the result.
 
@@ -50,6 +51,7 @@ class Client:
         Args:
             component: Name of the component to execute
             input_data: Input data for the component (will be sent as JSON body)
+            component_type: Type of component - "function", "workflow", "agent", "tool" (default: "function")
 
         Returns:
             Dictionary containing the component's output
@@ -60,9 +62,12 @@ class Client:
 
         Example:
             ```python
-            # Simple function call
+            # Simple function call (default)
             result = client.run("greet", {"name": "Alice"})
 
+            # Workflow execution (explicit)
+            result = client.run("order_fulfillment", {"order_id": "123"}, component_type="workflow")
+
             # No input data
             result = client.run("get_status")
             ```
@@ -70,8 +75,8 @@ class Client:
         if input_data is None:
             input_data = {}
 
-        # Build URL
-        url = urljoin(self.gateway_url + "/", f"run/{component}")
+        # Build URL with component type
+        url = urljoin(self.gateway_url + "/", f"v1/run/{component_type}/{component}")
 
         # Make request
         response = self._client.post(
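The hunks above route run() through a versioned, type-aware path. Because `urljoin` treats the last path segment of a base URL as replaceable unless the base ends with "/", the client appends "/" before joining; a standalone check of the resulting URLs (the gateway address is illustrative):

```python
from urllib.parse import urljoin

gateway_url = "http://localhost:8080"  # illustrative

old_url = urljoin(gateway_url + "/", "run/greet")
new_url = urljoin(gateway_url + "/", "v1/run/workflow/order_fulfillment")

print(old_url)  # http://localhost:8080/run/greet
print(new_url)  # http://localhost:8080/v1/run/workflow/order_fulfillment
```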
@@ -81,6 +86,17 @@ class Client:
         )
 
         # Handle errors
+        if response.status_code == 404:
+            try:
+                error_data = response.json()
+                raise RunError(
+                    error_data.get("error", "Component not found"),
+                    run_id=error_data.get("runId"),
+                )
+            except ValueError:
+                # JSON parsing failed
+                raise RunError(f"Component '{component}' not found")
+
         if response.status_code == 503:
             error_data = response.json()
             raise RunError(
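With this hunk, a missing component surfaces as a structured `RunError` instead of a bare HTTP 404. A hedged caller-side sketch (the import path and `Client` defaults are assumptions):

```python
from agnt5.client import Client, RunError  # import path assumed

client = Client()  # assumes a default gateway URL

try:
    client.run("no_such_component")
except RunError as exc:
    # Message comes from the gateway's JSON error body when it parses;
    # otherwise the generic "Component '...' not found" fallback applies.
    print(f"run failed: {exc}")
```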
@@ -127,6 +143,7 @@ class Client:
         self,
         component: str,
         input_data: Optional[Dict[str, Any]] = None,
+        component_type: str = "function",
     ) -> str:
         """Submit a component for async execution and return immediately.
 
@@ -136,6 +153,7 @@ class Client:
         Args:
             component: Name of the component to execute
             input_data: Input data for the component (will be sent as JSON body)
+            component_type: Type of component - "function", "workflow", "agent", "tool" (default: "function")
 
         Returns:
             String containing the run ID
@@ -145,10 +163,13 @@ class Client:
 
         Example:
             ```python
-            # Submit async
+            # Submit async function (default)
             run_id = client.submit("process_video", {"url": "https://..."})
             print(f"Submitted: {run_id}")
 
+            # Submit workflow
+            run_id = client.submit("order_fulfillment", {"order_id": "123"}, component_type="workflow")
+
             # Check status later
             status = client.get_status(run_id)
             if status["status"] == "completed":
@@ -158,8 +179,8 @@ class Client:
         if input_data is None:
             input_data = {}
 
-        # Build URL
-        url = urljoin(self.gateway_url + "/", f"submit/{component}")
+        # Build URL with component type
+        url = urljoin(self.gateway_url + "/", f"v1/submit/{component_type}/{component}")
 
         # Make request
         response = self._client.post(
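The same typed routing applies to fire-and-forget submission, which pairs with the status/result endpoints updated in the next hunks. A hedged submit-and-poll sketch (`get_result` is an assumed name for the accessor behind `/v1/result/<run_id>`; the polling interval is arbitrary):

```python
import time
from agnt5.client import Client  # import path assumed

client = Client()

run_id = client.submit("order_fulfillment", {"order_id": "123"}, component_type="workflow")
print(f"Submitted: {run_id}")

# get_status() hits GET /v1/status/<run_id> after this release
while True:
    status = client.get_status(run_id)
    if status["status"] in ("completed", "failed"):
        break
    time.sleep(1.0)

if status["status"] == "completed":
    print(client.get_result(run_id))  # name assumed; GET /v1/result/<run_id>
```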
@@ -200,7 +221,7 @@ class Client:
             print(f"Status: {status['status']}")
             ```
         """
-        url = urljoin(self.gateway_url + "/", f"status/{run_id}")
+        url = urljoin(self.gateway_url + "/", f"v1/status/{run_id}")
 
         response = self._client.get(url)
         response.raise_for_status()
@@ -234,7 +255,7 @@ class Client:
             print(f"Run failed: {e}")
             ```
         """
-        url = urljoin(self.gateway_url + "/", f"result/{run_id}")
+        url = urljoin(self.gateway_url + "/", f"v1/result/{run_id}")
 
         response = self._client.get(url)
 
@@ -351,7 +372,7 @@ class Client:
             input_data = {}
 
         # Build URL
-        url = urljoin(self.gateway_url + "/", f"stream/{component}")
+        url = urljoin(self.gateway_url + "/", f"v1/stream/{component}")
 
         # Use streaming request
         with self._client.stream(
@@ -511,10 +532,11 @@ class EntityProxy:
             Callable that executes the entity method
         """
 
-        def method_caller(**kwargs) -> Any:
+        def method_caller(*args, **kwargs) -> Any:
             """Call an entity method with the given parameters.
 
             Args:
+                *args: Positional arguments (not recommended, use kwargs)
                 **kwargs: Method parameters as keyword arguments
 
             Returns:
@@ -522,11 +544,27 @@ class EntityProxy:
 
             Raises:
                 RunError: If the method execution fails
+                ValueError: If both positional and keyword arguments are provided
             """
-            # Build URL: /entity/:entityType/:key/:method
+            # Convert positional args to kwargs if provided
+            if args and kwargs:
+                raise ValueError(
+                    f"Cannot mix positional and keyword arguments when calling entity method '{method_name}'. "
+                    "Please use keyword arguments only."
+                )
+
+            # If positional args provided, we can't convert them without knowing parameter names
+            # Raise helpful error
+            if args:
+                raise ValueError(
+                    f"Entity method '{method_name}' requires keyword arguments, but got {len(args)} positional arguments. "
+                    f"Example: .{method_name}(param1=value1, param2=value2)"
+                )
+
+            # Build URL: /v1/entity/:entityType/:key/:method
             url = urljoin(
                 self._client.gateway_url + "/",
-                f"entity/{self._entity_type}/{self._key}/{method_name}",
+                f"v1/entity/{self._entity_type}/{self._key}/{method_name}",
             )
 
             # Make request with method parameters as JSON body