agentex-sdk 0.7.0__py3-none-any.whl → 0.7.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentex/_version.py +1 -1
- agentex/lib/adk/providers/_modules/sync_provider.py +116 -15
- agentex/lib/cli/commands/init.py +16 -1
- agentex/lib/cli/templates/temporal-openai-agents/.dockerignore.j2 +43 -0
- agentex/lib/cli/templates/temporal-openai-agents/Dockerfile-uv.j2 +48 -0
- agentex/lib/cli/templates/temporal-openai-agents/Dockerfile.j2 +48 -0
- agentex/lib/cli/templates/temporal-openai-agents/README.md.j2 +224 -0
- agentex/lib/cli/templates/temporal-openai-agents/dev.ipynb.j2 +126 -0
- agentex/lib/cli/templates/temporal-openai-agents/environments.yaml.j2 +64 -0
- agentex/lib/cli/templates/temporal-openai-agents/manifest.yaml.j2 +140 -0
- agentex/lib/cli/templates/temporal-openai-agents/project/acp.py.j2 +80 -0
- agentex/lib/cli/templates/temporal-openai-agents/project/activities.py.j2 +116 -0
- agentex/lib/cli/templates/temporal-openai-agents/project/run_worker.py.j2 +56 -0
- agentex/lib/cli/templates/temporal-openai-agents/project/workflow.py.j2 +169 -0
- agentex/lib/cli/templates/temporal-openai-agents/pyproject.toml.j2 +35 -0
- agentex/lib/cli/templates/temporal-openai-agents/requirements.txt.j2 +4 -0
- agentex/lib/cli/templates/temporal-openai-agents/test_agent.py.j2 +147 -0
- agentex/resources/messages/messages.py +155 -3
- agentex/types/__init__.py +2 -0
- agentex/types/message_list_paginated_params.py +19 -0
- agentex/types/message_list_paginated_response.py +21 -0
- {agentex_sdk-0.7.0.dist-info → agentex_sdk-0.7.2.dist-info}/METADATA +1 -1
- {agentex_sdk-0.7.0.dist-info → agentex_sdk-0.7.2.dist-info}/RECORD +26 -10
- {agentex_sdk-0.7.0.dist-info → agentex_sdk-0.7.2.dist-info}/WHEEL +0 -0
- {agentex_sdk-0.7.0.dist-info → agentex_sdk-0.7.2.dist-info}/entry_points.txt +0 -0
- {agentex_sdk-0.7.0.dist-info → agentex_sdk-0.7.2.dist-info}/licenses/LICENSE +0 -0
agentex/_version.py
CHANGED

agentex/lib/adk/providers/_modules/sync_provider.py
CHANGED
@@ -21,11 +21,13 @@ from openai.types.responses import (
     ResponseOutputItemDoneEvent,
     ResponseOutputItemAddedEvent,
     ResponseCodeInterpreterToolCall,
-    ResponseReasoningSummaryPartDoneEvent,
     ResponseReasoningSummaryPartAddedEvent,
     ResponseReasoningSummaryTextDeltaEvent,
 )
 from agents.models.openai_provider import OpenAIProvider
+from openai.types.responses.response_reasoning_text_done_event import ResponseReasoningTextDoneEvent
+from openai.types.responses.response_reasoning_text_delta_event import ResponseReasoningTextDeltaEvent
+from openai.types.responses.response_reasoning_summary_text_done_event import ResponseReasoningSummaryTextDoneEvent

 from agentex import AsyncAgentex
 from agentex.lib.utils.logging import make_logger
@@ -40,6 +42,8 @@ from agentex.types.task_message_update import (
 from agentex.types.task_message_content import TextContent
 from agentex.types.tool_request_content import ToolRequestContent
 from agentex.types.tool_response_content import ToolResponseContent
+from agentex.types.reasoning_content_delta import ReasoningContentDelta
+from agentex.types.reasoning_summary_delta import ReasoningSummaryDelta

 logger = make_logger(__name__)

@@ -460,14 +464,17 @@ def _extract_tool_response_info(tool_map: dict[str, Any], tool_output_item: Any)
     return call_id, tool_name, content


-async def convert_openai_to_agentex_events(stream_response):
-    """Convert OpenAI streaming events to AgentEx TaskMessageUpdate events.
-
-
+async def convert_openai_to_agentex_events_with_reasoning(stream_response):
+    """Convert OpenAI streaming events to AgentEx TaskMessageUpdate events with reasoning support.
+
+    This is an enhanced version of the base converter that includes support for:
+    - Reasoning content deltas (for o1 models)
+    - Reasoning summary deltas (for o1 models)
+
     Args:
         stream_response: An async iterator of OpenAI streaming events
     Yields:
-        TaskMessageUpdate: AgentEx streaming events (StreamTaskMessageDelta or StreamTaskMessageDone)
+        TaskMessageUpdate: AgentEx streaming events (StreamTaskMessageDelta, StreamTaskMessageFull, or StreamTaskMessageDone)
     """

     tool_map = {}
@@ -475,7 +482,7 @@ async def convert_openai_to_agentex_events(stream_response):
     message_index = 0  # Track message index for proper sequencing
     seen_tool_output = False  # Track if we've seen tool output to know when final text starts
     item_id_to_index = {}  # Map item_id to message index
-
+    item_id_to_type = {}  # Map item_id to content type (text, reasoning_content, reasoning_summary)

     async for event in stream_response:
         event_count += 1
@@ -495,16 +502,107 @@ async def convert_openai_to_agentex_events(stream_response):
             elif isinstance(raw_event, ResponseOutputItemDoneEvent):
                 item_id = raw_event.item.id
                 if item_id in item_id_to_index:
-                    #
-
-
+                    # Get the message type to decide whether to send done event
+                    message_type = item_id_to_type.get(item_id, "text")
+
+                    # Don't send done events for reasoning content/summary
+                    # They just end with their last delta
+                    if message_type not in ("reasoning_content", "reasoning_summary"):
+                        yield StreamTaskMessageDone(
+                            type="done",
+                            index=item_id_to_index[item_id],
+                        )
+
+            # Skip reasoning summary part added events - we handle them on delta
+            elif isinstance(raw_event, ResponseReasoningSummaryPartAddedEvent):
+                pass
+
+            # Handle reasoning summary text delta events
+            elif isinstance(raw_event, ResponseReasoningSummaryTextDeltaEvent):
+                item_id = raw_event.item_id
+                summary_index = raw_event.summary_index
+
+                # If this is a new item_id we haven't seen, create a new message
+                if item_id and item_id not in item_id_to_index:
+                    message_index += 1
+                    item_id_to_index[item_id] = message_index
+                    item_id_to_type[item_id] = "reasoning_summary"
+
+                    # Send a start event for this new reasoning summary message
+                    yield StreamTaskMessageStart(
+                        type="start",
                         index=item_id_to_index[item_id],
+                        content=TextContent(
+                            type="text",
+                            author="agent",
+                            content="",  # Start with empty content
+                        ),
                     )

-
-
-
-
+                # Use the index for this item_id
+                current_index = item_id_to_index.get(item_id, message_index)
+
+                # Yield reasoning summary delta
+                yield StreamTaskMessageDelta(
+                    type="delta",
+                    index=current_index,
+                    delta=ReasoningSummaryDelta(
+                        type="reasoning_summary",
+                        summary_index=summary_index,
+                        summary_delta=raw_event.delta,
+                    ),
+                )
+
+            # Handle reasoning summary text done events
+            elif isinstance(raw_event, ResponseReasoningSummaryTextDoneEvent):
+                # We do NOT close the streaming context here
+                # as there can be multiple reasoning summaries.
+                # The context will be closed when the entire
+                # output item is done (ResponseOutputItemDoneEvent)
+                pass
+
+            # Handle reasoning content text delta events
+            elif isinstance(raw_event, ResponseReasoningTextDeltaEvent):
+                item_id = raw_event.item_id
+                content_index = raw_event.content_index
+
+                # If this is a new item_id we haven't seen, create a new message
+                if item_id and item_id not in item_id_to_index:
+                    message_index += 1
+                    item_id_to_index[item_id] = message_index
+                    item_id_to_type[item_id] = "reasoning_content"
+
+                    # Send a start event for this new reasoning content message
+                    yield StreamTaskMessageStart(
+                        type="start",
+                        index=item_id_to_index[item_id],
+                        content=TextContent(
+                            type="text",
+                            author="agent",
+                            content="",  # Start with empty content
+                        ),
+                    )
+
+                # Use the index for this item_id
+                current_index = item_id_to_index.get(item_id, message_index)
+
+                # Yield reasoning content delta
+                yield StreamTaskMessageDelta(
+                    type="delta",
+                    index=current_index,
+                    delta=ReasoningContentDelta(
+                        type="reasoning_content",
+                        content_index=content_index,
+                        content_delta=raw_event.delta,
+                    ),
+                )
+
+            # Handle reasoning content text done events
+            elif isinstance(raw_event, ResponseReasoningTextDoneEvent):
+                # We do NOT close the streaming context here
+                # as there can be multiple reasoning content texts.
+                # The context will be closed when the entire
+                # output item is done (ResponseOutputItemDoneEvent)
                 pass

             # Check if this is a text delta event from OpenAI
@@ -523,6 +621,8 @@ async def convert_openai_to_agentex_events(stream_response):
                 else:
                     item_id_to_index[item_id] = message_index

+                item_id_to_type[item_id] = "text"
+
                 # Send a start event with empty content for this new text message
                 yield StreamTaskMessageStart(
                     type="start",
@@ -548,7 +648,7 @@ async def convert_openai_to_agentex_events(stream_response):
             yield delta_message

         elif hasattr(event, 'type') and event.type == 'run_item_stream_event':
-            # Skip reasoning_item events
+            # Skip reasoning_item events - they're handled via raw_response_event above
             if hasattr(event, 'item') and event.item.type == 'reasoning_item':
                 continue

@@ -587,3 +687,4 @@ async def convert_openai_to_agentex_events(stream_response):
                 index=message_index,
                 content=tool_response_content,
             )
+
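The new converter is an async generator over `TaskMessageUpdate` events, so a caller can route reasoning output separately from ordinary text and tool updates. A minimal consumption sketch, assuming an OpenAI Agents SDK event stream as input; `handle_update` is a hypothetical callback, not part of either SDK:

```python
# Sketch only: routes the reasoning-aware converter's output to a hypothetical
# handler. `stream_response` is assumed to be the OpenAI Agents SDK event
# stream; `handle_update` is illustrative, not an AgentEx or OpenAI API.
from agentex.lib.adk.providers._modules.sync_provider import (
    convert_openai_to_agentex_events_with_reasoning,
)
from agentex.types.reasoning_content_delta import ReasoningContentDelta
from agentex.types.reasoning_summary_delta import ReasoningSummaryDelta


async def forward_events(stream_response, handle_update) -> None:
    async for update in convert_openai_to_agentex_events_with_reasoning(stream_response):
        # Reasoning deltas arrive interleaved with text and tool updates;
        # send them to a separate channel if the UI renders reasoning apart.
        if update.type == "delta" and isinstance(
            update.delta, (ReasoningContentDelta, ReasoningSummaryDelta)
        ):
            await handle_update(update, channel="reasoning")
        else:
            await handle_update(update, channel="main")
```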
agentex/lib/cli/commands/init.py
CHANGED
@@ -23,6 +23,7 @@ TEMPLATES_DIR = Path(__file__).parent.parent / "templates"

 class TemplateType(str, Enum):
     TEMPORAL = "temporal"
+    TEMPORAL_OPENAI_AGENTS = "temporal-openai-agents"
     DEFAULT = "default"
     SYNC = "sync"

@@ -54,6 +55,7 @@ def create_project_structure(
     # Define project files based on template type
     project_files = {
         TemplateType.TEMPORAL: ["acp.py", "workflow.py", "run_worker.py"],
+        TemplateType.TEMPORAL_OPENAI_AGENTS: ["acp.py", "workflow.py", "run_worker.py", "activities.py"],
         TemplateType.DEFAULT: ["acp.py"],
         TemplateType.SYNC: ["acp.py"],
     }[template_type]
@@ -152,13 +154,26 @@ def init():
         "What type of template would you like to create?",
         choices=[
             {"name": "Async - ACP Only", "value": TemplateType.DEFAULT},
-            {"name": "Async - Temporal", "value": TemplateType.TEMPORAL},
+            {"name": "Async - Temporal", "value": "temporal_submenu"},
             {"name": "Sync ACP", "value": TemplateType.SYNC},
         ],
     ).ask()
     if not template_type:
         return

+    # If Temporal was selected, show sub-menu for Temporal variants
+    if template_type == "temporal_submenu":
+        console.print()
+        template_type = questionary.select(
+            "Which Temporal template would you like to use?",
+            choices=[
+                {"name": "Basic Temporal", "value": TemplateType.TEMPORAL},
+                {"name": "Temporal + OpenAI Agents SDK (Recommended)", "value": TemplateType.TEMPORAL_OPENAI_AGENTS},
+            ],
+        ).ask()
+        if not template_type:
+            return
+
     project_path = questionary.path(
         "Where would you like to create your project?", default="."
     ).ask()
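Taken together, the hunks above give `agentex init` a two-step selection: the top-level menu routes "Async - Temporal" to a sub-menu that chooses between the basic Temporal template and the new temporal-openai-agents template (which additionally scaffolds `activities.py`). A condensed sketch of that selection flow, reconstructed from the diff rather than copied from the full command:

```python
# Condensed reconstruction of the new selection flow; the surrounding CLI
# code (console output, project name/path prompts, scaffolding) is omitted.
import questionary

from agentex.lib.cli.commands.init import TemplateType

template_type = questionary.select(
    "What type of template would you like to create?",
    choices=[
        {"name": "Async - ACP Only", "value": TemplateType.DEFAULT},
        {"name": "Async - Temporal", "value": "temporal_submenu"},
        {"name": "Sync ACP", "value": TemplateType.SYNC},
    ],
).ask()

if template_type == "temporal_submenu":
    # The sub-menu resolves the placeholder value to a concrete TemplateType.
    template_type = questionary.select(
        "Which Temporal template would you like to use?",
        choices=[
            {"name": "Basic Temporal", "value": TemplateType.TEMPORAL},
            {"name": "Temporal + OpenAI Agents SDK (Recommended)", "value": TemplateType.TEMPORAL_OPENAI_AGENTS},
        ],
    ).ask()
```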
agentex/lib/cli/templates/temporal-openai-agents/.dockerignore.j2
ADDED
@@ -0,0 +1,43 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Environments
+.env**
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# Git
+.git
+.gitignore
+
+# Misc
+.DS_Store
agentex/lib/cli/templates/temporal-openai-agents/Dockerfile-uv.j2
ADDED
@@ -0,0 +1,48 @@
+# syntax=docker/dockerfile:1.3
+FROM python:3.12-slim
+COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+    htop \
+    vim \
+    curl \
+    tar \
+    python3-dev \
+    postgresql-client \
+    build-essential \
+    libpq-dev \
+    gcc \
+    cmake \
+    netcat-openbsd \
+    nodejs \
+    npm \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/**
+
+# Install tctl (Temporal CLI)
+RUN curl -L https://github.com/temporalio/tctl/releases/download/v1.18.1/tctl_1.18.1_linux_arm64.tar.gz -o /tmp/tctl.tar.gz && \
+    tar -xzf /tmp/tctl.tar.gz -C /usr/local/bin && \
+    chmod +x /usr/local/bin/tctl && \
+    rm /tmp/tctl.tar.gz
+
+RUN uv pip install --system --upgrade pip setuptools wheel
+
+ENV UV_HTTP_TIMEOUT=1000
+
+# Copy just the pyproject.toml file to optimize caching
+COPY {{ project_path_from_build_root }}/pyproject.toml /app/{{ project_path_from_build_root }}/pyproject.toml
+
+WORKDIR /app/{{ project_path_from_build_root }}
+
+# Install the required Python packages using uv
+RUN uv pip install --system .
+
+# Copy the project code
+COPY {{ project_path_from_build_root }}/project /app/{{ project_path_from_build_root }}/project
+
+# Run the ACP server using uvicorn
+CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"]
+
+# When we deploy the worker, we will replace the CMD with the following
+# CMD ["python", "-m", "run_worker"]
agentex/lib/cli/templates/temporal-openai-agents/Dockerfile.j2
ADDED
@@ -0,0 +1,48 @@
+# syntax=docker/dockerfile:1.3
+FROM python:3.12-slim
+COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+    htop \
+    vim \
+    curl \
+    tar \
+    python3-dev \
+    postgresql-client \
+    build-essential \
+    libpq-dev \
+    gcc \
+    cmake \
+    netcat-openbsd \
+    node \
+    npm \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install tctl (Temporal CLI)
+RUN curl -L https://github.com/temporalio/tctl/releases/download/v1.18.1/tctl_1.18.1_linux_arm64.tar.gz -o /tmp/tctl.tar.gz && \
+    tar -xzf /tmp/tctl.tar.gz -C /usr/local/bin && \
+    chmod +x /usr/local/bin/tctl && \
+    rm /tmp/tctl.tar.gz
+
+RUN uv pip install --system --upgrade pip setuptools wheel
+
+ENV UV_HTTP_TIMEOUT=1000
+
+# Copy just the requirements file to optimize caching
+COPY {{ project_path_from_build_root }}/requirements.txt /app/{{ project_path_from_build_root }}/requirements.txt
+
+WORKDIR /app/{{ project_path_from_build_root }}
+
+# Install the required Python packages
+RUN uv pip install --system -r requirements.txt
+
+# Copy the project code
+COPY {{ project_path_from_build_root }}/project /app/{{ project_path_from_build_root }}/project
+
+# Run the ACP server using uvicorn
+CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"]
+
+# When we deploy the worker, we will replace the CMD with the following
+# CMD ["python", "-m", "run_worker"]
agentex/lib/cli/templates/temporal-openai-agents/README.md.j2
ADDED
@@ -0,0 +1,224 @@
+# {{ agent_name }} - AgentEx Temporal + OpenAI Agents SDK Template
+
+This is a starter template for building AI agents with the AgentEx framework, Temporal workflows, and OpenAI Agents SDK. It provides a production-ready foundation with:
+
+- **Durable execution** via Temporal workflows
+- **AI agent capabilities** via OpenAI Agents SDK
+- **Tool use** via Temporal activities
+- **Streaming responses** for real-time feedback
+- **Conversation state management** across turns
+- **Tracing/observability** via SGP integration
+
+## What You'll Learn
+
+- **Tasks**: A task is a grouping mechanism for related messages (like a conversation thread)
+- **Messages**: Communication objects within a task (text, data, instructions)
+- **Temporal Workflows**: Long-running processes with state management and async operations
+- **Activities**: Non-deterministic operations (API calls, I/O) that Temporal can retry and recover
+- **OpenAI Agents SDK**: Building AI agents with tools, instructions, and streaming
+
+## Running the Agent
+
+1. Run the agent locally:
+```bash
+agentex agents run --manifest manifest.yaml
+```
+
+The agent will start on port 8000 and be ready to handle conversations.
+
+## Project Structure
+
+```
+{{ project_name }}/
+├── project/                  # Your agent's code
+│   ├── __init__.py
+│   ├── acp.py                # ACP server with OpenAI plugin setup
+│   ├── workflow.py           # Temporal workflow with OpenAI agent
+│   ├── activities.py         # Temporal activities (tools for your agent)
+│   └── run_worker.py         # Temporal worker setup
+├── Dockerfile                # Container definition
+├── manifest.yaml             # Deployment config
+├── dev.ipynb                 # Development notebook for testing
+{% if use_uv %}
+└── pyproject.toml            # Dependencies (uv)
+{% else %}
+└── requirements.txt          # Dependencies (pip)
+{% endif %}
+```
+
+## Key Concepts
+
+### Activities as Tools
+
+Activities are Temporal's way of handling non-deterministic operations. In this template, activities also serve as tools for your OpenAI agent:
+
+```python
+# In activities.py - define the activity
+@activity.defn
+async def get_weather() -> str:
+    return "Sunny, 72°F"
+
+# In workflow.py - use it as a tool for the agent
+agent = Agent(
+    name="my-agent",
+    tools=[
+        openai_agents.workflow.activity_as_tool(
+            get_weather,
+            start_to_close_timeout=timedelta(minutes=5),
+        ),
+    ],
+)
+```
+
+### Conversation State
+
+The workflow maintains conversation history across turns using `StateModel`:
+
+```python
+class StateModel(BaseModel):
+    input_list: List[Dict[str, Any]]  # Conversation history
+    turn_number: int                  # Turn counter for tracing
+```
+
+### Tracing
+
+Each conversation turn creates a tracing span for observability:
+
+```python
+async with adk.tracing.span(
+    trace_id=params.task.id,
+    name=f"Turn {self._state.turn_number}",
+    input=turn_input.model_dump(),
+) as span:
+    # Agent execution happens here
+```
+
+## Adding New Tools/Activities
+
+See the detailed instructions in `project/activities.py`. The process is:
+
+1. **Define** the activity in `activities.py`
+2. **Register** it in `run_worker.py`
+3. **Add** it as a tool in `workflow.py`
+
+## Temporal Dashboard
+
+Monitor your workflows and activities at:
+
+```
+http://localhost:8080
+```
+
+The dashboard shows:
+- Running and completed workflows
+- Activity execution history
+- Retries and failures
+- Workflow state and signals
+
+## Development
+
+### 1. Customize the Agent
+
+Edit `project/workflow.py` to change:
+- Agent instructions
+- Model (default: `gpt-4o-mini`)
+- Tools available to the agent
+
+### 2. Add New Activities
+
+See `project/activities.py` for detailed instructions on adding new tools.
+
+### 3. Test with the Development Notebook
+
+```bash
+jupyter notebook dev.ipynb
+# Or in VS Code
+code dev.ipynb
+```
+
+### 4. Manage Dependencies
+
+{% if use_uv %}
+```bash
+# Add new dependencies
+agentex uv add requests anthropic
+
+# Install/sync dependencies
+agentex uv sync
+```
+{% else %}
+```bash
+# Add to requirements.txt
+echo "requests" >> requirements.txt
+pip install -r requirements.txt
+```
+{% endif %}
+
+## Local Development
+
+### 1. Start the Agentex Backend
+```bash
+cd agentex
+make dev
+```
+
+### 2. Setup Your Agent's Environment
+```bash
+{% if use_uv %}
+agentex uv sync
+source .venv/bin/activate
+{% else %}
+pip install -r requirements.txt
+{% endif %}
+```
+
+### 3. Run Your Agent
+```bash
+export ENVIRONMENT=development
+agentex agents run --manifest manifest.yaml
+```
+
+### 4. Interact with Your Agent
+
+Via Web UI:
+```bash
+cd agentex-web
+make dev
+# Open http://localhost:3000
+```
+
+## Environment Variables
+
+For local development, create a `.env` file:
+
+```bash
+OPENAI_API_KEY=your-api-key
+SGP_API_KEY=your-sgp-key          # Optional: for tracing
+SGP_ACCOUNT_ID=your-account-id    # Optional: for tracing
+```
+
+## Troubleshooting
+
+### Common Issues
+
+1. **Agent not responding**
+   - Check if agent is running on port 8000
+   - Verify `ENVIRONMENT=development` is set
+   - Check logs for errors
+
+2. **Temporal workflow issues**
+   - Check Temporal Web UI at http://localhost:8080
+   - Verify Temporal server is running
+   - Check workflow logs
+
+3. **OpenAI API errors**
+   - Verify `OPENAI_API_KEY` is set
+   - Check API rate limits
+   - Verify model name is correct
+
+4. **Activity failures**
+   - Check activity logs in console
+   - Verify activity is registered in `run_worker.py`
+   - Check timeout settings
+
+Happy building with Temporal + OpenAI Agents SDK!
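The README's three-step process for adding a tool ends with registering the activity in `run_worker.py`. A minimal sketch of what that registration typically looks like with the `temporalio` SDK; the task-queue name and `MyAgentWorkflow` class are placeholders, and the template's actual worker setup may differ:

```python
# Sketch of step 2 ("Register it in run_worker.py") using the temporalio SDK.
# TASK_QUEUE and MyAgentWorkflow are placeholders, not values from the template.
import asyncio

from temporalio.client import Client
from temporalio.worker import Worker

from project.activities import get_weather
from project.workflow import MyAgentWorkflow

TASK_QUEUE = "my-agent-task-queue"


async def main() -> None:
    client = await Client.connect("localhost:7233")
    worker = Worker(
        client,
        task_queue=TASK_QUEUE,
        workflows=[MyAgentWorkflow],
        activities=[get_weather],  # every activity used as a tool must be registered here
    )
    await worker.run()


if __name__ == "__main__":
    asyncio.run(main())
```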