webagents 0.2.2__py3-none-any.whl → 0.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31) hide show
  1. webagents/__init__.py +9 -0
  2. webagents/agents/core/base_agent.py +865 -69
  3. webagents/agents/core/handoffs.py +14 -6
  4. webagents/agents/skills/base.py +33 -2
  5. webagents/agents/skills/core/llm/litellm/skill.py +906 -27
  6. webagents/agents/skills/core/memory/vector_memory/skill.py +8 -16
  7. webagents/agents/skills/ecosystem/openai/__init__.py +6 -0
  8. webagents/agents/skills/ecosystem/openai/skill.py +867 -0
  9. webagents/agents/skills/ecosystem/replicate/README.md +440 -0
  10. webagents/agents/skills/ecosystem/replicate/__init__.py +10 -0
  11. webagents/agents/skills/ecosystem/replicate/skill.py +517 -0
  12. webagents/agents/skills/examples/__init__.py +6 -0
  13. webagents/agents/skills/examples/music_player.py +329 -0
  14. webagents/agents/skills/robutler/handoff/__init__.py +6 -0
  15. webagents/agents/skills/robutler/handoff/skill.py +191 -0
  16. webagents/agents/skills/robutler/nli/skill.py +180 -24
  17. webagents/agents/skills/robutler/payments/exceptions.py +27 -7
  18. webagents/agents/skills/robutler/payments/skill.py +64 -14
  19. webagents/agents/skills/robutler/storage/files/skill.py +2 -2
  20. webagents/agents/tools/decorators.py +243 -47
  21. webagents/agents/widgets/__init__.py +6 -0
  22. webagents/agents/widgets/renderer.py +150 -0
  23. webagents/server/core/app.py +130 -15
  24. webagents/server/core/models.py +1 -1
  25. webagents/utils/logging.py +13 -1
  26. {webagents-0.2.2.dist-info → webagents-0.2.3.dist-info}/METADATA +8 -25
  27. {webagents-0.2.2.dist-info → webagents-0.2.3.dist-info}/RECORD +30 -20
  28. webagents/agents/skills/ecosystem/openai_agents/__init__.py +0 -0
  29. {webagents-0.2.2.dist-info → webagents-0.2.3.dist-info}/WHEEL +0 -0
  30. {webagents-0.2.2.dist-info → webagents-0.2.3.dist-info}/entry_points.txt +0 -0
  31. {webagents-0.2.2.dist-info → webagents-0.2.3.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,867 @@
1
+ """
2
+ OpenAI Agent Builder Skill
3
+
4
+ Runs OpenAI hosted agents/workflows and normalizes their responses
5
+ to OpenAI chat completion format for seamless handoff integration.
6
+ """
7
+
8
+ import os
9
+ import json
10
+ import httpx
11
+ import time
12
+ import urllib.parse
13
+ from typing import Dict, Any, List, Optional, AsyncGenerator
14
+ from webagents.agents.skills import Skill
15
+ from webagents.agents.skills.base import Handoff
16
+ from webagents.agents.tools.decorators import tool, prompt, http
17
+ from webagents.utils.logging import get_logger
18
+ from webagents.server.context.context_vars import get_context
19
+
20
+ # Load environment variables from .env file
21
+ try:
22
+ from dotenv import load_dotenv
23
+ load_dotenv()
24
+ except ImportError:
25
+ pass # dotenv not available, will use existing env vars
26
+
27
+ logger = get_logger('openai_agent_builder')
28
+
29
+
30
+ class OpenAIAgentBuilderSkill(Skill):
31
+ """Skill for running OpenAI hosted agents/workflows via streaming handoffs"""
32
+
33
def __init__(self, config: Optional[Dict[str, Any]] = None):
    """Set up the skill from config, environment variables, and derived URLs.

    Args:
        config: Optional configuration dict supporting:
            - workflow_id: OpenAI workflow ID (may instead live in KV)
            - api_key: OpenAI API key (falls back to OPENAI_API_KEY env var)
            - api_base: OpenAI API base URL (default https://api.openai.com/v1)
            - version: Workflow version (None = use the workflow default)
    """
    super().__init__(config or {}, scope="all")

    # Credentials from config first, then environment (KV storage may override later)
    self.api_key = self.config.get('api_key') or os.getenv('OPENAI_API_KEY')
    self.workflow_id = self.config.get('workflow_id')

    self.api_base = self.config.get('api_base', 'https://api.openai.com/v1')
    self.version = self.config.get('version')  # None = use workflow default version

    # Normalize the agents base URL used by the credential-setup callback
    root = (os.getenv("AGENTS_BASE_URL") or "http://localhost:2224").rstrip('/')
    self.agent_base_url = root if root.endswith("/agents") else root + "/agents"

    self.logger = get_logger('openai_agent_builder')

    # Streaming state: <think>-tag tracking for reasoning deltas
    self._in_thinking_block = False

    # Streaming state: accumulated deltas used for widget-data detection
    self._widget_data_buffer = ""
69
async def initialize(self, agent):
    """Attach to the agent: register the streaming handoff and its guidance prompt."""
    self.agent = agent

    # Fixed target name: the workflow_id may only become known later (via KV)
    target_name = "openai_workflow"

    # Priority 15 keeps this below the default LLM; the handoff is invoked
    # on demand, never as the default route.
    agent.register_handoff(
        Handoff(
            target=target_name,
            description="OpenAI Workflow handler",
            scope="all",
            metadata={
                'function': self.run_workflow_stream,
                'priority': 15,
                'is_generator': True  # Streaming
            }
        ),
        source="openai_agent_builder"
    )

    # Advertise the handoff so the LLM knows when (and when not) to use it
    prompt_text = self._create_handoff_prompt()
    if prompt_text:
        def openai_workflow_handoff_prompt():
            return prompt_text

        agent.register_prompt(
            openai_workflow_handoff_prompt,
            priority=3,  # Low priority - only when explicitly requested
            source="openai_agent_builder_handoff_prompt",
            scope="all"
        )
        self.logger.debug(f"📨 Registered handoff prompt for '{target_name}'")

    if self.workflow_id:
        self.logger.info(f"🔧 OpenAI Agent Builder registered with workflow: {self.workflow_id}")
    else:
        self.logger.info("🔧 OpenAI Agent Builder registered (workflow ID will be loaded from KV)")
115
def _create_handoff_prompt(self) -> Optional[str]:
    """Build the guidance text telling the LLM when to trigger the workflow handoff."""
    guidance = """
## OpenAI Workflow Available

You have access to an OpenAI hosted workflow/agent that you can invoke using the `use_openai_workflow` tool.

**When to use**: ONLY call `use_openai_workflow()` when the user **explicitly** requests it:
- "use openai workflow" / "use openai agent" / "use the openai workflow"
- "switch to openai" / "hand off to openai"

**When NOT to use**:
- Do NOT use this for general requests (images, search, documents, etc.)
- Do NOT use this unless the user explicitly mentions "openai" or "workflow"
- Use your other available tools for normal tasks

**How it works**: When you call this tool, the conversation is handed off to the OpenAI workflow, which streams its response directly to the user.
""".strip()
    return guidance
134
+ # ---------------- Credential Management ----------------
135
+
136
async def _get_kv_skill(self):
    """Return the agent's key-value storage skill, preferring 'kv' over 'json_storage'."""
    registry = self.agent.skills
    return registry.get("kv") or registry.get("json_storage")
140
async def _get_owner_id_from_context(self) -> Optional[str]:
    """Best-effort lookup of the requesting owner's ID from the request context.

    Returns None when no context is available or the auth payload is malformed.
    """
    try:
        ctx = get_context()
    except Exception:
        return None
    if not ctx:
        return None
    try:
        # auth may be an attribute on the context or a dict entry
        auth = getattr(ctx, 'auth', None) or ctx.get('auth')
        return getattr(auth, 'owner_id', None) or getattr(auth, 'user_id', None)
    except Exception:
        return None
151
async def _save_credentials(self, api_key: str, workflow_id: str) -> None:
    """Persist the OpenAI API key and workflow ID to the 'openai' KV namespace.

    Silently does nothing when no KV backend with kv_set is available.
    """
    store = await self._get_kv_skill()
    if not store or not hasattr(store, 'kv_set'):
        return
    payload = json.dumps({"api_key": api_key, "workflow_id": workflow_id})
    await store.kv_set(key="openai_credentials", value=payload, namespace="openai")
158
async def _load_credentials(self) -> Optional[Dict[str, str]]:
    """Fetch stored OpenAI credentials from KV storage.

    Returns the parsed {"api_key", "workflow_id"} dict, or None when no KV
    backend exists, nothing is stored, or the stored value is unreadable.
    """
    store = await self._get_kv_skill()
    if not store or not hasattr(store, 'kv_get'):
        return None
    try:
        raw = await store.kv_get(key="openai_credentials", namespace="openai")
        # Only accept something that looks like a serialized JSON object
        if isinstance(raw, str) and raw.startswith('{'):
            return json.loads(raw)
    except Exception:
        # Best-effort: treat any storage/parse failure as "not configured"
        pass
    return None
170
def _build_setup_url(self) -> str:
    """Compose the credential-setup form URL for this agent.

    On localhost the agent's API key is appended as a ?token= query param
    because cookies do not survive cross-port requests (e.g. 3000 -> 2224);
    in production the same origin means cookies work normally.
    """
    root = self.agent_base_url.rstrip('/')
    url = f"{root}/{self.agent.name}/setup/openai"

    is_local = 'localhost' in root or '127.0.0.1' in root
    if is_local and getattr(self.agent, 'api_key', None):
        url = f"{url}?token={self.agent.api_key}"

    return url
187
def _setup_form_html(self, success: bool = False, error: str = None, token: str = None) -> str:
    """Render the OpenAI credential-setup page.

    Args:
        success: Render the "saved" confirmation instead of the form.
        error: Optional error message displayed above the form (HTML-escaped).
        token: Optional auth token propagated through the form action
            (localhost cross-port authentication).

    Returns:
        A complete HTML document as a string.
    """
    from string import Template

    color_ok = "#16a34a"  # green-600
    color_err = "#dc2626"  # red-600
    accent = color_ok if success else color_err
    title = "OpenAI Setup Complete" if success else ("OpenAI Setup Error" if error else "OpenAI Setup")

    # HTML-escape the error text.
    # BUGFIX: the previous '$' -> '$$' replacement was dropped. Template.substitute()
    # only scans the template string itself, never the substituted values, so
    # doubling '$' caused a literal '$$' to appear in the rendered error.
    safe_error = (error or '').replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;') if error else ''

    if success:
        message_html = f"""
<div style="background: {color_ok}; color: white; padding: 1rem; border-radius: 0.5rem; margin-bottom: 1.5rem;">
  <div style="font-weight: 600; margin-bottom: 0.25rem;">✓ Credentials saved successfully</div>
  <div style="font-size: 0.875rem; opacity: 0.9;">Your OpenAI API key and workflow ID have been configured.</div>
</div>
<p style="margin-bottom: 1rem;">You can now close this window and return to your agent.</p>
"""
    elif error:
        message_html = f"""
<div style="background: {color_err}; color: white; padding: 1rem; border-radius: 0.5rem; margin-bottom: 1.5rem;">
  <div style="font-weight: 600; margin-bottom: 0.25rem;">✗ Setup failed</div>
  <div style="font-size: 0.875rem; opacity: 0.9;">{safe_error}</div>
</div>
"""
    else:
        message_html = ""

    # Include token in the form action for localhost cross-port auth
    form_action = f"?token={token}" if token else ""

    form_html = "" if success else f"""
<form method="post" action="{form_action}" style="display: flex; flex-direction: column; gap: 1rem;">
  <div>
    <label for="api_key" style="display: block; font-weight: 600; margin-bottom: 0.5rem;">OpenAI API Key</label>
    <input
      type="password"
      id="api_key"
      name="api_key"
      required
      placeholder="sk-..."
      style="width: 100%; padding: 0.75rem; border: 1px solid var(--border, #374151); border-radius: 0.5rem; background: var(--input-bg, #1f2937); color: var(--fg, #e5e7eb); font-family: ui-monospace, monospace; font-size: 0.875rem;"
    />
  </div>
  <div>
    <label for="workflow_id" style="display: block; font-weight: 600; margin-bottom: 0.5rem;">Workflow ID</label>
    <input
      type="text"
      id="workflow_id"
      name="workflow_id"
      required
      placeholder="wf_..."
      style="width: 100%; padding: 0.75rem; border: 1px solid var(--border, #374151); border-radius: 0.5rem; background: var(--input-bg, #1f2937); color: var(--fg, #e5e7eb); font-family: ui-monospace, monospace; font-size: 0.875rem;"
    />
  </div>
  <button
    type="submit"
    style="padding: 0.75rem 1.5rem; background: {accent}; color: white; border: none; border-radius: 0.5rem; font-weight: 600; cursor: pointer; font-size: 1rem;"
  >
    Save Credentials
  </button>
</form>
"""

    # string.Template keeps the literal CSS braces intact (an f-string would
    # require doubling every '{' in the stylesheet).
    template = Template("""<!doctype html>
<html lang="en">
<head>
  <meta charset="utf-8" />
  <meta name="viewport" content="width=device-width, initial-scale=1" />
  <title>WebAgents – OpenAI Setup</title>
  <style>
    :root { color-scheme: light dark; }
    html, body { height: 100%; margin: 0; font-family: ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto, Helvetica, Arial; }
    body { background: var(--bg, #0b0b0c); color: var(--fg, #e5e7eb); display: grid; place-items: center; padding: 1rem; }
    @media (prefers-color-scheme: light) {
      body { --bg: #f7f7f8; --card: #ffffff; --border: #e5e7eb; --fg: #0f172a; --input-bg: #ffffff; }
    }
    .card {
      background: var(--card, #18181b);
      border: 1px solid var(--border, #27272a);
      border-radius: 1rem;
      padding: 2rem;
      max-width: 28rem;
      width: 100%;
      box-shadow: 0 10px 15px -3px rgb(0 0 0 / 0.1);
    }
    h1 { margin: 0 0 1.5rem 0; font-size: 1.5rem; font-weight: 700; }
  </style>
</head>
<body>
  <div class="card">
    <h1>${title}</h1>
    ${message}
    ${form}
  </div>
</body>
</html>""")

    return template.substitute(title=title, message=message_html, form=form_html)
289
+ # ---------------- HTTP Endpoints ----------------
290
+
291
@http(subpath="/setup/openai", method="get", scope=["owner"])
async def show_setup_form(self, token: str = None) -> Dict[str, Any]:
    """Serve the credential-setup form (owner-only GET endpoint)."""
    from fastapi.responses import HTMLResponse
    page = self._setup_form_html(token=token)
    return HTMLResponse(content=page)
297
@http(subpath="/setup/openai", method="post", scope=["owner"])
async def setup_credentials(self, api_key: str = "", workflow_id: str = "", token: str = None) -> Dict[str, Any]:
    """Persist posted OpenAI credentials and render the result page (owner-only POST)."""
    from fastapi.responses import HTMLResponse

    key = (api_key or "").strip()
    wf = (workflow_id or "").strip()

    # Reject incomplete submissions up front
    if not key or not wf:
        return HTMLResponse(content=self._setup_form_html(error="Both API key and workflow ID are required", token=token))

    try:
        await self._save_credentials(key, wf)
    except Exception as exc:
        return HTMLResponse(content=self._setup_form_html(error=str(exc), token=token))
    return HTMLResponse(content=self._setup_form_html(success=True, token=token))
315
+ # ---------------- Prompts ----------------
316
+
317
@prompt(priority=40, scope=["owner", "all"])
async def openai_prompt(self) -> str:
    """System-prompt snippet: setup link when unconfigured, status line otherwise."""
    kv_skill = await self._get_kv_skill()
    if kv_skill and not await self._load_credentials():
        setup_url = self._build_setup_url()
        return f"OpenAI workflow skill available but not configured. Set up credentials at: {setup_url}"
    return "OpenAI workflow integration is available for running hosted workflows."
328
+ # ---------------- Tools ----------------
329
+
330
@tool(
    description="Switch to OpenAI workflow for direct streaming response (use when user requests OpenAI workflow/agent)",
    scope=["all"]
)
async def use_openai_workflow(self) -> str:
    """Request a handoff to the configured OpenAI workflow.

    Returns the framework handoff marker; the framework then executes the
    handoff and streams the workflow response directly to the user. When
    credentials are missing, returns a setup link instead.
    """
    key = self.api_key
    wf = self.workflow_id

    # Fall back to KV-stored credentials when env/config is incomplete
    if not key or not wf:
        stored = await self._load_credentials()
        if stored:
            key = stored.get('api_key')
            wf = stored.get('workflow_id')

    if not key or not wf:
        setup_url = self._build_setup_url()
        return f"❌ OpenAI credentials not configured. Set up at: {setup_url}"

    # Target name is fixed at registration time (always "openai_workflow")
    return self.request_handoff("openai_workflow")
357
@tool(description="Update or remove OpenAI credentials (API key and workflow ID)", scope=["owner"])
async def update_openai_credentials(self, api_key: str = None, workflow_id: str = None, remove: bool = False) -> str:
    """Update or remove stored OpenAI credentials.

    Args:
        api_key: New OpenAI API key (required unless remove=True).
        workflow_id: New workflow ID (required unless remove=True).
        remove: When True, delete the stored credentials instead of updating.

    Returns:
        A human-readable status string prefixed with ✓ on success or ❌ on failure.
    """
    kv_skill = await self._get_kv_skill()
    if not kv_skill:
        return "❌ KV skill not available. Credentials are configured via environment variables."

    if remove:
        # BUGFIX: previously a KV backend without kv_delete fell through to the
        # unrelated "Both api_key and workflow_id are required" error below.
        if not hasattr(kv_skill, 'kv_delete'):
            return "❌ KV skill does not support deletion"
        try:
            await kv_skill.kv_delete(key="openai_credentials", namespace="openai")
            return "✓ OpenAI credentials removed"
        except Exception as e:
            return f"❌ Failed to remove credentials: {e}"

    if not api_key or not workflow_id:
        return "❌ Both api_key and workflow_id are required"

    try:
        await self._save_credentials(api_key, workflow_id)
        return "✓ OpenAI credentials updated successfully"
    except Exception as e:
        return f"❌ Failed to update credentials: {e}"
381
+ # ---------------- Usage Tracking ----------------
382
+
383
def _log_workflow_usage(self, usage_data: Dict[str, Any], model: Optional[str]) -> None:
    """Append an LLM usage record to the request context for cost tracking.

    Args:
        usage_data: Usage dict from the workflow response; supports both the
            prompt/completion and input/output token field names.
        model: Model identifier; falls back to a workflow-derived label.
    """
    try:
        context = get_context()
        if not context or not hasattr(context, 'usage'):
            return

        # Workflows may report tokens under either naming scheme
        prompt_tokens = usage_data.get('prompt_tokens', 0) or usage_data.get('input_tokens', 0)
        completion_tokens = usage_data.get('completion_tokens', 0) or usage_data.get('output_tokens', 0)
        total_tokens = usage_data.get('total_tokens', 0) or (prompt_tokens + completion_tokens)

        if total_tokens <= 0:
            self.logger.debug(f"⚠️ Workflow usage data present but no tokens: {usage_data}")
            return

        context.usage.append({
            'type': 'llm',
            'timestamp': time.time(),
            'model': model or f'openai-workflow-{self.workflow_id}',
            'prompt_tokens': int(prompt_tokens),
            'completion_tokens': int(completion_tokens),
            'total_tokens': int(total_tokens),
            'streaming': True,
            'source': 'openai_workflow'
        })
        self.logger.info(f"💰 Workflow usage logged: {total_tokens} tokens (prompt={prompt_tokens}, completion={completion_tokens}) for model={model}")
    except Exception as e:
        # Usage accounting must never break the response stream
        self.logger.warning(f"Failed to log workflow usage: {e}")
419
def _wrap_thinking_content(self, delta_text: str, response_data: Dict[str, Any]) -> str:
    """Wrap reasoning-stream deltas in <think> tags.

    OpenAI workflows mark reasoning output with event types such as
    "response.reasoning_summary_text.delta"; this opens a <think> block on
    the first such delta and closes it on the next *typed* non-reasoning one
    (untyped deltas pass through untouched).

    Args:
        delta_text: The delta content from the workflow response.
        response_data: Event payload; its 'type' field drives detection.

    Returns:
        The delta, possibly prefixed with <think> or </think>.
    """
    delta_type = response_data.get('type', '')
    lowered = delta_type.lower()
    reasoning_like = any(marker in lowered for marker in ('reasoning', 'thinking', 'summary'))

    if reasoning_like:
        if self._in_thinking_block:
            return delta_text
        self._in_thinking_block = True
        self.logger.debug(f"🧠 Starting thinking block (type={delta_type})")
        return f"<think>{delta_text}"

    if self._in_thinking_block and delta_type:
        self._in_thinking_block = False
        self.logger.debug(f"🧠 Ending thinking block (type={delta_type})")
        return f"</think>{delta_text}"

    return delta_text
455
def _convert_messages_to_workflow_input(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Translate OpenAI chat messages into the workflow input schema.

    Args:
        messages: OpenAI format messages [{"role": "user", "content": "..."}].

    Returns:
        Messages with content normalized to a list of
        [{"type": "input_text", "text": "..."}] parts (list content passes
        through unchanged; other types are coerced to text).
    """
    converted = []
    for message in messages:
        role = message.get('role', 'user')
        content = message.get('content', '')

        if isinstance(content, list):
            # Already structured - forward as-is
            parts = content
        elif isinstance(content, str):
            parts = [{"type": "input_text", "text": content}]
        else:
            # Fallback: coerce anything else to text
            parts = [{"type": "input_text", "text": str(content)}]

        converted.append({"role": role, "content": parts})

    return converted
494
async def run_workflow_stream(
    self,
    messages: List[Dict[str, Any]],
    tools: Optional[List[Dict[str, Any]]] = None,
    **kwargs
) -> AsyncGenerator[Dict[str, Any], None]:
    """Run the OpenAI workflow and stream OpenAI chat-completion chunks.

    Args:
        messages: OpenAI format chat messages.
        tools: Optional tools (not used by workflows currently).
        **kwargs: Additional parameters (ignored).

    Yields:
        OpenAI chat completion streaming chunks. Errors are surfaced as a
        final assistant chunk with finish_reason 'stop' rather than raised.
    """
    # Reset per-request state
    self._usage_logged = False
    self._in_thinking_block = False
    self._widget_data_buffer = ""

    def make_chunk(delta: Dict[str, Any], finish: Optional[str], created: int) -> Dict[str, Any]:
        """Build one OpenAI-format streaming chunk for this workflow."""
        return {
            'id': f'chatcmpl-wf-{self.workflow_id}',
            'object': 'chat.completion.chunk',
            'created': created,
            'model': f'openai-workflow-{self.workflow_id}',
            'choices': [{
                'index': 0,
                'delta': delta,
                'finish_reason': finish
            }]
        }

    def extract_widget_data(widget_data_obj) -> Optional[str]:
        """Return widget props as a JSON string, from the event payload or the buffered deltas."""
        if widget_data_obj:
            # Widget event carries the data directly - use it
            text = json.dumps(widget_data_obj) if isinstance(widget_data_obj, dict) else str(widget_data_obj)
            self.logger.debug(f"🎨 Using widget data from event (length={len(text)})")
            return text
        if not self._widget_data_buffer:
            return None
        # Fallback: widget props often stream as a trailing JSON object in the deltas
        buffer_stripped = self._widget_data_buffer.strip().replace('</think>', '').strip()
        if not buffer_stripped.endswith('}'):
            return None
        # Scan backwards for the matching opening brace
        brace_count = 0
        start_idx = -1
        for i in range(len(buffer_stripped) - 1, -1, -1):
            if buffer_stripped[i] == '}':
                brace_count += 1
            elif buffer_stripped[i] == '{':
                brace_count -= 1
                if brace_count == 0:
                    start_idx = i
                    break
        if start_idx < 0:
            return None
        candidate = buffer_stripped[start_idx:]
        try:
            json.loads(candidate)  # validate before embedding
        except json.JSONDecodeError:
            return None
        self.logger.debug(f"🎨 Found widget data in buffer (length={len(candidate)})")
        return candidate

    # KV-stored credentials take precedence over env/config
    stored_creds = await self._load_credentials()
    if stored_creds:
        api_key = stored_creds.get("api_key")
        workflow_id = stored_creds.get("workflow_id")
        self.logger.debug("🔑 Using credentials from KV storage")
    else:
        api_key = self.api_key
        workflow_id = self.workflow_id
        self.logger.debug("🔑 Using credentials from environment/config")

    if not api_key or not workflow_id:
        kv_skill = await self._get_kv_skill()
        if kv_skill:
            setup_url = self._build_setup_url()
            error_msg = f"OpenAI credentials not configured. Please set up your API key and workflow ID: {setup_url}"
        else:
            error_msg = "OpenAI API key or workflow ID not configured. Please set OPENAI_API_KEY environment variable and workflow_id in config."
        self.logger.error(f"❌ {error_msg}")
        yield {
            'id': f'error-{int(time.time())}',
            'object': 'chat.completion.chunk',
            'created': int(time.time()),
            'model': f'openai-workflow-{workflow_id or "unknown"}',
            'choices': [{
                'index': 0,
                'delta': {'role': 'assistant', 'content': error_msg},
                'finish_reason': 'stop'
            }]
        }
        return

    workflow_url = f"{self.api_base}/workflows/{workflow_id}/run"

    # Workflows only understand user turns; drop system/assistant messages
    user_messages = [msg for msg in messages if msg.get('role') == 'user']
    workflow_input = self._convert_messages_to_workflow_input(user_messages) if user_messages else []

    # Request payload matching OpenAI workflows v6 format
    payload = {
        "input_data": {"input": workflow_input},
        "state_values": [],
        "session": True,  # Enable session for multi-turn conversations
        "tracing": {"enabled": True},  # Enable tracing for debugging
        "stream": True
    }
    if self.version is not None:
        payload["version"] = str(self.version)

    self.logger.debug(f"🔄 Calling OpenAI workflow: {workflow_url}")

    headers = {
        "authorization": f"Bearer {api_key}",
        "content-type": "application/json"
    }

    chunk_id = 0
    accumulated_content = ""
    # BUGFIX: initialize before the loop instead of probing `locals()` — a
    # data line arriving before any event line no longer needs the fragile
    # "'current_event' in locals()" guard.
    current_event = ''

    try:
        async with httpx.AsyncClient(timeout=120.0) as client:
            async with client.stream('POST', workflow_url, json=payload, headers=headers) as response:
                response.raise_for_status()

                # Parse the SSE stream: "event: type" / "data: json" lines
                async for line in response.aiter_lines():
                    if not line or line.startswith(':'):
                        continue

                    if line.startswith('event: '):
                        current_event = line[7:].strip()
                        continue

                    if not line.startswith('data: '):
                        continue
                    data_str = line[6:].strip()

                    try:
                        data = json.loads(data_str)
                        event_type = data.get('type', current_event)
                        created_at = data.get('workflow_run', {}).get('created_at', 0)

                        # Streaming content deltas from the active agent node
                        if event_type == 'workflow.node.agent.response':
                            response_data = data.get('data', {})
                            delta_text = response_data.get('delta')

                            # Usage may ride along on the embedded response object
                            response_obj = response_data.get('response', {})
                            if response_obj and isinstance(response_obj, dict):
                                usage_data = response_obj.get('usage')
                                model = response_obj.get('model')
                                if usage_data and isinstance(usage_data, dict) and not self._usage_logged:
                                    self._usage_logged = True
                                    self._log_workflow_usage(usage_data, model)

                            if delta_text and isinstance(delta_text, str):
                                chunk_id += 1
                                wrapped_delta = self._wrap_thinking_content(delta_text, response_data)

                                # Accumulate for widget-data detection (widget
                                # JSON often streams right before its event)
                                self._widget_data_buffer += wrapped_delta
                                accumulated_content += wrapped_delta

                                delta_obj = {'content': wrapped_delta}
                                if chunk_id == 1:
                                    delta_obj['role'] = 'assistant'

                                yield make_chunk(delta_obj, None, created_at)
                                continue  # Event fully handled

                        # Workflow completed - close any open think block, then finish
                        if event_type == 'workflow.finished':
                            self.logger.debug(f"📥 Workflow finished. Total content: {len(accumulated_content)} chars")

                            # Fallback usage logging from the final result
                            if not self._usage_logged:
                                workflow_result = data.get('result', {})
                                if workflow_result and isinstance(workflow_result, dict):
                                    usage_data = workflow_result.get('usage')
                                    model = workflow_result.get('model')
                                    if usage_data and isinstance(usage_data, dict):
                                        self._usage_logged = True
                                        self._log_workflow_usage(usage_data, model)

                            if self._in_thinking_block:
                                self.logger.debug("🧠 Closing thinking block at workflow finish")
                                yield make_chunk({'content': '</think>'}, None, created_at)
                                self._in_thinking_block = False

                            # Content was already streamed via deltas
                            yield make_chunk({}, 'stop', created_at)

                        elif event_type == 'workflow.failed':
                            error_msg = data.get('workflow_run', {}).get('error', 'Unknown error')
                            self.logger.error(f"❌ Workflow failed: {json.dumps(error_msg, indent=2)}")
                            yield make_chunk(
                                {'role': 'assistant', 'content': f"Workflow error: {error_msg}"},
                                'stop', created_at
                            )

                        elif event_type == 'workflow.node.agent.widget':
                            widget_json = data.get('widget')
                            widget_data_obj = data.get('data') or data.get('props') or data.get('widget_data')
                            self.logger.debug(f"🎨 Widget event received - widget: {bool(widget_json)}, data: {bool(widget_data_obj)}")

                            if widget_json:
                                # Widgets belong outside any thinking block
                                if self._in_thinking_block:
                                    self.logger.debug("🧠 Closing thinking block before widget")
                                    chunk_id += 1
                                    yield make_chunk({'content': '</think>\n'}, None, created_at)
                                    accumulated_content += '</think>\n'
                                    self._in_thinking_block = False

                                chunk_id += 1
                                widget_data = extract_widget_data(widget_data_obj)

                                if widget_data:
                                    # Escape single quotes so the data attribute parses cleanly
                                    escaped_data = widget_data.replace("'", "&#39;")
                                    widget_content = f"\n<widget kind='openai' data='{escaped_data}'>{widget_json}</widget>\n"
                                else:
                                    widget_content = f"\n<widget kind='openai'>{widget_json}</widget>\n"

                                accumulated_content += widget_content
                                self._widget_data_buffer = ""  # Clear buffer after widget

                                self.logger.debug(f"🎨 Rendering widget (structure length={len(widget_json)}, has_data={widget_data is not None})")
                                yield make_chunk({'content': widget_content}, None, created_at)

                        elif event_type in ['workflow.started', 'workflow.node.started', 'workflow.node.finished']:
                            self.logger.debug(f"🔄 Workflow event: {event_type}")

                    except json.JSONDecodeError as e:
                        self.logger.warning(f"Failed to parse SSE data: {e}")
                        continue

    except httpx.HTTPStatusError as e:
        # Don't try to read response.text on streaming responses
        error_msg = f"HTTP {e.response.status_code}"
        try:
            if hasattr(e.response, '_content') and e.response._content is not None:
                error_msg = f"{error_msg} - {e.response.text[:200]}"
        except Exception:
            pass

        self.logger.error(f"OpenAI workflow API error: {error_msg}")
        yield make_chunk({'role': 'assistant', 'content': f"Error running workflow: {error_msg}"}, 'stop', 0)

    except Exception as e:
        self.logger.error(f"Error running OpenAI workflow: {e}", exc_info=True)
        yield make_chunk({'role': 'assistant', 'content': f"Error running workflow: {str(e)}"}, 'stop', 0)