glaip-sdk 0.6.16__py3-none-any.whl → 0.6.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- glaip_sdk/client/run_rendering.py +50 -3
- glaip_sdk/hitl/__init__.py +15 -0
- glaip_sdk/hitl/local.py +151 -0
- glaip_sdk/runner/langgraph.py +95 -7
- glaip_sdk/utils/rendering/renderer/base.py +58 -0
- glaip_sdk/utils/tool_storage_provider.py +140 -0
- {glaip_sdk-0.6.16.dist-info → glaip_sdk-0.6.18.dist-info}/METADATA +6 -6
- {glaip_sdk-0.6.16.dist-info → glaip_sdk-0.6.18.dist-info}/RECORD +11 -8
- {glaip_sdk-0.6.16.dist-info → glaip_sdk-0.6.18.dist-info}/WHEEL +0 -0
- {glaip_sdk-0.6.16.dist-info → glaip_sdk-0.6.18.dist-info}/entry_points.txt +0 -0
- {glaip_sdk-0.6.16.dist-info → glaip_sdk-0.6.18.dist-info}/top_level.txt +0 -0
glaip_sdk/client/run_rendering.py
CHANGED

@@ -30,6 +30,7 @@ from glaip_sdk.utils.rendering.renderer import (
 from glaip_sdk.utils.rendering.state import TranscriptBuffer
 
 NO_AGENT_RESPONSE_FALLBACK = "No agent response received."
+_FINAL_EVENT_TYPES = {"final_response", "error", "step_limit_exceeded"}
 
 
 def _coerce_to_string(value: Any) -> str:
@@ -156,6 +157,9 @@ class AgentRunRenderingManager:
 
                 if controller and getattr(controller, "enabled", False):
                     controller.poll(renderer)
+                parsed_event = self._parse_event(event)
+                if parsed_event and self._is_final_event(parsed_event):
+                    break
         finally:
             if controller and getattr(controller, "enabled", False):
                 controller.on_stream_complete()
@@ -225,6 +229,8 @@ class AgentRunRenderingManager:
 
                 if controller and getattr(controller, "enabled", False):
                     controller.poll(renderer)
+                if parsed_event and self._is_final_event(parsed_event):
+                    break
         finally:
             if controller and getattr(controller, "enabled", False):
                 controller.on_stream_complete()
@@ -379,9 +385,19 @@ class AgentRunRenderingManager:
             if content:
                 content_str = str(content)
                 if not content_str.startswith("Artifact received:"):
-
+                    kind = self._get_event_kind(ev)
+                    # Skip accumulating content for status updates and agent steps
+                    if kind in ("agent_step", "status_update"):
+                        renderer.on_event(ev)
+                        return final_text, stats_usage
+
+                    if self._is_token_event(ev):
                         renderer.on_event(ev)
-
+                        final_text = f"{final_text}{content_str}"
+                    else:
+                        if content_str != last_rendered_content:
+                            renderer.on_event(ev)
+                        final_text = content_str
             else:
                 renderer.on_event(ev)
         return final_text, stats_usage
@@ -402,6 +418,33 @@ class AgentRunRenderingManager:
         event_type = ev.get("event_type")
         return str(event_type) if event_type else None
 
+    def _is_token_event(self, ev: dict[str, Any]) -> bool:
+        """Return True when the event represents token streaming output.
+
+        Args:
+            ev: Parsed event dictionary.
+
+        Returns:
+            True when the event is a token chunk, otherwise False.
+        """
+        metadata = ev.get("metadata") or {}
+        kind = metadata.get("kind")
+        return str(kind).lower() == "token"
+
+    def _is_final_event(self, ev: dict[str, Any]) -> bool:
+        """Return True when the event marks stream termination.
+
+        Args:
+            ev: Parsed event dictionary.
+
+        Returns:
+            True when the event is terminal, otherwise False.
+        """
+        if ev.get("is_final") is True or ev.get("final") is True:
+            return True
+        kind = self._get_event_kind(ev)
+        return kind in _FINAL_EVENT_TYPES
+
     def _extract_content_string(self, event: dict[str, Any]) -> str | None:
         """Extract textual content from a parsed event.
 
@@ -494,7 +537,9 @@ class AgentRunRenderingManager:
         if handled is not None:
             return handled
 
-
+        # Only accumulate content for actual content events, not status updates or agent steps
+        # Status updates (agent_step) should be rendered but not accumulated in final_text
+        if ev.get("content") and kind not in ("agent_step", "status_update"):
             final_text = self._handle_content_event(ev, final_text)
 
         return final_text, stats_usage
@@ -540,6 +585,8 @@ class AgentRunRenderingManager:
         """
         content = ev.get("content", "")
         if not content.startswith("Artifact received:"):
+            if self._is_token_event(ev):
+                return f"{final_text}{content}"
            return content
        return final_text
 
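For orientation, here is a minimal standalone sketch of the event classification these hunks introduce: token chunks are accumulated, and the loop exits early on a terminal event. The event dictionary shapes are assumptions inferred from the diff above, not the SDK's actual stream payloads.

```python
# Illustrative sketch only: stand-ins for _is_token_event/_is_final_event,
# using event shapes assumed from the diff (metadata["kind"], event_type, is_final).
FINAL_EVENT_TYPES = {"final_response", "error", "step_limit_exceeded"}


def is_token_event(ev: dict) -> bool:
    # Token chunks are tagged via metadata["kind"] == "token".
    metadata = ev.get("metadata") or {}
    return str(metadata.get("kind")).lower() == "token"


def is_final_event(ev: dict) -> bool:
    # Explicit final flags win; otherwise fall back to the terminal event types.
    if ev.get("is_final") is True or ev.get("final") is True:
        return True
    return ev.get("event_type") in FINAL_EVENT_TYPES


stream = [
    {"event_type": "content", "metadata": {"kind": "token"}, "content": "Hel"},
    {"event_type": "content", "metadata": {"kind": "token"}, "content": "lo"},
    {"event_type": "final_response", "content": "Hello"},
]
final_text = ""
for ev in stream:
    if is_token_event(ev):
        final_text += ev["content"]  # token events accumulate
    if is_final_event(ev):
        break                        # mirrors the early exit added to the streaming loops
print(final_text)  # -> "Hello"
```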
glaip_sdk/hitl/__init__.py
ADDED

@@ -0,0 +1,15 @@
+"""Human-in-the-Loop (HITL) utilities for glaip-sdk.
+
+This package provides utilities for HITL approval workflows in both local
+and remote agent execution modes.
+
+For local development, LocalPromptHandler is automatically injected when
+agent_config.hitl_enabled is True. No manual setup required.
+
+Authors:
+    Putu Ravindra Wiguna (putu.r.wiguna@gdplabs.id)
+"""
+
+from glaip_sdk.hitl.local import LocalPromptHandler, PauseResumeCallback
+
+__all__ = ["LocalPromptHandler", "PauseResumeCallback"]
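The docstring above states that LocalPromptHandler is injected automatically when agent_config.hitl_enabled is True. A hedged configuration sketch follows; the runtime_config shape mirrors the langgraph runner hunks later in this diff, and the commented run call is an assumption rather than documented API.

```python
# Assumed configuration sketch (not from SDK docs): enable local HITL via the
# agent_config master switch; approvals are then prompted in the terminal.
import os

# Documented escape hatch for CI/integration tests: auto-approve every request.
os.environ["GLAIP_HITL_AUTO_APPROVE"] = "true"

runtime_config = {
    "agent_config": {
        "hitl_enabled": True,  # gates injection of LocalPromptHandler (see runner diff below)
    }
}
# agent.run(message, runtime_config=runtime_config)  # hypothetical call signature
```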
glaip_sdk/hitl/local.py
ADDED

@@ -0,0 +1,151 @@
+"""Local HITL prompt handler with interactive console support.
+
+Author:
+    Putu Ravindra Wiguna (putu.r.wiguna@gdplabs.id)
+"""
+
+import os
+from typing import Any
+
+try:
+    from aip_agents.agent.hitl.prompt.base import BasePromptHandler
+    from aip_agents.schema.hitl import ApprovalDecision, ApprovalDecisionType, ApprovalRequest
+except ImportError as e:
+    raise ImportError("aip_agents is required for local HITL. Install with: pip install 'glaip-sdk[local]'") from e
+
+from rich.console import Console
+from rich.prompt import Prompt
+
+
+class LocalPromptHandler(BasePromptHandler):
+    """Local HITL prompt handler with interactive console prompts.
+
+    Experimental local HITL implementation with known limitations:
+    - Timeouts are not enforced (interactive prompts wait indefinitely)
+    - Relies on private renderer methods for pause/resume
+    - Only supports interactive terminal environments
+
+    The key insight from Rich documentation is that Live must be stopped before
+    using Prompt/input(), otherwise the input won't render properly.
+
+    Environment variables:
+        GLAIP_HITL_AUTO_APPROVE: Set to "true" (case-insensitive) to auto-approve
+            all requests without user interaction. Useful for integration tests and CI.
+    """
+
+    def __init__(self, *, pause_resume_callback: Any | None = None) -> None:
+        """Initialize the prompt handler.
+
+        Args:
+            pause_resume_callback: Optional callable with pause() and resume() methods
+                to control the live renderer during prompts. This is needed because
+                Rich Live interferes with Prompt/input().
+        """
+        super().__init__()
+        self._pause_resume = pause_resume_callback
+        self._console = Console()
+
+    async def prompt_for_decision(
+        self,
+        request: ApprovalRequest,
+        timeout_seconds: int,
+        context_keys: list[str] | None = None,
+    ) -> ApprovalDecision:
+        """Prompt for approval decision with live renderer pause/resume.
+
+        Supports auto-approval via GLAIP_HITL_AUTO_APPROVE environment variable
+        for integration testing and CI environments. Set to "true" (case-insensitive) to enable.
+        """
+        _ = (timeout_seconds, context_keys)  # Suppress unused parameter warnings.
+
+        # Check for auto-approve mode (for integration tests/CI)
+        auto_approve = os.getenv("GLAIP_HITL_AUTO_APPROVE", "").lower() == "true"
+
+        if auto_approve:
+            # Auto-approve without user interaction
+            return ApprovalDecision(
+                request_id=request.request_id,
+                decision=ApprovalDecisionType.APPROVED,
+                operator_input="auto-approved",
+            )
+
+        # Pause the live renderer if callback is available
+        if self._pause_resume:
+            self._pause_resume.pause()
+
+        try:
+            # POC/MVP: Show what we're approving (still auto-approve for now)
+            self._print_request_info(request)
+
+            # POC/MVP: For testing, we can do actual input here
+            # Uncomment to enable real prompting:
+            response = Prompt.ask(
+                "\n[yellow]Approve this tool call?[/yellow] [dim](y/n/s)[/dim]",
+                console=self._console,
+                default="y",
+            )
+            response = response.lower().strip()
+
+            if response in ("y", "yes"):
+                decision = ApprovalDecisionType.APPROVED
+            elif response in ("n", "no"):
+                decision = ApprovalDecisionType.REJECTED
+            else:
+                decision = ApprovalDecisionType.SKIPPED
+
+            return ApprovalDecision(
+                request_id=request.request_id,
+                decision=decision,
+                operator_input=response if decision != ApprovalDecisionType.SKIPPED else None,
+            )
+        finally:
+            # Always resume the live renderer
+            if self._pause_resume:
+                self._pause_resume.resume()
+
+    def _print_request_info(self, request: ApprovalRequest) -> None:
+        """Print the approval request information."""
+        self._console.print()
+        self._console.rule("[yellow]HITL Approval Request[/yellow]", style="yellow")
+
+        tool_name = request.tool_name or "unknown"
+        self._console.print(f"[cyan]Tool:[/cyan] {tool_name}")
+
+        if hasattr(request, "arguments_preview") and request.arguments_preview:
+            self._console.print(f"[cyan]Arguments:[/cyan] {request.arguments_preview}")
+
+        if request.context:
+            self._console.print(f"[dim]Context: {request.context}[/dim]")
+
+
+class PauseResumeCallback:
+    """Simple callback object for pausing/resuming the live renderer.
+
+    This allows the LocalPromptHandler to control the renderer without
+    directly coupling to the renderer implementation.
+    """
+
+    def __init__(self) -> None:
+        """Initialize the callback."""
+        self._renderer: Any | None = None
+
+    def set_renderer(self, renderer: Any) -> None:
+        """Set the renderer instance.
+
+        Args:
+            renderer: RichStreamRenderer instance with pause_live() and resume_live() methods.
+        """
+        self._renderer = renderer
+
+    def pause(self) -> None:
+        """Pause the live renderer before prompting."""
+        if self._renderer and hasattr(self._renderer, "_shutdown_live"):
+            self._renderer._shutdown_live()
+
+    def resume(self) -> None:
+        """Resume the live renderer after prompting."""
+        if self._renderer and hasattr(self._renderer, "_ensure_live"):
+            self._renderer._ensure_live()
+
+
+__all__ = ["LocalPromptHandler", "PauseResumeCallback"]
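A minimal manual wiring sketch for the two classes added above, assuming the glaip-sdk[local] extra is installed; in normal use the LangGraph runner performs this injection automatically when hitl_enabled is True (see the next file).

```python
# Sketch of manual wiring, for illustration only; the runner normally does this.
from glaip_sdk.hitl import LocalPromptHandler, PauseResumeCallback

callback = PauseResumeCallback()
handler = LocalPromptHandler(pause_resume_callback=callback)

# Once a RichStreamRenderer instance exists, hand it to the callback so the
# handler can stop Rich Live before Prompt.ask() and restart it afterwards:
# callback.set_renderer(renderer)   # renderer: RichStreamRenderer (assumed to exist at runtime)
```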
glaip_sdk/runner/langgraph.py
CHANGED

@@ -19,18 +19,21 @@ from __future__ import annotations
 
 import asyncio
 import inspect
-from dataclasses import dataclass
 import logging
+from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any
 
+from aip_agents.agent.hitl.manager import ApprovalManager  # noqa: PLC0415
 from gllm_core.utils import LoggerManager
 
+from glaip_sdk.client.run_rendering import AgentRunRenderingManager
+from glaip_sdk.hitl import LocalPromptHandler, PauseResumeCallback
 from glaip_sdk.runner.base import BaseRunner
 from glaip_sdk.runner.deps import (
     check_local_runtime_available,
     get_local_runtime_missing_message,
 )
-from glaip_sdk.
+from glaip_sdk.utils.tool_storage_provider import build_tool_output_manager
 
 if TYPE_CHECKING:
     from langchain_core.messages import BaseMessage
@@ -251,8 +254,13 @@ class LangGraphRunner(BaseRunner):
         if swallow_aip_logs:
             _swallow_aip_logs()
 
+        # POC/MVP: Create pause/resume callback for interactive HITL input
+        pause_resume_callback = PauseResumeCallback()
+
         # Build the local LangGraphReactAgent from the glaip_sdk Agent
-        local_agent = self.build_langgraph_agent(
+        local_agent = self.build_langgraph_agent(
+            agent, runtime_config=runtime_config, pause_resume_callback=pause_resume_callback
+        )
 
         # Convert chat history to LangChain messages for the agent
         langchain_messages = _convert_chat_history_to_messages(chat_history)
@@ -267,6 +275,10 @@ class LangGraphRunner(BaseRunner):
         # Use shared render manager for unified processing
         render_manager = AgentRunRenderingManager(logger)
         renderer = render_manager.create_renderer(kwargs.get("renderer"), verbose=verbose)
+
+        # POC/MVP: Set renderer on callback so LocalPromptHandler can pause/resume Live
+        pause_resume_callback.set_renderer(renderer)
+
         meta = render_manager.build_initial_metadata(agent.name, message, kwargs)
         render_manager.start_renderer(renderer, meta)
 
@@ -305,6 +317,9 @@ class LangGraphRunner(BaseRunner):
         self,
         agent: Agent,
         runtime_config: dict[str, Any] | None = None,
+        shared_tool_output_manager: Any | None = None,
+        *,
+        pause_resume_callback: Any | None = None,
     ) -> Any:
         """Build a LangGraphReactAgent from a glaip_sdk Agent definition.
 
@@ -312,6 +327,10 @@ class LangGraphRunner(BaseRunner):
             agent: The glaip_sdk Agent to convert.
             runtime_config: Optional runtime configuration with tool_configs,
                 mcp_configs, agent_config, and agent-specific overrides.
+            shared_tool_output_manager: Optional ToolOutputManager to reuse across
+                agents with tool_output_sharing enabled.
+            pause_resume_callback: Optional callback used to pause/resume the renderer
+                during interactive HITL prompts.
 
         Returns:
             A configured LangGraphReactAgent instance.
@@ -333,9 +352,6 @@ class LangGraphRunner(BaseRunner):
         adapter = LangChainToolAdapter()
         langchain_tools = adapter.adapt_tools(agent.tools)
 
-        # Build sub-agents recursively
-        sub_agent_instances = self._build_sub_agents(agent.agents, runtime_config)
-
         # Normalize runtime config: merge global and agent-specific configs
         normalized_config = self._normalize_runtime_config(runtime_config, agent)
 
@@ -349,6 +365,19 @@ class LangGraphRunner(BaseRunner):
         merged_agent_config = self._merge_agent_config(agent, normalized_config)
         agent_config_params, agent_config_kwargs = self._apply_agent_config(merged_agent_config)
 
+        tool_output_manager = self._resolve_tool_output_manager(
+            agent,
+            merged_agent_config,
+            shared_tool_output_manager,
+        )
+
+        # Build sub-agents recursively, sharing tool output manager when enabled.
+        sub_agent_instances = self._build_sub_agents(
+            agent.agents,
+            runtime_config,
+            shared_tool_output_manager=tool_output_manager,
+        )
+
         # Build the LangGraphReactAgent with tools, sub-agents, and configs
         local_agent = LangGraphReactAgent(
             name=agent.name,
@@ -358,6 +387,7 @@ class LangGraphRunner(BaseRunner):
             tools=langchain_tools,
             agents=sub_agent_instances if sub_agent_instances else None,
             tool_configs=tool_configs if tool_configs else None,
+            tool_output_manager=tool_output_manager,
             **agent_config_params,
             **agent_config_kwargs,
         )
@@ -365,6 +395,11 @@ class LangGraphRunner(BaseRunner):
         # Add MCP servers if configured
         self._add_mcp_servers(local_agent, agent, mcp_configs)
 
+        # Inject local HITL manager only if hitl_enabled is True (master switch).
+        # This matches remote behavior: hitl_enabled gates the HITL plumbing.
+        # Tool-level HITL configs are only enforced when hitl_enabled=True.
+        self._inject_hitl_manager(local_agent, merged_agent_config, agent.name, pause_resume_callback)
+
         logger.debug(
             "Built local LangGraphReactAgent for agent '%s' with %d tools, %d sub-agents, and %d MCPs",
             agent.name,
@@ -374,16 +409,60 @@ class LangGraphRunner(BaseRunner):
         )
         return local_agent
 
+    def _resolve_tool_output_manager(
+        self,
+        agent: Agent,
+        merged_agent_config: dict[str, Any],
+        shared_tool_output_manager: Any | None,
+    ) -> Any | None:
+        """Resolve tool output manager for local agent execution."""
+        tool_output_sharing_enabled = merged_agent_config.get("tool_output_sharing", False)
+        if not tool_output_sharing_enabled:
+            return None
+        if shared_tool_output_manager is not None:
+            return shared_tool_output_manager
+        return build_tool_output_manager(agent.name, merged_agent_config)
+
+    def _inject_hitl_manager(
+        self,
+        local_agent: Any,
+        merged_agent_config: dict[str, Any],
+        agent_name: str,
+        pause_resume_callback: Any | None,
+    ) -> None:
+        """Inject HITL manager when enabled, mirroring remote gating behavior."""
+        hitl_enabled = merged_agent_config.get("hitl_enabled", False)
+        if hitl_enabled:
+            try:
+                local_agent.hitl_manager = ApprovalManager(
+                    prompt_handler=LocalPromptHandler(pause_resume_callback=pause_resume_callback)
+                )
+                # Store callback reference for setting renderer later
+                if pause_resume_callback:
+                    local_agent._pause_resume_callback = pause_resume_callback
+                logger.debug("HITL manager injected for agent '%s' (hitl_enabled=True)", agent_name)
+            except ImportError as e:
+                # Missing dependencies - fail fast
+                raise ImportError("Local HITL requires aip_agents. Install with: pip install 'glaip-sdk[local]'") from e
+            except Exception as e:
+                # Other errors during HITL setup - fail fast
+                raise RuntimeError(f"Failed to initialize HITL manager for agent '{agent_name}'") from e
+        else:
+            logger.debug("HITL manager not injected for agent '%s' (hitl_enabled=False)", agent_name)
+
     def _build_sub_agents(
         self,
         sub_agents: list[Any] | None,
         runtime_config: dict[str, Any] | None,
+        shared_tool_output_manager: Any | None = None,
     ) -> list[Any]:
         """Build sub-agent instances recursively.
 
         Args:
             sub_agents: List of sub-agent definitions.
             runtime_config: Runtime config to pass to sub-agents.
+            shared_tool_output_manager: Optional ToolOutputManager to reuse across
+                agents with tool_output_sharing enabled.
 
         Returns:
             List of built sub-agent instances.
@@ -397,7 +476,13 @@ class LangGraphRunner(BaseRunner):
         sub_agent_instances = []
         for sub_agent in sub_agents:
             self._validate_sub_agent_for_local_mode(sub_agent)
-            sub_agent_instances.append(
+            sub_agent_instances.append(
+                self.build_langgraph_agent(
+                    sub_agent,
+                    runtime_config,
+                    shared_tool_output_manager=shared_tool_output_manager,
+                )
+            )
         return sub_agent_instances
 
     def _add_mcp_servers(
@@ -644,6 +729,9 @@ class LangGraphRunner(BaseRunner):
         if "planning" in agent_config:
             direct_params["planning"] = agent_config["planning"]
 
+        if "enable_a2a_token_streaming" in agent_config:
+            direct_params["enable_a2a_token_streaming"] = agent_config["enable_a2a_token_streaming"]
+
         # Kwargs parameters (passed through **kwargs to BaseAgent)
         if "enable_pii" in agent_config:
             kwargs_params["enable_pii"] = agent_config["enable_pii"]
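A standalone sketch of the precedence _resolve_tool_output_manager applies above (sharing disabled → None; a manager passed down from the parent is reused; otherwise a new one is built), using a dummy builder in place of the real build_tool_output_manager.

```python
# Toy re-statement of the resolution order shown in the hunk above; the string
# "manager<...>" is a stand-in for a ToolOutputManager instance.
from typing import Any


def resolve(agent_name: str, config: dict[str, Any], shared: Any | None = None) -> Any | None:
    if not config.get("tool_output_sharing", False):
        return None                    # feature off: no manager at all
    if shared is not None:
        return shared                  # reuse the parent agent's manager
    return f"manager<{agent_name}>"    # otherwise build a fresh one (stand-in)


assert resolve("root", {}) is None
assert resolve("child", {"tool_output_sharing": True}, shared="root-manager") == "root-manager"
assert resolve("root", {"tool_output_sharing": True}) == "manager<root>"
```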
glaip_sdk/utils/rendering/renderer/base.py
CHANGED

@@ -8,6 +8,7 @@ from __future__ import annotations
 
 import json
 import logging
+import sys
 from datetime import datetime, timezone
 from time import monotonic
 from typing import Any
@@ -349,6 +350,9 @@ class RichStreamRenderer(TranscriptModeMixin):
             self._handle_status_event(ev)
         elif kind == "content":
             self._handle_content_event(content)
+        elif kind == "token":
+            # Token events should stream content incrementally with immediate console output
+            self._handle_token_event(content)
         elif kind == "final_response":
             self._handle_final_response_event(content, metadata)
         elif kind in {"agent_step", "agent_thinking_step"}:
@@ -368,6 +372,31 @@ class RichStreamRenderer(TranscriptModeMixin):
             self.state.append_transcript_text(content)
             self._ensure_live()
 
+    def _handle_token_event(self, content: str) -> None:
+        """Handle token streaming events - print immediately for real-time streaming."""
+        if content:
+            self.state.append_transcript_text(content)
+            # Print token content directly to stdout for immediate visibility when not verbose
+            # This bypasses Rich's Live display which has refresh rate limitations
+            if not self.verbose:
+                try:
+                    # Mark that we're streaming tokens directly to prevent Live display from starting
+                    self._streaming_tokens_directly = True
+                    # Stop Live display if active to prevent it from intercepting stdout
+                    # and causing each token to appear on a new line
+                    if self.live is not None:
+                        self._stop_live_display()
+                    # Write directly to stdout - tokens will stream on the same line
+                    # since we're bypassing Rich's console which adds newlines
+                    sys.stdout.write(content)
+                    sys.stdout.flush()
+                except Exception:
+                    # Fallback to live display if direct write fails
+                    self._ensure_live()
+            else:
+                # In verbose mode, use normal live display (debug panels handle the output)
+                self._ensure_live()
+
     def _handle_final_response_event(self, content: str, metadata: dict[str, Any]) -> None:
         """Handle final response events."""
         if content:
@@ -521,6 +550,18 @@ class RichStreamRenderer(TranscriptModeMixin):
         if getattr(self, "_transcript_mode_enabled", False):
             return
 
+        # When verbose=False and tokens were streamed directly, skip final panel
+        # The user's script will print the final result, avoiding duplication
+        if not self.verbose and getattr(self, "_streaming_tokens_directly", False):
+            # Add a newline after streaming tokens for clean separation
+            try:
+                sys.stdout.write("\n")
+                sys.stdout.flush()
+            except Exception:
+                pass
+            self.state.printed_final_output = True
+            return
+
         if self.verbose:
             panel = build_final_panel(
                 self.state,
@@ -597,6 +638,19 @@ class RichStreamRenderer(TranscriptModeMixin):
 
     def _finalize_display(self) -> None:
         """Finalize live display and render final output."""
+        # When verbose=False and tokens were streamed directly, skip live display updates
+        # to avoid showing duplicate final result
+        if not self.verbose and getattr(self, "_streaming_tokens_directly", False):
+            # Just add a newline after streaming tokens for clean separation
+            try:
+                sys.stdout.write("\n")
+                sys.stdout.flush()
+            except Exception:
+                pass
+            self._stop_live_display()
+            self.state.printed_final_output = True
+            return
+
         # Final refresh
         self._ensure_live()
 
@@ -629,6 +683,10 @@ class RichStreamRenderer(TranscriptModeMixin):
         """Ensure live display is updated."""
         if getattr(self, "_transcript_mode_enabled", False):
             return
+        # When verbose=False, don't start Live display if we're streaming tokens directly
+        # This prevents Live from intercepting stdout and causing tokens to appear on separate lines
+        if not self.verbose and getattr(self, "_streaming_tokens_directly", False):
+            return
         if not self._ensure_live_stack():
             return
 
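The pattern these hunks adopt is generic Rich behavior: a Live display repaints on its own schedule, so raw token chunks have to be written after Live is stopped or they get broken up by repaints. A toy sketch of that pattern, independent of the renderer class (it assumes only the rich package).

```python
# Toy illustration of "stop Live, then write tokens straight to stdout".
import sys
import time

from rich.live import Live

live = Live("waiting for first token...", refresh_per_second=4)
live.start()
for chunk in ["Hel", "lo, ", "wor", "ld!", "\n"]:
    if live.is_started:
        live.stop()              # Live would otherwise repaint and split the stream across lines
    sys.stdout.write(chunk)      # raw write keeps chunks on one line
    sys.stdout.flush()
    time.sleep(0.1)              # simulates token arrival latency
```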
glaip_sdk/utils/tool_storage_provider.py
ADDED

@@ -0,0 +1,140 @@
+"""Helpers for local tool output storage setup.
+
+This module bridges agent_config.tool_output_sharing to ToolOutputManager
+for local execution without modifying aip-agents.
+
+Authors:
+    Fachriza Adhiatma (fachriza.d.adhiatma@gdplabs.id)
+"""
+
+from __future__ import annotations
+
+import os
+from typing import Any
+
+from gllm_core.utils import LoggerManager
+
+logger = LoggerManager().get_logger(__name__)
+
+
+def build_tool_output_manager(agent_name: str, agent_config: dict[str, Any]) -> Any | None:
+    """Build a ToolOutputManager for local tool output sharing.
+
+    Args:
+        agent_name: Name of the agent whose tool outputs will be stored.
+        agent_config: Agent configuration that may enable tool output sharing and contain task_id.
+
+    Returns:
+        A ToolOutputManager instance when tool output sharing is enabled and
+        dependencies are available, otherwise ``None``.
+    """
+    tool_output_sharing_enabled = agent_config.get("tool_output_sharing", False)
+    if not tool_output_sharing_enabled:
+        return None
+
+    try:
+        from aip_agents.storage.clients.minio_client import MinioConfig, MinioObjectStorage  # noqa: PLC0415
+        from aip_agents.storage.providers.memory import InMemoryStorageProvider  # noqa: PLC0415
+        from aip_agents.storage.providers.object_storage import ObjectStorageProvider  # noqa: PLC0415
+        from aip_agents.utils.langgraph.tool_output_management import (  # noqa: PLC0415
+            ToolOutputConfig,
+            ToolOutputManager,
+        )
+    except ImportError:
+        logger.warning("Tool output sharing requested but aip-agents is unavailable; skipping.")
+        return None
+
+    task_id = agent_config.get("task_id")
+
+    storage_provider = _build_tool_output_storage_provider(
+        agent_name=agent_name,
+        task_id=task_id,
+        minio_config_cls=MinioConfig,
+        minio_client_cls=MinioObjectStorage,
+        object_storage_provider_cls=ObjectStorageProvider,
+        memory_storage_provider_cls=InMemoryStorageProvider,
+    )
+    tool_output_config = _build_tool_output_config(storage_provider, ToolOutputConfig)
+    return ToolOutputManager(tool_output_config)
+
+
+def _build_tool_output_storage_provider(
+    agent_name: str,
+    task_id: str | None,
+    minio_config_cls: Any,
+    minio_client_cls: Any,
+    object_storage_provider_cls: Any,
+    memory_storage_provider_cls: Any,
+) -> Any:
+    """Create a storage provider for tool output sharing.
+
+    Args:
+        agent_name: Name of the agent whose tool outputs are stored.
+        task_id: Optional task identifier for coordination context.
+        minio_config_cls: Class exposing a ``from_env`` constructor for MinIO config.
+        minio_client_cls: MinIO client class used to talk to the object store.
+        object_storage_provider_cls: Storage provider wrapping the MinIO client.
+        memory_storage_provider_cls: In-memory provider used as a fallback.
+
+    Returns:
+        An instance of ``object_storage_provider_cls`` when MinIO initialization
+        succeeds, otherwise an instance of ``memory_storage_provider_cls``.
+    """
+    try:
+        config_obj = minio_config_cls.from_env()
+        minio_client = minio_client_cls(config=config_obj)
+        prefix = _build_tool_output_prefix(agent_name, task_id)
+        return object_storage_provider_cls(client=minio_client, prefix=prefix, use_json=False)
+    except Exception as exc:
+        logger.warning("Failed to initialize MinIO for tool outputs: %s. Using in-memory storage.", exc)
+        return memory_storage_provider_cls()
+
+
+def _build_tool_output_prefix(agent_name: str, task_id: str | None) -> str:
+    """Build object storage prefix for tool outputs in local mode.
+
+    Args:
+        agent_name: Name of the agent whose outputs are stored.
+        task_id: Optional task identifier for coordination context.
+
+    Returns:
+        Object storage key prefix dedicated to the provided agent.
+    """
+    if task_id:
+        return f"tool-outputs/tasks/{task_id}/agents/{agent_name}/"
+    return f"tool-outputs/agents/{agent_name}/"
+
+
+def _build_tool_output_config(storage_provider: Any, config_cls: Any) -> Any:
+    """Build ToolOutputConfig using env vars, with safe defaults.
+
+    Args:
+        storage_provider: Provider that will persist tool outputs.
+        config_cls: Tool output configuration class to instantiate.
+
+    Returns:
+        A configured ``config_cls`` instance ready for ToolOutputManager use.
+    """
+
+    def safe_int_conversion(env_var: str, default: str) -> int:
+        """Convert an environment variable to int with a fallback default.
+
+        Args:
+            env_var: Environment variable name to read.
+            default: Default string value used when parsing fails.
+
+        Returns:
+            Integer representation of the environment variable or the default.
+        """
+        try:
+            return int(os.getenv(env_var, default))
+        except (ValueError, TypeError):
+            logger.warning("Invalid value for %s, using default: %s", env_var, default)
+            return int(default)
+
+    return config_cls(
+        max_stored_outputs=safe_int_conversion("TOOL_OUTPUT_MAX_STORED", "200"),
+        max_age_minutes=safe_int_conversion("TOOL_OUTPUT_MAX_AGE_MINUTES", str(24 * 60)),
+        cleanup_interval=safe_int_conversion("TOOL_OUTPUT_CLEANUP_INTERVAL", "50"),
+        storage_provider=storage_provider,
+    )
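A hedged invocation sketch for the helper above; the agent name, task_id, and env override are placeholders, and the aip-agents extra must be installed or the helper logs a warning and returns None.

```python
# Hypothetical usage; values are placeholders, not taken from SDK documentation.
import os

from glaip_sdk.utils.tool_storage_provider import build_tool_output_manager

# Env overrides read by _build_tool_output_config (defaults: 200 / 1440 / 50).
os.environ.setdefault("TOOL_OUTPUT_MAX_STORED", "100")

manager = build_tool_output_manager(
    agent_name="research-agent",  # placeholder agent name
    agent_config={"tool_output_sharing": True, "task_id": "demo-task"},
)
print(manager)  # ToolOutputManager when dependencies are available, otherwise None
```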
{glaip_sdk-0.6.16.dist-info → glaip_sdk-0.6.18.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: glaip-sdk
-Version: 0.6.16
+Version: 0.6.18
 Summary: Python SDK for GL AIP (GDP Labs AI Agent Package) - Simplified CLI Design
 Author-email: Raymond Christopher <raymond.christopher@gdplabs.id>
 License: MIT
@@ -19,11 +19,11 @@ Requires-Dist: textual>=0.52.0
 Requires-Dist: gllm-core-binary>=0.1.0
 Requires-Dist: langchain-core>=0.3.0
 Requires-Dist: gllm-tools-binary>=0.1.3
-Requires-Dist: aip-agents-binary[local]>=0.5.
+Requires-Dist: aip-agents-binary[local]>=0.5.13
 Provides-Extra: memory
-Requires-Dist: aip-agents-binary[memory]>=0.5.
+Requires-Dist: aip-agents-binary[memory]>=0.5.13; (python_version >= "3.11" and python_version < "3.13") and extra == "memory"
 Provides-Extra: privacy
-Requires-Dist: aip-agents-binary[privacy]>=0.5.
+Requires-Dist: aip-agents-binary[privacy]>=0.5.13; (python_version >= "3.11" and python_version < "3.13") and extra == "privacy"
 Requires-Dist: en-core-web-sm; extra == "privacy"
 Provides-Extra: dev
 Requires-Dist: pytest>=7.0.0; extra == "dev"
@@ -37,7 +37,7 @@ Requires-Dist: ruff>=0.14.0; extra == "dev"
 
 # GL AIP — GDP Labs AI Agents Package
 
-[](https://www.python.org/downloads/)
 [](https://github.com/psf/black)
 
 GL stands for **GDP Labs**—GL AIP is our AI Agents Package for building, running, and operating agents.
@@ -56,7 +56,7 @@ pip install --upgrade glaip-sdk
 uv tool install glaip-sdk
 ```
 
-**Requirements**: Python 3.
+**Requirements**: Python 3.11 or 3.12
 
 ## 🐍 Hello World - Python SDK
 
{glaip_sdk-0.6.16.dist-info → glaip_sdk-0.6.18.dist-info}/RECORD
CHANGED

@@ -69,11 +69,13 @@ glaip_sdk/client/agents.py,sha256=75uDLN85Smf67rw-jFhlVKyiToicAfcFyJHSvWJkAww,47
 glaip_sdk/client/base.py,sha256=BhNaC2TJJ2jVWRTYmfxD3WjYgAyIuWNz9YURdNXXjJo,18245
 glaip_sdk/client/main.py,sha256=RTREAOgGouYm4lFKkpNBQ9dmxalnBsIpSSaQLWVFSmU,9054
 glaip_sdk/client/mcps.py,sha256=gFRuLOGeh6ieIhR4PeD6yNVT6NhvUMTqPq9iuu1vkAY,13019
-glaip_sdk/client/run_rendering.py,sha256=
+glaip_sdk/client/run_rendering.py,sha256=kERp78v50jojsNWHrjNEkbC8sgOpMacaqUdw5YZuK6A,26074
 glaip_sdk/client/shared.py,sha256=esHlsR0LEfL-pFDaWebQjKKOLl09jsRY-2pllBUn4nU,522
 glaip_sdk/client/tools.py,sha256=kK0rBwX1e_5AlGQRjlO6rNz6gDlohhXWdlxN9AwotdE,22585
 glaip_sdk/client/validators.py,sha256=ioF9VCs-LG2yLkaRDd7Hff74lojDZZ0_Q3CiLbdm1RY,8381
 glaip_sdk/config/constants.py,sha256=Y03c6op0e7K0jTQ8bmWXhWAqsnjWxkAhWniq8Z0iEKY,1081
+glaip_sdk/hitl/__init__.py,sha256=sg92Rpu8_vJIGi1ZEhx0-qWa1nGdvfrKyJAxtoDSKzo,494
+glaip_sdk/hitl/local.py,sha256=rzmaRK15BxgRX7cmklUcGQUotMYg8x2Gd9BWf39k6hw,5661
 glaip_sdk/mcps/__init__.py,sha256=4jYrt8K__oxrxexHRcmnRBXt-W_tbJN61H9Kf2lVh4Q,551
 glaip_sdk/mcps/base.py,sha256=jWwHjDF67_mtDGRp9p5SolANjVeB8jt1PSwPBtX876M,11654
 glaip_sdk/models/__init__.py,sha256=-qO4Yr1-fkyaYC9RcT3nYhplDjoXATrIFZr4JrqflHI,2577
@@ -92,7 +94,7 @@ glaip_sdk/registry/tool.py,sha256=rxrVxnO_VwO6E5kccqxxEUC337J9qbKpje-Gwl5a3sY,76
 glaip_sdk/runner/__init__.py,sha256=8RrngoGfpF8x9X27RPdX4gJjch75ZvhtVt_6UV0ULLQ,1615
 glaip_sdk/runner/base.py,sha256=KIjcSAyDCP9_mn2H4rXR5gu1FZlwD9pe0gkTBmr6Yi4,2663
 glaip_sdk/runner/deps.py,sha256=Du3hr2R5RHOYCRAv7RVmx661x-ayVXIeZ8JD7ODirTA,3884
-glaip_sdk/runner/langgraph.py,sha256
+glaip_sdk/runner/langgraph.py,sha256=-3BMJRww3S3dboS3uyR3QrxV-3p-1i2X5ObxdTTGRdg,32955
 glaip_sdk/runner/mcp_adapter/__init__.py,sha256=Rdttfg3N6kg3-DaTCKqaGXKByZyBt0Mwf6FV8s_5kI8,462
 glaip_sdk/runner/mcp_adapter/base_mcp_adapter.py,sha256=ic56fKgb3zgVZZQm3ClWUZi7pE1t4EVq8mOg6AM6hdA,1374
 glaip_sdk/runner/mcp_adapter/langchain_mcp_adapter.py,sha256=b58GuadPz7q7aXoJyTYs0eeJ_oqp-wLR1tcr_5cbV1s,9723
@@ -121,6 +123,7 @@ glaip_sdk/utils/runtime_config.py,sha256=Gl9-CQ4lYZ39vRSgtdfcSU3CXshVDDuTOdSzjvs
 glaip_sdk/utils/serialization.py,sha256=z-qpvWLSBrGK3wbUclcA1UIKLXJedTnMSwPdq-FF4lo,13308
 glaip_sdk/utils/sync.py,sha256=3VKqs1UfNGWSobgRXohBKP7mMMzdUW3SU0bJQ1uxOgw,4872
 glaip_sdk/utils/tool_detection.py,sha256=g410GNug_PhLye8rd9UU-LVFIKq3jHPbmSItEkLxPTc,807
+glaip_sdk/utils/tool_storage_provider.py,sha256=lampwUeWu4Uy8nBG7C4ZT-M6AHoWZS0m67HdLx21VDg,5396
 glaip_sdk/utils/validation.py,sha256=hB_k3lvHdIFUiSwHStrC0Eqnhx0OG2UvwqASeem0HuQ,6859
 glaip_sdk/utils/a2a/__init__.py,sha256=_X8AvDOsHeppo5n7rP5TeisVxlAdkZDTFReBk_9lmxo,876
 glaip_sdk/utils/a2a/event_processor.py,sha256=9Mjvvd4_4VDYeOkAI7_vF7N7_Dn0Kn23ramKyK32b3c,5993
@@ -136,7 +139,7 @@ glaip_sdk/utils/rendering/layout/progress.py,sha256=GhOhUPNQd8-e6JxTJsV76s6wIYht
 glaip_sdk/utils/rendering/layout/summary.py,sha256=K-gkDxwUxF67-4nF20y6hv95QEwRZCQI9Eb4KbA8eQY,2325
 glaip_sdk/utils/rendering/layout/transcript.py,sha256=vbfywtbWCDzLY9B5Vvf4crhomftFq-UEz7zqySiLrD8,19052
 glaip_sdk/utils/rendering/renderer/__init__.py,sha256=lpf0GnNGcPb8gq_hJM6Puflwy3eTigVK9qXP01nWRv0,1754
-glaip_sdk/utils/rendering/renderer/base.py,sha256=
+glaip_sdk/utils/rendering/renderer/base.py,sha256=CpkkwiTmJHi8j2EGBva7WBpVWNte0VoDGgF6UbiJ9J8,41929
 glaip_sdk/utils/rendering/renderer/config.py,sha256=FgSAZpG1g7Atm2MXg0tY0lOEciY90MR-RO6YuGFhp0E,626
 glaip_sdk/utils/rendering/renderer/console.py,sha256=4cLOw4Q1fkHkApuj6dWW8eYpeYdcT0t2SO5MbVt5UTc,1844
 glaip_sdk/utils/rendering/renderer/debug.py,sha256=qyqFXltYzKEqajwlu8QFSBU3P46JzMzIZqurejhx14o,5907
@@ -153,8 +156,8 @@ glaip_sdk/utils/rendering/steps/format.py,sha256=Chnq7OBaj8XMeBntSBxrX5zSmrYeGcO
 glaip_sdk/utils/rendering/steps/manager.py,sha256=BiBmTeQMQhjRMykgICXsXNYh1hGsss-fH9BIGVMWFi0,13194
 glaip_sdk/utils/rendering/viewer/__init__.py,sha256=XrxmE2cMAozqrzo1jtDFm8HqNtvDcYi2mAhXLXn5CjI,457
 glaip_sdk/utils/rendering/viewer/presenter.py,sha256=mlLMTjnyeyPVtsyrAbz1BJu9lFGQSlS-voZ-_Cuugv0,5725
-glaip_sdk-0.6.
-glaip_sdk-0.6.
-glaip_sdk-0.6.
-glaip_sdk-0.6.
-glaip_sdk-0.6.
+glaip_sdk-0.6.18.dist-info/METADATA,sha256=cPfNrNuml65FqIP1ZhrAp2AEiiipu3OADHcT345uI6g,7716
+glaip_sdk-0.6.18.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+glaip_sdk-0.6.18.dist-info/entry_points.txt,sha256=65vNPUggyYnVGhuw7RhNJ8Fp2jygTcX0yxJBcBY3iLU,48
+glaip_sdk-0.6.18.dist-info/top_level.txt,sha256=td7yXttiYX2s94-4wFhv-5KdT0rSZ-pnJRSire341hw,10
+glaip_sdk-0.6.18.dist-info/RECORD,,

{glaip_sdk-0.6.16.dist-info → glaip_sdk-0.6.18.dist-info}/WHEEL
File without changes

{glaip_sdk-0.6.16.dist-info → glaip_sdk-0.6.18.dist-info}/entry_points.txt
File without changes

{glaip_sdk-0.6.16.dist-info → glaip_sdk-0.6.18.dist-info}/top_level.txt
File without changes