hexdag 0.5.0.dev1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hexdag/__init__.py +116 -0
- hexdag/__main__.py +30 -0
- hexdag/adapters/executors/__init__.py +5 -0
- hexdag/adapters/executors/local_executor.py +316 -0
- hexdag/builtin/__init__.py +6 -0
- hexdag/builtin/adapters/__init__.py +51 -0
- hexdag/builtin/adapters/anthropic/__init__.py +5 -0
- hexdag/builtin/adapters/anthropic/anthropic_adapter.py +151 -0
- hexdag/builtin/adapters/database/__init__.py +6 -0
- hexdag/builtin/adapters/database/csv/csv_adapter.py +249 -0
- hexdag/builtin/adapters/database/pgvector/__init__.py +5 -0
- hexdag/builtin/adapters/database/pgvector/pgvector_adapter.py +478 -0
- hexdag/builtin/adapters/database/sqlalchemy/sqlalchemy_adapter.py +252 -0
- hexdag/builtin/adapters/database/sqlite/__init__.py +5 -0
- hexdag/builtin/adapters/database/sqlite/sqlite_adapter.py +410 -0
- hexdag/builtin/adapters/local/README.md +59 -0
- hexdag/builtin/adapters/local/__init__.py +7 -0
- hexdag/builtin/adapters/local/local_observer_manager.py +696 -0
- hexdag/builtin/adapters/memory/__init__.py +47 -0
- hexdag/builtin/adapters/memory/file_memory_adapter.py +297 -0
- hexdag/builtin/adapters/memory/in_memory_memory.py +216 -0
- hexdag/builtin/adapters/memory/schemas.py +57 -0
- hexdag/builtin/adapters/memory/session_memory.py +178 -0
- hexdag/builtin/adapters/memory/sqlite_memory_adapter.py +215 -0
- hexdag/builtin/adapters/memory/state_memory.py +280 -0
- hexdag/builtin/adapters/mock/README.md +89 -0
- hexdag/builtin/adapters/mock/__init__.py +15 -0
- hexdag/builtin/adapters/mock/hexdag.toml +50 -0
- hexdag/builtin/adapters/mock/mock_database.py +225 -0
- hexdag/builtin/adapters/mock/mock_embedding.py +223 -0
- hexdag/builtin/adapters/mock/mock_llm.py +177 -0
- hexdag/builtin/adapters/mock/mock_tool_adapter.py +192 -0
- hexdag/builtin/adapters/mock/mock_tool_router.py +232 -0
- hexdag/builtin/adapters/openai/__init__.py +5 -0
- hexdag/builtin/adapters/openai/openai_adapter.py +634 -0
- hexdag/builtin/adapters/secret/__init__.py +7 -0
- hexdag/builtin/adapters/secret/local_secret_adapter.py +248 -0
- hexdag/builtin/adapters/unified_tool_router.py +280 -0
- hexdag/builtin/macros/__init__.py +17 -0
- hexdag/builtin/macros/conversation_agent.py +390 -0
- hexdag/builtin/macros/llm_macro.py +151 -0
- hexdag/builtin/macros/reasoning_agent.py +423 -0
- hexdag/builtin/macros/tool_macro.py +380 -0
- hexdag/builtin/nodes/__init__.py +38 -0
- hexdag/builtin/nodes/_discovery.py +123 -0
- hexdag/builtin/nodes/agent_node.py +696 -0
- hexdag/builtin/nodes/base_node_factory.py +242 -0
- hexdag/builtin/nodes/composite_node.py +926 -0
- hexdag/builtin/nodes/data_node.py +201 -0
- hexdag/builtin/nodes/expression_node.py +487 -0
- hexdag/builtin/nodes/function_node.py +454 -0
- hexdag/builtin/nodes/llm_node.py +491 -0
- hexdag/builtin/nodes/loop_node.py +920 -0
- hexdag/builtin/nodes/mapped_input.py +518 -0
- hexdag/builtin/nodes/port_call_node.py +269 -0
- hexdag/builtin/nodes/tool_call_node.py +195 -0
- hexdag/builtin/nodes/tool_utils.py +390 -0
- hexdag/builtin/prompts/__init__.py +68 -0
- hexdag/builtin/prompts/base.py +422 -0
- hexdag/builtin/prompts/chat_prompts.py +303 -0
- hexdag/builtin/prompts/error_correction_prompts.py +320 -0
- hexdag/builtin/prompts/tool_prompts.py +160 -0
- hexdag/builtin/tools/builtin_tools.py +84 -0
- hexdag/builtin/tools/database_tools.py +164 -0
- hexdag/cli/__init__.py +17 -0
- hexdag/cli/__main__.py +7 -0
- hexdag/cli/commands/__init__.py +27 -0
- hexdag/cli/commands/build_cmd.py +812 -0
- hexdag/cli/commands/create_cmd.py +208 -0
- hexdag/cli/commands/docs_cmd.py +293 -0
- hexdag/cli/commands/generate_types_cmd.py +252 -0
- hexdag/cli/commands/init_cmd.py +188 -0
- hexdag/cli/commands/pipeline_cmd.py +494 -0
- hexdag/cli/commands/plugin_dev_cmd.py +529 -0
- hexdag/cli/commands/plugins_cmd.py +441 -0
- hexdag/cli/commands/studio_cmd.py +101 -0
- hexdag/cli/commands/validate_cmd.py +221 -0
- hexdag/cli/main.py +84 -0
- hexdag/core/__init__.py +83 -0
- hexdag/core/config/__init__.py +20 -0
- hexdag/core/config/loader.py +479 -0
- hexdag/core/config/models.py +150 -0
- hexdag/core/configurable.py +294 -0
- hexdag/core/context/__init__.py +37 -0
- hexdag/core/context/execution_context.py +378 -0
- hexdag/core/docs/__init__.py +26 -0
- hexdag/core/docs/extractors.py +678 -0
- hexdag/core/docs/generators.py +890 -0
- hexdag/core/docs/models.py +120 -0
- hexdag/core/domain/__init__.py +10 -0
- hexdag/core/domain/dag.py +1225 -0
- hexdag/core/exceptions.py +234 -0
- hexdag/core/expression_parser.py +569 -0
- hexdag/core/logging.py +449 -0
- hexdag/core/models/__init__.py +17 -0
- hexdag/core/models/base.py +138 -0
- hexdag/core/orchestration/__init__.py +46 -0
- hexdag/core/orchestration/body_executor.py +481 -0
- hexdag/core/orchestration/components/__init__.py +97 -0
- hexdag/core/orchestration/components/adapter_lifecycle_manager.py +113 -0
- hexdag/core/orchestration/components/checkpoint_manager.py +134 -0
- hexdag/core/orchestration/components/execution_coordinator.py +360 -0
- hexdag/core/orchestration/components/health_check_manager.py +176 -0
- hexdag/core/orchestration/components/input_mapper.py +143 -0
- hexdag/core/orchestration/components/lifecycle_manager.py +583 -0
- hexdag/core/orchestration/components/node_executor.py +377 -0
- hexdag/core/orchestration/components/secret_manager.py +202 -0
- hexdag/core/orchestration/components/wave_executor.py +158 -0
- hexdag/core/orchestration/constants.py +17 -0
- hexdag/core/orchestration/events/README.md +312 -0
- hexdag/core/orchestration/events/__init__.py +104 -0
- hexdag/core/orchestration/events/batching.py +330 -0
- hexdag/core/orchestration/events/decorators.py +139 -0
- hexdag/core/orchestration/events/events.py +573 -0
- hexdag/core/orchestration/events/observers/__init__.py +30 -0
- hexdag/core/orchestration/events/observers/core_observers.py +690 -0
- hexdag/core/orchestration/events/observers/models.py +111 -0
- hexdag/core/orchestration/events/taxonomy.py +269 -0
- hexdag/core/orchestration/hook_context.py +237 -0
- hexdag/core/orchestration/hooks.py +437 -0
- hexdag/core/orchestration/models.py +418 -0
- hexdag/core/orchestration/orchestrator.py +910 -0
- hexdag/core/orchestration/orchestrator_factory.py +275 -0
- hexdag/core/orchestration/port_wrappers.py +327 -0
- hexdag/core/orchestration/prompt/__init__.py +32 -0
- hexdag/core/orchestration/prompt/template.py +332 -0
- hexdag/core/pipeline_builder/__init__.py +21 -0
- hexdag/core/pipeline_builder/component_instantiator.py +386 -0
- hexdag/core/pipeline_builder/include_tag.py +265 -0
- hexdag/core/pipeline_builder/pipeline_config.py +133 -0
- hexdag/core/pipeline_builder/py_tag.py +223 -0
- hexdag/core/pipeline_builder/tag_discovery.py +268 -0
- hexdag/core/pipeline_builder/yaml_builder.py +1196 -0
- hexdag/core/pipeline_builder/yaml_validator.py +569 -0
- hexdag/core/ports/__init__.py +65 -0
- hexdag/core/ports/api_call.py +133 -0
- hexdag/core/ports/database.py +489 -0
- hexdag/core/ports/embedding.py +215 -0
- hexdag/core/ports/executor.py +237 -0
- hexdag/core/ports/file_storage.py +117 -0
- hexdag/core/ports/healthcheck.py +87 -0
- hexdag/core/ports/llm.py +551 -0
- hexdag/core/ports/memory.py +70 -0
- hexdag/core/ports/observer_manager.py +130 -0
- hexdag/core/ports/secret.py +145 -0
- hexdag/core/ports/tool_router.py +94 -0
- hexdag/core/ports_builder.py +623 -0
- hexdag/core/protocols.py +273 -0
- hexdag/core/resolver.py +304 -0
- hexdag/core/schema/__init__.py +9 -0
- hexdag/core/schema/generator.py +742 -0
- hexdag/core/secrets.py +242 -0
- hexdag/core/types.py +413 -0
- hexdag/core/utils/async_warnings.py +206 -0
- hexdag/core/utils/schema_conversion.py +78 -0
- hexdag/core/utils/sql_validation.py +86 -0
- hexdag/core/validation/secure_json.py +148 -0
- hexdag/core/yaml_macro.py +517 -0
- hexdag/mcp_server.py +3120 -0
- hexdag/studio/__init__.py +10 -0
- hexdag/studio/build_ui.py +92 -0
- hexdag/studio/server/__init__.py +1 -0
- hexdag/studio/server/main.py +100 -0
- hexdag/studio/server/routes/__init__.py +9 -0
- hexdag/studio/server/routes/execute.py +208 -0
- hexdag/studio/server/routes/export.py +558 -0
- hexdag/studio/server/routes/files.py +207 -0
- hexdag/studio/server/routes/plugins.py +419 -0
- hexdag/studio/server/routes/validate.py +220 -0
- hexdag/studio/ui/index.html +13 -0
- hexdag/studio/ui/package-lock.json +2992 -0
- hexdag/studio/ui/package.json +31 -0
- hexdag/studio/ui/postcss.config.js +6 -0
- hexdag/studio/ui/public/hexdag.svg +5 -0
- hexdag/studio/ui/src/App.tsx +251 -0
- hexdag/studio/ui/src/components/Canvas.tsx +408 -0
- hexdag/studio/ui/src/components/ContextMenu.tsx +187 -0
- hexdag/studio/ui/src/components/FileBrowser.tsx +123 -0
- hexdag/studio/ui/src/components/Header.tsx +181 -0
- hexdag/studio/ui/src/components/HexdagNode.tsx +193 -0
- hexdag/studio/ui/src/components/NodeInspector.tsx +512 -0
- hexdag/studio/ui/src/components/NodePalette.tsx +262 -0
- hexdag/studio/ui/src/components/NodePortsSection.tsx +403 -0
- hexdag/studio/ui/src/components/PluginManager.tsx +347 -0
- hexdag/studio/ui/src/components/PortsEditor.tsx +481 -0
- hexdag/studio/ui/src/components/PythonEditor.tsx +195 -0
- hexdag/studio/ui/src/components/ValidationPanel.tsx +105 -0
- hexdag/studio/ui/src/components/YamlEditor.tsx +196 -0
- hexdag/studio/ui/src/components/index.ts +8 -0
- hexdag/studio/ui/src/index.css +92 -0
- hexdag/studio/ui/src/main.tsx +10 -0
- hexdag/studio/ui/src/types/index.ts +123 -0
- hexdag/studio/ui/src/vite-env.d.ts +1 -0
- hexdag/studio/ui/tailwind.config.js +29 -0
- hexdag/studio/ui/tsconfig.json +37 -0
- hexdag/studio/ui/tsconfig.node.json +13 -0
- hexdag/studio/ui/vite.config.ts +35 -0
- hexdag/visualization/__init__.py +69 -0
- hexdag/visualization/dag_visualizer.py +1020 -0
- hexdag-0.5.0.dev1.dist-info/METADATA +369 -0
- hexdag-0.5.0.dev1.dist-info/RECORD +261 -0
- hexdag-0.5.0.dev1.dist-info/WHEEL +4 -0
- hexdag-0.5.0.dev1.dist-info/entry_points.txt +4 -0
- hexdag-0.5.0.dev1.dist-info/licenses/LICENSE +190 -0
- hexdag_plugins/.gitignore +43 -0
- hexdag_plugins/README.md +73 -0
- hexdag_plugins/__init__.py +1 -0
- hexdag_plugins/azure/LICENSE +21 -0
- hexdag_plugins/azure/README.md +414 -0
- hexdag_plugins/azure/__init__.py +21 -0
- hexdag_plugins/azure/azure_blob_adapter.py +450 -0
- hexdag_plugins/azure/azure_cosmos_adapter.py +383 -0
- hexdag_plugins/azure/azure_keyvault_adapter.py +314 -0
- hexdag_plugins/azure/azure_openai_adapter.py +415 -0
- hexdag_plugins/azure/pyproject.toml +107 -0
- hexdag_plugins/azure/tests/__init__.py +1 -0
- hexdag_plugins/azure/tests/test_azure_blob_adapter.py +350 -0
- hexdag_plugins/azure/tests/test_azure_cosmos_adapter.py +323 -0
- hexdag_plugins/azure/tests/test_azure_keyvault_adapter.py +330 -0
- hexdag_plugins/azure/tests/test_azure_openai_adapter.py +329 -0
- hexdag_plugins/hexdag_etl/README.md +168 -0
- hexdag_plugins/hexdag_etl/__init__.py +53 -0
- hexdag_plugins/hexdag_etl/examples/01_simple_pandas_transform.py +270 -0
- hexdag_plugins/hexdag_etl/examples/02_simple_pandas_only.py +149 -0
- hexdag_plugins/hexdag_etl/examples/03_file_io_pipeline.py +109 -0
- hexdag_plugins/hexdag_etl/examples/test_pandas_transform.py +84 -0
- hexdag_plugins/hexdag_etl/hexdag.toml +25 -0
- hexdag_plugins/hexdag_etl/hexdag_etl/__init__.py +48 -0
- hexdag_plugins/hexdag_etl/hexdag_etl/nodes/__init__.py +13 -0
- hexdag_plugins/hexdag_etl/hexdag_etl/nodes/api_extract.py +230 -0
- hexdag_plugins/hexdag_etl/hexdag_etl/nodes/base_node_factory.py +181 -0
- hexdag_plugins/hexdag_etl/hexdag_etl/nodes/file_io.py +415 -0
- hexdag_plugins/hexdag_etl/hexdag_etl/nodes/outlook.py +492 -0
- hexdag_plugins/hexdag_etl/hexdag_etl/nodes/pandas_transform.py +563 -0
- hexdag_plugins/hexdag_etl/hexdag_etl/nodes/sql_extract_load.py +112 -0
- hexdag_plugins/hexdag_etl/pyproject.toml +82 -0
- hexdag_plugins/hexdag_etl/test_transform.py +54 -0
- hexdag_plugins/hexdag_etl/tests/test_plugin_integration.py +62 -0
- hexdag_plugins/mysql_adapter/LICENSE +21 -0
- hexdag_plugins/mysql_adapter/README.md +224 -0
- hexdag_plugins/mysql_adapter/__init__.py +6 -0
- hexdag_plugins/mysql_adapter/mysql_adapter.py +408 -0
- hexdag_plugins/mysql_adapter/pyproject.toml +93 -0
- hexdag_plugins/mysql_adapter/tests/test_mysql_adapter.py +259 -0
- hexdag_plugins/storage/README.md +184 -0
- hexdag_plugins/storage/__init__.py +19 -0
- hexdag_plugins/storage/file/__init__.py +5 -0
- hexdag_plugins/storage/file/local.py +325 -0
- hexdag_plugins/storage/ports/__init__.py +5 -0
- hexdag_plugins/storage/ports/vector_store.py +236 -0
- hexdag_plugins/storage/sql/__init__.py +7 -0
- hexdag_plugins/storage/sql/base.py +187 -0
- hexdag_plugins/storage/sql/mysql.py +27 -0
- hexdag_plugins/storage/sql/postgresql.py +27 -0
- hexdag_plugins/storage/tests/__init__.py +1 -0
- hexdag_plugins/storage/tests/test_local_file_storage.py +161 -0
- hexdag_plugins/storage/tests/test_sql_adapters.py +212 -0
- hexdag_plugins/storage/vector/__init__.py +7 -0
- hexdag_plugins/storage/vector/chromadb.py +223 -0
- hexdag_plugins/storage/vector/in_memory.py +285 -0
- hexdag_plugins/storage/vector/pgvector.py +502 -0
|
@@ -0,0 +1,390 @@
|
|
|
1
|
+
"""ConversationMacro - Multi-turn chat with dynamic message history expansion.
|
|
2
|
+
|
|
3
|
+
Architecture:
|
|
4
|
+
- Loads conversation history from Memory port
|
|
5
|
+
- Accumulates messages dynamically during execution
|
|
6
|
+
- Supports tools with dynamic ToolMacro expansion
|
|
7
|
+
- Maintains conversation state across turns
|
|
8
|
+
- Automatic context window management
|
|
9
|
+
|
|
10
|
+
Requirements:
|
|
11
|
+
- Memory port must be configured in the pipeline for persistence
|
|
12
|
+
- Without memory port, conversations start fresh each time
|
|
13
|
+
|
|
14
|
+
Example:
|
|
15
|
+
Turn 1: User → [system, user] → LLM → Response → Save history
|
|
16
|
+
Turn 2: User → [system, user, assistant, user] → LLM → Response → Save
|
|
17
|
+
Turn N: Accumulated history → LLM → Response → Save
|
|
18
|
+
|
|
19
|
+
Dynamic Expansion:
|
|
20
|
+
- MessageAccumulator node injects new user messages at runtime
|
|
21
|
+
- ToolCallExpander node injects ToolCallNodes when LLM requests tools
|
|
22
|
+
- ConversationSaver node persists updated history to Memory port
|
|
23
|
+
"""
|
|
24
|
+
|
|
25
|
+
from typing import Any
|
|
26
|
+
|
|
27
|
+
from pydantic import Field
|
|
28
|
+
|
|
29
|
+
from hexdag.builtin.macros.reasoning_agent import ReasoningAgentMacro
|
|
30
|
+
from hexdag.builtin.nodes.function_node import FunctionNode
|
|
31
|
+
from hexdag.builtin.nodes.tool_utils import ToolCallFormat
|
|
32
|
+
from hexdag.core.configurable import ConfigurableMacro, MacroConfig
|
|
33
|
+
from hexdag.core.context import get_port
|
|
34
|
+
from hexdag.core.domain.dag import DirectedGraph, NodeSpec
|
|
35
|
+
from hexdag.core.logging import get_logger
|
|
36
|
+
from hexdag.core.resolver import resolve
|
|
37
|
+
|
|
38
|
+
logger = get_logger(__name__)
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class ConversationConfig(MacroConfig):
    """Configuration for ConversationMacro.

    Attributes
    ----------
    system_prompt : str
        System message to set agent behavior
    conversation_id : str
        Unique ID for this conversation (used as memory key)
    max_history : int
        Maximum number of messages to keep (default: 20)
    allowed_tools : list[str]
        Tools available to the agent (qualified names)
    tool_format : ToolCallFormat
        Tool calling format (default: MIXED)
    enable_tool_use : bool
        Whether to enable tool calling (default: True)
    memory_adapter : str | None
        Optional memory adapter to use (default: uses pipeline's memory port)
        If None, will use the memory port configured at pipeline level
    """

    system_prompt: str = Field(default="You are a helpful assistant")
    # Required: identifies the conversation; becomes part of the memory key
    # ("conversation:<id>") used by load/save nodes.
    conversation_id: str
    # ge=2 so trimming can always keep the system message plus at least one turn.
    max_history: int = Field(default=20, ge=2)
    allowed_tools: list[str] = Field(default_factory=list)
    tool_format: ToolCallFormat = Field(default=ToolCallFormat.MIXED)
    enable_tool_use: bool = Field(default=True)
    # When set, this adapter path is resolved and instantiated per call instead
    # of using the pipeline-level "memory" port.
    memory_adapter: str | None = Field(
        default=None,
        description="Optional memory adapter override (e.g., 'plugin:redis_memory')",
    )
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
class ConversationMacro(ConfigurableMacro):
    """Multi-turn conversation with dynamic message history expansion.

    Expansion produces a linear graph:

    ```
    [deps] → load_history → format_prompt → reasoning_agent → save_history
    ```

    - ``load_history``: pulls prior messages from the Memory port, or starts a
      fresh conversation (system message only) when no port/history exists.
    - ``format_prompt``: appends the new user message, trims to ``max_history``
      and renders the transcript into a single prompt string.
    - ``reasoning_agent``: a :class:`ReasoningAgentMacro` expansion that does
      the multi-step reasoning and optional tool calling.
    - ``save_history``: appends the assistant reply and persists the updated
      transcript back to the Memory port.

    Without a configured memory port the macro still runs, but every
    invocation starts a fresh conversation.

    Examples
    --------
    YAML configuration with memory port:

        apiVersion: v1
        kind: Pipeline
        metadata:
          name: chatbot_pipeline
        spec:
          ports:
            memory:
              adapter: in_memory_memory
              config:
                max_size: 1000
            llm:
              adapter: plugin:openai
              config:
                model: gpt-4
          nodes:
            - kind: macro_invocation
              metadata:
                name: chatbot
              spec:
                macro: core:conversation
                config:
                  system_prompt: "You are a research assistant"
                  conversation_id: "{{session_id}}"
                  max_history: 20
                  allowed_tools: ["core:search", "core:calculate"]
                  enable_tool_use: true

    Multi-turn execution:

        # Turn 1
        results = await orchestrator.run(
            graph,
            {"user_message": "What is AI?", "session_id": "user123"},
            dynamic=True
        )

        # Turn 2 - conversation history loaded automatically
        results = await orchestrator.run(
            graph,
            {"user_message": "Tell me more", "session_id": "user123"},
            dynamic=True
        )
    """

    Config = ConversationConfig

    @staticmethod
    def _resolve_memory_port(
        config: ConversationConfig, required_methods: tuple[str, ...]
    ) -> Any:
        """Return the memory port to use for this conversation.

        Resolves and instantiates ``config.memory_adapter`` when set,
        otherwise falls back to the pipeline-level ``memory`` port.
        Centralizes logic previously duplicated in the load and save nodes.

        Raises
        ------
        TypeError
            If a configured adapter lacks one of ``required_methods``.
            (The original used ``assert``, which is stripped under
            ``python -O``; callers wrap this in ``except Exception`` and
            degrade gracefully, so raising is backward-compatible.)
        """
        if config.memory_adapter:
            # Use specific adapter if configured (full module path)
            memory_class = resolve(config.memory_adapter)
            memory_port = memory_class()
            missing = [m for m in required_methods if not hasattr(memory_port, m)]
            if missing:
                raise TypeError(
                    f"Memory adapter {config.memory_adapter!r} is missing "
                    f"required methods: {missing}"
                )
            return memory_port
        # Use default memory port from pipeline
        return get_port("memory")

    def expand(
        self,
        instance_name: str,
        inputs: dict[str, Any],
        dependencies: list[str],
    ) -> DirectedGraph:
        """Expand into conversation nodes using ReasoningAgent for core logic.

        Graph structure:
        ```
        [deps] → load_history → format_prompt → reasoning_agent → save_history
        ```

        Args
        ----
        instance_name: Unique name for this conversation instance
        inputs: Input data (must include conversation_id and user_message)
        dependencies: Nodes to depend on

        Returns
        -------
        DirectedGraph
            Graph with conversation nodes
        """
        graph = DirectedGraph()
        config: ConversationConfig = self.config  # type: ignore[assignment]

        fn_factory = FunctionNode()

        # Node 1: Load conversation history from Memory
        load_node = self._create_load_history_node(fn_factory, instance_name, config, dependencies)
        graph += load_node

        # Node 2: Format conversation into prompt for reasoning agent
        format_node = self._create_format_prompt_node(fn_factory, instance_name, config)
        graph += format_node

        # Node 3: ReasoningAgent performs the actual reasoning / tool execution.
        reasoning_macro = ReasoningAgentMacro(
            main_prompt="{{conversation_prompt}}",  # Filled from format_prompt node
            max_steps=3,  # Allow multi-step reasoning
            allowed_tools=config.allowed_tools if config.enable_tool_use else [],
            tool_format=config.tool_format,
        )

        # Expand reasoning agent with dependency on format_prompt
        reasoning_graph = reasoning_macro.expand(
            f"{instance_name}_reasoning", inputs, [f"{instance_name}_format_prompt"]
        )
        graph |= reasoning_graph

        # Node 4: Save updated conversation history
        save_node = self._create_save_history_node(
            fn_factory,
            instance_name,
            config,
            f"{instance_name}_reasoning_final",  # Depend on reasoning agent's final output
        )
        graph += save_node

        return graph

    def _create_load_history_node(
        self,
        fn_factory: FunctionNode,
        instance_name: str,
        config: ConversationConfig,
        dependencies: list[str],
    ) -> NodeSpec:
        """Create node that loads conversation history from Memory port."""

        async def load_history(input_data: dict[str, Any]) -> dict[str, Any]:
            """Load conversation history from memory."""
            conversation_id = input_data.get("conversation_id") or config.conversation_id
            user_message = input_data.get("user_message", "")

            # Resolve the memory port; any failure means we simply start fresh.
            try:
                memory_port = self._resolve_memory_port(config, ("aget", "aset"))
            except Exception as e:
                logger.warning(f"Memory port not available ({e}), starting fresh conversation")
                return {
                    "conversation_id": conversation_id,
                    "messages": [{"role": "system", "content": config.system_prompt}],
                    "user_message": user_message,
                }

            # Load history from memory; best-effort, fall back to a new transcript.
            memory_key = f"conversation:{conversation_id}"
            try:
                history_json = await memory_port.aget(memory_key)  # pyright: ignore[reportAttributeAccessIssue]
                if history_json:
                    import json

                    messages = json.loads(history_json)
                    logger.debug(
                        f"Loaded {len(messages)} messages from conversation {conversation_id}"
                    )
                else:
                    # New conversation
                    messages = [{"role": "system", "content": config.system_prompt}]
                    logger.debug(f"Starting new conversation {conversation_id}")
            except Exception as e:
                logger.warning(f"Failed to load history: {e}, starting fresh")
                messages = [{"role": "system", "content": config.system_prompt}]

            return {
                "conversation_id": conversation_id,
                "messages": messages,
                "user_message": user_message,
            }

        return fn_factory(
            name=f"{instance_name}_load_history",
            fn=load_history,
            deps=dependencies,
        )

    def _create_format_prompt_node(
        self, fn_factory: FunctionNode, instance_name: str, config: ConversationConfig
    ) -> NodeSpec:
        """Create node that formats conversation history into a prompt for reasoning agent."""

        async def format_prompt(history_data: dict[str, Any]) -> dict[str, Any]:
            """Format conversation history into prompt for reasoning agent."""
            messages = history_data["messages"]
            user_message = history_data["user_message"]

            # Add new user message
            if user_message:
                messages.append({"role": "user", "content": user_message})
                logger.debug(f"Added user message (total messages: {len(messages)})")

            # Trim history if needed. NOTE(review): assumes messages[0] is the
            # system message, which holds for histories produced by
            # load_history; externally-seeded histories should match this.
            if len(messages) > config.max_history:
                system_msg = messages[0]
                recent_messages = messages[-(config.max_history - 1) :]
                messages = [system_msg] + recent_messages
                logger.debug(f"Trimmed history to {len(messages)} messages")

            # Render the transcript as ROLE-prefixed lines for the reasoning agent.
            conversation_context = "\n".join([
                f"{msg['role'].upper()}: {msg['content']}" for msg in messages
            ])

            prompt = f"""{config.system_prompt}

## Conversation History:
{conversation_context}

Please provide a thoughtful response to continue this conversation."""

            return {
                "conversation_id": history_data["conversation_id"],
                "messages": messages,
                "conversation_prompt": prompt,
            }

        return fn_factory(
            name=f"{instance_name}_format_prompt",
            fn=format_prompt,
            deps=[f"{instance_name}_load_history"],
        )

    def _create_save_history_node(
        self,
        fn_factory: FunctionNode,
        instance_name: str,
        config: ConversationConfig,
        reasoning_node: str,
    ) -> NodeSpec:
        """Create node that saves updated conversation history."""

        async def save_history(reasoning_response: Any) -> dict[str, Any]:
            """Save updated conversation history to memory."""
            # Get previous messages from context
            from hexdag.core.context import get_node_results

            node_results = get_node_results()
            if not node_results:
                logger.warning("No node results available, cannot save history")
                return {"response": str(reasoning_response)}

            format_node_result = node_results.get(f"{instance_name}_format_prompt")
            if not format_node_result:
                logger.warning("Format prompt node result not found")
                return {"response": str(reasoning_response)}

            # Copy before appending so the format node's stored result is not
            # mutated in place (it may be read by other observers/nodes).
            messages = list(format_node_result.result.get("messages", []))
            conversation_id = format_node_result.result.get("conversation_id")

            # Add assistant response to history
            # ReasoningAgent returns a string response
            assistant_response = str(reasoning_response) if reasoning_response else ""
            messages.append({"role": "assistant", "content": assistant_response})

            # Save to memory; persistence failures are logged, never fatal.
            try:
                memory_port = self._resolve_memory_port(config, ("aset",))

                import json

                memory_key = f"conversation:{conversation_id}"
                await memory_port.aset(memory_key, json.dumps(messages))  # pyright: ignore[reportAttributeAccessIssue]
                logger.debug(f"Saved conversation with {len(messages)} messages")
            except Exception as e:
                logger.warning(f"Failed to save conversation: {e}")

            return {
                "response": assistant_response,
                "conversation_id": conversation_id,
                "message_count": len(messages),
            }

        return fn_factory(
            name=f"{instance_name}_save_history",
            fn=save_history,
            deps=[reasoning_node],
        )
|
|
@@ -0,0 +1,151 @@
|
|
|
1
|
+
"""LLM Macro - Structured LLM workflow with prompt and optional parsing.
|
|
2
|
+
|
|
3
|
+
This macro provides a convenient way to use the unified LLMNode with
|
|
4
|
+
structured output parsing in a declarative YAML-friendly format.
|
|
5
|
+
|
|
6
|
+
Note: With the unified LLMNode, this macro is now a thin wrapper.
|
|
7
|
+
Consider using LLMNode directly for simpler use cases.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from typing import Any
|
|
11
|
+
|
|
12
|
+
from pydantic import BaseModel, ConfigDict, field_validator
|
|
13
|
+
|
|
14
|
+
from hexdag.builtin.nodes.llm_node import LLMNode
|
|
15
|
+
from hexdag.core.configurable import ConfigurableMacro, MacroConfig
|
|
16
|
+
from hexdag.core.domain.dag import DirectedGraph
|
|
17
|
+
from hexdag.core.orchestration.prompt import PromptInput
|
|
18
|
+
from hexdag.core.utils.schema_conversion import normalize_schema
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class LLMMacroConfig(MacroConfig):
    """Configuration for LLM macro.

    Attributes
    ----------
    template : PromptInput
        Prompt template for LLM
    output_schema : dict[str, type] | type[BaseModel] | None
        Expected output schema (if None, returns raw text)
    system_prompt : str | None
        Optional system prompt
    parse_strategy : str
        Parsing strategy: "json", "json_in_markdown", "yaml"
    """

    # PromptInput and raw `type` values are not standard pydantic types.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    template: PromptInput
    output_schema: dict[str, type] | type[BaseModel] | None = None
    system_prompt: str | None = None
    parse_strategy: str = "json"

    @field_validator("output_schema", mode="before")
    @classmethod
    def normalize_output_schema(cls, value: Any) -> Any:
        """Convert YAML-friendly schema to Python types."""
        # YAML supplies type names as strings (e.g. "str"); the shared
        # normalize_schema utility maps them onto actual Python types.
        return None if value is None else normalize_schema(value)
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
class LLMMacro(ConfigurableMacro):
    """LLM macro that wraps the unified LLMNode.

    Provides a YAML-friendly interface for structured LLM interactions. The
    heavy lifting — prompt templating, the API call, and optional JSON
    parsing — all happens inside the single unified LLMNode this macro emits.

    Note: For simple use cases, consider using LLMNode directly instead of
    this macro.

    Examples
    --------
    Basic usage (text generation)::

        from hexdag.builtin.macros import LLMMacro, LLMMacroConfig

        config = LLMMacroConfig(
            template="Explain {{topic}} in simple terms"
        )

        macro = LLMMacro(config)
        graph = macro.expand(
            instance_name="explainer",
            inputs={"topic": "quantum computing"},
            dependencies=[]
        )

    Structured output with parsing::

        from pydantic import BaseModel

        class Explanation(BaseModel):
            summary: str
            key_points: list[str]

        config = LLMMacroConfig(
            template="Explain {{topic}}. Return JSON with summary and key_points.",
            output_schema=Explanation,
            parse_strategy="json"
        )

        macro = LLMMacro(config)
        graph = macro.expand(...)

    YAML usage::

        nodes:
          - kind: macro_invocation
            metadata:
              name: analyzer
            spec:
              macro: core:llm_workflow
              config:
                template: "Analyze {{data}}"
                output_schema:
                  summary: str
                  sentiment: str
              inputs:
                data: "{{previous_node.output}}"
    """

    Config = LLMMacroConfig

    def expand(
        self,
        instance_name: str,
        inputs: dict[str, Any],
        dependencies: list[str],
    ) -> DirectedGraph:
        """Expand the macro into a graph holding exactly one unified LLMNode.

        Args
        ----
        instance_name: Base name for generated nodes
        inputs: Input mappings for the macro
        dependencies: List of node names this macro depends on

        Returns
        -------
        DirectedGraph
            Graph containing a single unified LLMNode
        """
        cfg: LLMMacroConfig = self.config  # type: ignore[assignment]

        # JSON parsing is enabled exactly when a schema was configured.
        wants_parsing = cfg.output_schema is not None

        node_spec = LLMNode()(
            name=instance_name,
            prompt_template=cfg.template,
            output_schema=cfg.output_schema,
            system_prompt=cfg.system_prompt,
            parse_json=wants_parsing,
            parse_strategy=cfg.parse_strategy,
            deps=dependencies,
        )

        graph = DirectedGraph()
        graph += node_spec
        return graph
|