hexdag-0.5.0.dev1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hexdag/__init__.py +116 -0
- hexdag/__main__.py +30 -0
- hexdag/adapters/executors/__init__.py +5 -0
- hexdag/adapters/executors/local_executor.py +316 -0
- hexdag/builtin/__init__.py +6 -0
- hexdag/builtin/adapters/__init__.py +51 -0
- hexdag/builtin/adapters/anthropic/__init__.py +5 -0
- hexdag/builtin/adapters/anthropic/anthropic_adapter.py +151 -0
- hexdag/builtin/adapters/database/__init__.py +6 -0
- hexdag/builtin/adapters/database/csv/csv_adapter.py +249 -0
- hexdag/builtin/adapters/database/pgvector/__init__.py +5 -0
- hexdag/builtin/adapters/database/pgvector/pgvector_adapter.py +478 -0
- hexdag/builtin/adapters/database/sqlalchemy/sqlalchemy_adapter.py +252 -0
- hexdag/builtin/adapters/database/sqlite/__init__.py +5 -0
- hexdag/builtin/adapters/database/sqlite/sqlite_adapter.py +410 -0
- hexdag/builtin/adapters/local/README.md +59 -0
- hexdag/builtin/adapters/local/__init__.py +7 -0
- hexdag/builtin/adapters/local/local_observer_manager.py +696 -0
- hexdag/builtin/adapters/memory/__init__.py +47 -0
- hexdag/builtin/adapters/memory/file_memory_adapter.py +297 -0
- hexdag/builtin/adapters/memory/in_memory_memory.py +216 -0
- hexdag/builtin/adapters/memory/schemas.py +57 -0
- hexdag/builtin/adapters/memory/session_memory.py +178 -0
- hexdag/builtin/adapters/memory/sqlite_memory_adapter.py +215 -0
- hexdag/builtin/adapters/memory/state_memory.py +280 -0
- hexdag/builtin/adapters/mock/README.md +89 -0
- hexdag/builtin/adapters/mock/__init__.py +15 -0
- hexdag/builtin/adapters/mock/hexdag.toml +50 -0
- hexdag/builtin/adapters/mock/mock_database.py +225 -0
- hexdag/builtin/adapters/mock/mock_embedding.py +223 -0
- hexdag/builtin/adapters/mock/mock_llm.py +177 -0
- hexdag/builtin/adapters/mock/mock_tool_adapter.py +192 -0
- hexdag/builtin/adapters/mock/mock_tool_router.py +232 -0
- hexdag/builtin/adapters/openai/__init__.py +5 -0
- hexdag/builtin/adapters/openai/openai_adapter.py +634 -0
- hexdag/builtin/adapters/secret/__init__.py +7 -0
- hexdag/builtin/adapters/secret/local_secret_adapter.py +248 -0
- hexdag/builtin/adapters/unified_tool_router.py +280 -0
- hexdag/builtin/macros/__init__.py +17 -0
- hexdag/builtin/macros/conversation_agent.py +390 -0
- hexdag/builtin/macros/llm_macro.py +151 -0
- hexdag/builtin/macros/reasoning_agent.py +423 -0
- hexdag/builtin/macros/tool_macro.py +380 -0
- hexdag/builtin/nodes/__init__.py +38 -0
- hexdag/builtin/nodes/_discovery.py +123 -0
- hexdag/builtin/nodes/agent_node.py +696 -0
- hexdag/builtin/nodes/base_node_factory.py +242 -0
- hexdag/builtin/nodes/composite_node.py +926 -0
- hexdag/builtin/nodes/data_node.py +201 -0
- hexdag/builtin/nodes/expression_node.py +487 -0
- hexdag/builtin/nodes/function_node.py +454 -0
- hexdag/builtin/nodes/llm_node.py +491 -0
- hexdag/builtin/nodes/loop_node.py +920 -0
- hexdag/builtin/nodes/mapped_input.py +518 -0
- hexdag/builtin/nodes/port_call_node.py +269 -0
- hexdag/builtin/nodes/tool_call_node.py +195 -0
- hexdag/builtin/nodes/tool_utils.py +390 -0
- hexdag/builtin/prompts/__init__.py +68 -0
- hexdag/builtin/prompts/base.py +422 -0
- hexdag/builtin/prompts/chat_prompts.py +303 -0
- hexdag/builtin/prompts/error_correction_prompts.py +320 -0
- hexdag/builtin/prompts/tool_prompts.py +160 -0
- hexdag/builtin/tools/builtin_tools.py +84 -0
- hexdag/builtin/tools/database_tools.py +164 -0
- hexdag/cli/__init__.py +17 -0
- hexdag/cli/__main__.py +7 -0
- hexdag/cli/commands/__init__.py +27 -0
- hexdag/cli/commands/build_cmd.py +812 -0
- hexdag/cli/commands/create_cmd.py +208 -0
- hexdag/cli/commands/docs_cmd.py +293 -0
- hexdag/cli/commands/generate_types_cmd.py +252 -0
- hexdag/cli/commands/init_cmd.py +188 -0
- hexdag/cli/commands/pipeline_cmd.py +494 -0
- hexdag/cli/commands/plugin_dev_cmd.py +529 -0
- hexdag/cli/commands/plugins_cmd.py +441 -0
- hexdag/cli/commands/studio_cmd.py +101 -0
- hexdag/cli/commands/validate_cmd.py +221 -0
- hexdag/cli/main.py +84 -0
- hexdag/core/__init__.py +83 -0
- hexdag/core/config/__init__.py +20 -0
- hexdag/core/config/loader.py +479 -0
- hexdag/core/config/models.py +150 -0
- hexdag/core/configurable.py +294 -0
- hexdag/core/context/__init__.py +37 -0
- hexdag/core/context/execution_context.py +378 -0
- hexdag/core/docs/__init__.py +26 -0
- hexdag/core/docs/extractors.py +678 -0
- hexdag/core/docs/generators.py +890 -0
- hexdag/core/docs/models.py +120 -0
- hexdag/core/domain/__init__.py +10 -0
- hexdag/core/domain/dag.py +1225 -0
- hexdag/core/exceptions.py +234 -0
- hexdag/core/expression_parser.py +569 -0
- hexdag/core/logging.py +449 -0
- hexdag/core/models/__init__.py +17 -0
- hexdag/core/models/base.py +138 -0
- hexdag/core/orchestration/__init__.py +46 -0
- hexdag/core/orchestration/body_executor.py +481 -0
- hexdag/core/orchestration/components/__init__.py +97 -0
- hexdag/core/orchestration/components/adapter_lifecycle_manager.py +113 -0
- hexdag/core/orchestration/components/checkpoint_manager.py +134 -0
- hexdag/core/orchestration/components/execution_coordinator.py +360 -0
- hexdag/core/orchestration/components/health_check_manager.py +176 -0
- hexdag/core/orchestration/components/input_mapper.py +143 -0
- hexdag/core/orchestration/components/lifecycle_manager.py +583 -0
- hexdag/core/orchestration/components/node_executor.py +377 -0
- hexdag/core/orchestration/components/secret_manager.py +202 -0
- hexdag/core/orchestration/components/wave_executor.py +158 -0
- hexdag/core/orchestration/constants.py +17 -0
- hexdag/core/orchestration/events/README.md +312 -0
- hexdag/core/orchestration/events/__init__.py +104 -0
- hexdag/core/orchestration/events/batching.py +330 -0
- hexdag/core/orchestration/events/decorators.py +139 -0
- hexdag/core/orchestration/events/events.py +573 -0
- hexdag/core/orchestration/events/observers/__init__.py +30 -0
- hexdag/core/orchestration/events/observers/core_observers.py +690 -0
- hexdag/core/orchestration/events/observers/models.py +111 -0
- hexdag/core/orchestration/events/taxonomy.py +269 -0
- hexdag/core/orchestration/hook_context.py +237 -0
- hexdag/core/orchestration/hooks.py +437 -0
- hexdag/core/orchestration/models.py +418 -0
- hexdag/core/orchestration/orchestrator.py +910 -0
- hexdag/core/orchestration/orchestrator_factory.py +275 -0
- hexdag/core/orchestration/port_wrappers.py +327 -0
- hexdag/core/orchestration/prompt/__init__.py +32 -0
- hexdag/core/orchestration/prompt/template.py +332 -0
- hexdag/core/pipeline_builder/__init__.py +21 -0
- hexdag/core/pipeline_builder/component_instantiator.py +386 -0
- hexdag/core/pipeline_builder/include_tag.py +265 -0
- hexdag/core/pipeline_builder/pipeline_config.py +133 -0
- hexdag/core/pipeline_builder/py_tag.py +223 -0
- hexdag/core/pipeline_builder/tag_discovery.py +268 -0
- hexdag/core/pipeline_builder/yaml_builder.py +1196 -0
- hexdag/core/pipeline_builder/yaml_validator.py +569 -0
- hexdag/core/ports/__init__.py +65 -0
- hexdag/core/ports/api_call.py +133 -0
- hexdag/core/ports/database.py +489 -0
- hexdag/core/ports/embedding.py +215 -0
- hexdag/core/ports/executor.py +237 -0
- hexdag/core/ports/file_storage.py +117 -0
- hexdag/core/ports/healthcheck.py +87 -0
- hexdag/core/ports/llm.py +551 -0
- hexdag/core/ports/memory.py +70 -0
- hexdag/core/ports/observer_manager.py +130 -0
- hexdag/core/ports/secret.py +145 -0
- hexdag/core/ports/tool_router.py +94 -0
- hexdag/core/ports_builder.py +623 -0
- hexdag/core/protocols.py +273 -0
- hexdag/core/resolver.py +304 -0
- hexdag/core/schema/__init__.py +9 -0
- hexdag/core/schema/generator.py +742 -0
- hexdag/core/secrets.py +242 -0
- hexdag/core/types.py +413 -0
- hexdag/core/utils/async_warnings.py +206 -0
- hexdag/core/utils/schema_conversion.py +78 -0
- hexdag/core/utils/sql_validation.py +86 -0
- hexdag/core/validation/secure_json.py +148 -0
- hexdag/core/yaml_macro.py +517 -0
- hexdag/mcp_server.py +3120 -0
- hexdag/studio/__init__.py +10 -0
- hexdag/studio/build_ui.py +92 -0
- hexdag/studio/server/__init__.py +1 -0
- hexdag/studio/server/main.py +100 -0
- hexdag/studio/server/routes/__init__.py +9 -0
- hexdag/studio/server/routes/execute.py +208 -0
- hexdag/studio/server/routes/export.py +558 -0
- hexdag/studio/server/routes/files.py +207 -0
- hexdag/studio/server/routes/plugins.py +419 -0
- hexdag/studio/server/routes/validate.py +220 -0
- hexdag/studio/ui/index.html +13 -0
- hexdag/studio/ui/package-lock.json +2992 -0
- hexdag/studio/ui/package.json +31 -0
- hexdag/studio/ui/postcss.config.js +6 -0
- hexdag/studio/ui/public/hexdag.svg +5 -0
- hexdag/studio/ui/src/App.tsx +251 -0
- hexdag/studio/ui/src/components/Canvas.tsx +408 -0
- hexdag/studio/ui/src/components/ContextMenu.tsx +187 -0
- hexdag/studio/ui/src/components/FileBrowser.tsx +123 -0
- hexdag/studio/ui/src/components/Header.tsx +181 -0
- hexdag/studio/ui/src/components/HexdagNode.tsx +193 -0
- hexdag/studio/ui/src/components/NodeInspector.tsx +512 -0
- hexdag/studio/ui/src/components/NodePalette.tsx +262 -0
- hexdag/studio/ui/src/components/NodePortsSection.tsx +403 -0
- hexdag/studio/ui/src/components/PluginManager.tsx +347 -0
- hexdag/studio/ui/src/components/PortsEditor.tsx +481 -0
- hexdag/studio/ui/src/components/PythonEditor.tsx +195 -0
- hexdag/studio/ui/src/components/ValidationPanel.tsx +105 -0
- hexdag/studio/ui/src/components/YamlEditor.tsx +196 -0
- hexdag/studio/ui/src/components/index.ts +8 -0
- hexdag/studio/ui/src/index.css +92 -0
- hexdag/studio/ui/src/main.tsx +10 -0
- hexdag/studio/ui/src/types/index.ts +123 -0
- hexdag/studio/ui/src/vite-env.d.ts +1 -0
- hexdag/studio/ui/tailwind.config.js +29 -0
- hexdag/studio/ui/tsconfig.json +37 -0
- hexdag/studio/ui/tsconfig.node.json +13 -0
- hexdag/studio/ui/vite.config.ts +35 -0
- hexdag/visualization/__init__.py +69 -0
- hexdag/visualization/dag_visualizer.py +1020 -0
- hexdag-0.5.0.dev1.dist-info/METADATA +369 -0
- hexdag-0.5.0.dev1.dist-info/RECORD +261 -0
- hexdag-0.5.0.dev1.dist-info/WHEEL +4 -0
- hexdag-0.5.0.dev1.dist-info/entry_points.txt +4 -0
- hexdag-0.5.0.dev1.dist-info/licenses/LICENSE +190 -0
- hexdag_plugins/.gitignore +43 -0
- hexdag_plugins/README.md +73 -0
- hexdag_plugins/__init__.py +1 -0
- hexdag_plugins/azure/LICENSE +21 -0
- hexdag_plugins/azure/README.md +414 -0
- hexdag_plugins/azure/__init__.py +21 -0
- hexdag_plugins/azure/azure_blob_adapter.py +450 -0
- hexdag_plugins/azure/azure_cosmos_adapter.py +383 -0
- hexdag_plugins/azure/azure_keyvault_adapter.py +314 -0
- hexdag_plugins/azure/azure_openai_adapter.py +415 -0
- hexdag_plugins/azure/pyproject.toml +107 -0
- hexdag_plugins/azure/tests/__init__.py +1 -0
- hexdag_plugins/azure/tests/test_azure_blob_adapter.py +350 -0
- hexdag_plugins/azure/tests/test_azure_cosmos_adapter.py +323 -0
- hexdag_plugins/azure/tests/test_azure_keyvault_adapter.py +330 -0
- hexdag_plugins/azure/tests/test_azure_openai_adapter.py +329 -0
- hexdag_plugins/hexdag_etl/README.md +168 -0
- hexdag_plugins/hexdag_etl/__init__.py +53 -0
- hexdag_plugins/hexdag_etl/examples/01_simple_pandas_transform.py +270 -0
- hexdag_plugins/hexdag_etl/examples/02_simple_pandas_only.py +149 -0
- hexdag_plugins/hexdag_etl/examples/03_file_io_pipeline.py +109 -0
- hexdag_plugins/hexdag_etl/examples/test_pandas_transform.py +84 -0
- hexdag_plugins/hexdag_etl/hexdag.toml +25 -0
- hexdag_plugins/hexdag_etl/hexdag_etl/__init__.py +48 -0
- hexdag_plugins/hexdag_etl/hexdag_etl/nodes/__init__.py +13 -0
- hexdag_plugins/hexdag_etl/hexdag_etl/nodes/api_extract.py +230 -0
- hexdag_plugins/hexdag_etl/hexdag_etl/nodes/base_node_factory.py +181 -0
- hexdag_plugins/hexdag_etl/hexdag_etl/nodes/file_io.py +415 -0
- hexdag_plugins/hexdag_etl/hexdag_etl/nodes/outlook.py +492 -0
- hexdag_plugins/hexdag_etl/hexdag_etl/nodes/pandas_transform.py +563 -0
- hexdag_plugins/hexdag_etl/hexdag_etl/nodes/sql_extract_load.py +112 -0
- hexdag_plugins/hexdag_etl/pyproject.toml +82 -0
- hexdag_plugins/hexdag_etl/test_transform.py +54 -0
- hexdag_plugins/hexdag_etl/tests/test_plugin_integration.py +62 -0
- hexdag_plugins/mysql_adapter/LICENSE +21 -0
- hexdag_plugins/mysql_adapter/README.md +224 -0
- hexdag_plugins/mysql_adapter/__init__.py +6 -0
- hexdag_plugins/mysql_adapter/mysql_adapter.py +408 -0
- hexdag_plugins/mysql_adapter/pyproject.toml +93 -0
- hexdag_plugins/mysql_adapter/tests/test_mysql_adapter.py +259 -0
- hexdag_plugins/storage/README.md +184 -0
- hexdag_plugins/storage/__init__.py +19 -0
- hexdag_plugins/storage/file/__init__.py +5 -0
- hexdag_plugins/storage/file/local.py +325 -0
- hexdag_plugins/storage/ports/__init__.py +5 -0
- hexdag_plugins/storage/ports/vector_store.py +236 -0
- hexdag_plugins/storage/sql/__init__.py +7 -0
- hexdag_plugins/storage/sql/base.py +187 -0
- hexdag_plugins/storage/sql/mysql.py +27 -0
- hexdag_plugins/storage/sql/postgresql.py +27 -0
- hexdag_plugins/storage/tests/__init__.py +1 -0
- hexdag_plugins/storage/tests/test_local_file_storage.py +161 -0
- hexdag_plugins/storage/tests/test_sql_adapters.py +212 -0
- hexdag_plugins/storage/vector/__init__.py +7 -0
- hexdag_plugins/storage/vector/chromadb.py +223 -0
- hexdag_plugins/storage/vector/in_memory.py +285 -0
- hexdag_plugins/storage/vector/pgvector.py +502 -0
hexdag/core/ports/llm.py
ADDED
@@ -0,0 +1,551 @@
+"""Port interface definitions for Large Language Models (LLMs)."""
+
+from abc import abstractmethod
+from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable
+
+from pydantic import BaseModel
+
+if TYPE_CHECKING:
+    from hexdag.core.ports.healthcheck import HealthStatus
+
+
+class Message(BaseModel):
+    """A single message in a conversation."""
+
+    role: str
+    content: str
+
+
+MessageList = list[Message]
+
+
+class ToolCall(BaseModel):
+    """A tool call made by the LLM."""
+
+    id: str
+    name: str
+    arguments: dict[str, Any]
+
+
+class LLMResponse(BaseModel):
+    """Response from LLM with optional tool calls."""
+
+    content: str | None
+    tool_calls: list[ToolCall] | None = None
+    finish_reason: str | None = None
+
+
+@runtime_checkable
+class LLM(Protocol):
+    """Port interface for Large Language Models (LLMs).
+
+    LLMs provide natural language generation and/or embedding capabilities.
+    Implementations may use various backends (OpenAI, Anthropic, local models, etc.).
+
+    At least ONE of the following protocols must be implemented:
+    - **SupportsGeneration**: Text generation capabilities (most common)
+    - **SupportsEmbedding**: Embedding generation capabilities
+
+    Optional Capabilities
+    ---------------------
+    Adapters may optionally implement additional protocols:
+
+    - **SupportsGeneration**: Text generation (most LLM adapters)
+      - aresponse(): Generate text from messages
+
+    - **SupportsFunctionCalling**: Native tool calling (OpenAI/Anthropic style)
+      - aresponse_with_tools(): Generate responses with tool calls
+
+    - **SupportsVision**: Multimodal vision capabilities
+      - aresponse_with_vision(): Process images alongside text
+      - aresponse_with_vision_and_tools(): Vision + tool calling
+
+    - **SupportsEmbedding**: Embedding generation (unified LLM+embedding adapters)
+      - aembed(): Generate text embeddings
+      - aembed_batch(): Batch text embeddings
+      - aembed_image(): Generate image embeddings (if supported)
+      - aembed_image_batch(): Batch image embeddings (if supported)
+
+    - **Health Checks**: Connectivity monitoring
+      - ahealth_check(): Verify API connectivity and availability
+
+    Examples
+    --------
+    Unified adapter (text generation + embeddings)::
+
+        @adapter("llm", name="unified")
+        class UnifiedAdapter(LLM, SupportsGeneration, SupportsEmbedding):
+            async def aresponse(self, messages):
+                # Text generation
+                ...
+
+            async def aembed(self, text):
+                # Embedding generation
+                ...
+
+    Pure embedding adapter::
+
+        @adapter("llm", name="embeddings_only")
+        class EmbeddingAdapter(LLM, SupportsEmbedding):
+            async def aembed(self, text):
+                # Only embeddings
+                ...
+
+    Pure text generation adapter::
+
+        @adapter("llm", name="text_only")
+        class TextAdapter(LLM, SupportsGeneration):
+            async def aresponse(self, messages):
+                # Only text generation
+                ...
+    """
+
+    # No required methods - adapters must implement at least one protocol
+    pass
+
+    async def aresponse_with_tools(
+        self,
+        messages: MessageList,
+        tools: list[dict[str, Any]],
+        tool_choice: str | dict[str, Any] = "auto",
+    ) -> LLMResponse:
+        """Generate response with native tool calling support (optional).
+
+        This method enables native tool calling for LLM providers that support it
+        (OpenAI, Anthropic, Gemini, etc.). If not implemented, the framework will
+        fall back to text-based tool calling using INVOKE_TOOL: directives.
+
+        Args
+        ----
+        messages: Conversation messages
+        tools: Tool definitions in provider-specific format
+        tool_choice: Tool selection strategy ("auto", "none", or specific tool)
+
+        Returns
+        -------
+        LLMResponse
+            Response with content and optional tool calls
+
+        Examples
+        --------
+        OpenAI-style tool calling::
+
+            tools = [{
+                "type": "function",
+                "function": {
+                    "name": "search",
+                    "description": "Search the web",
+                    "parameters": {
+                        "type": "object",
+                        "properties": {"query": {"type": "string"}},
+                        "required": ["query"]
+                    }
+                }
+            }]
+
+            response = await llm.aresponse_with_tools(messages, tools)
+            # response.content: "Let me search for that"
+            # response.tool_calls: [{"id": "call_123", "name": "search", "arguments": {...}}]
+        """
+        ...
+
+    async def ahealth_check(self) -> "HealthStatus":
+        """Check LLM adapter health and connectivity (optional).
+
+        Adapters should verify:
+        - API connectivity to the LLM service
+        - Model availability
+        - Authentication status
+        - Rate limit status (if applicable)
+
+        This method is optional. If not implemented, the adapter will be
+        considered healthy by default.
+
+        Returns
+        -------
+        HealthStatus
+            Current health status with details about connectivity and availability
+
+        Examples
+        --------
+        OpenAI adapter health check::
+
+            status = await openai_adapter.ahealth_check()
+            status.status  # "healthy", "degraded", or "unhealthy"
+            status.latency_ms  # Time taken for health check
+            status.details  # {"model": "gpt-4", "rate_limit_remaining": 100}
+        """
+        ...
+
+
+@runtime_checkable
+class SupportsGeneration(Protocol):
+    """Optional protocol for LLMs that support text generation.
+
+    This protocol enables basic text generation from conversation messages.
+    Most LLM adapters will implement this protocol.
+    """
+
+    @abstractmethod
+    async def aresponse(self, messages: MessageList) -> str | None:
+        """Generate a response from a list of messages (async).
+
+        Args
+        ----
+        messages: List of role-message dicts, e.g. [{"role": "user", "content": "..."}]
+
+        Returns
+        -------
+        The generated response as a string, or None if failed.
+
+        Examples
+        --------
+        Basic text generation::
+
+            messages = [
+                Message(role="user", content="What is 2+2?")
+            ]
+            response = await llm.aresponse(messages)
+            # Returns: "2+2 equals 4."
+        """
+        ...
+
+
+@runtime_checkable
+class SupportsFunctionCalling(Protocol):
+    """Optional protocol for LLMs that support native function/tool calling.
+
+    This protocol enables structured function calling with automatic parsing,
+    commonly used by OpenAI, Anthropic, Google, and other modern LLM providers.
+    """
+
+    async def aresponse_with_tools(
+        self,
+        messages: MessageList,
+        tools: list[dict[str, Any]],
+        tool_choice: str | dict[str, Any] = "auto",
+    ) -> LLMResponse:
+        """Generate response with native tool calling support.
+
+        This method enables native tool calling for LLM providers that support it
+        (OpenAI, Anthropic, Gemini, etc.). If not implemented, the framework will
+        fall back to text-based tool calling using INVOKE_TOOL: directives.
+
+        Args
+        ----
+        messages: Conversation messages
+        tools: Tool definitions in provider-specific format
+        tool_choice: Tool selection strategy ("auto", "none", or specific tool)
+
+        Returns
+        -------
+        LLMResponse
+            Response with content and optional tool calls
+
+        Examples
+        --------
+        OpenAI-style tool calling::
+
+            tools = [{
+                "type": "function",
+                "function": {
+                    "name": "search",
+                    "description": "Search the web",
+                    "parameters": {
+                        "type": "object",
+                        "properties": {"query": {"type": "string"}},
+                        "required": ["query"]
+                    }
+                }
+            }]
+
+            response = await llm.aresponse_with_tools(messages, tools)
+            # response.content: "Let me search for that"
+            # response.tool_calls: [{"id": "call_123", "name": "search", "arguments": {...}}]
+
+        Force specific tool usage::
+
+            response = await llm.aresponse_with_tools(
+                messages,
+                tools,
+                tool_choice={"type": "function", "function": {"name": "search"}}
+            )
+        """
+        ...
+
+
+class ImageContent(BaseModel):
+    """Image content in a vision-enabled message."""
+
+    type: str = "image"  # "image" or "image_url"
+    source: str | dict[str, Any]  # URL, base64, or provider-specific format
+    detail: str = "auto"  # "low", "high", or "auto" (for OpenAI)
+
+
+class VisionMessage(BaseModel):
+    """Message with optional image content for vision-enabled LLMs."""
+
+    role: str
+    content: str | list[dict[str, Any]]  # Text or mixed text+image content
+
+
+@runtime_checkable
+class SupportsVision(Protocol):
+    """Optional protocol for LLMs that support vision/multimodal capabilities.
+
+    This protocol enables processing images alongside text in conversations,
+    allowing LLMs to analyze, describe, and reason about visual content.
+
+    Optional Methods
+    ----------------
+    Adapters may optionally implement:
+    - aresponse_with_vision_and_tools(): Vision + tool calling combined
+    """
+
+    @abstractmethod
+    async def aresponse_with_vision(
+        self,
+        messages: list[VisionMessage],
+        max_tokens: int | None = None,
+    ) -> str | None:
+        """Generate response from messages containing images and text.
+
+        Args
+        ----
+        messages: List of messages with optional image content
+        max_tokens: Optional maximum tokens in response
+
+        Returns
+        -------
+        Generated response text or None if failed
+
+        Examples
+        --------
+        Single image analysis::
+
+            messages = [
+                VisionMessage(
+                    role="user",
+                    content=[
+                        {"type": "text", "text": "What's in this image?"},
+                        {
+                            "type": "image_url",
+                            "image_url": {"url": "https://example.com/image.jpg"}
+                        }
+                    ]
+                )
+            ]
+            response = await llm.aresponse_with_vision(messages)
+
+        Multiple images comparison::
+
+            messages = [
+                VisionMessage(
+                    role="user",
+                    content=[
+                        {"type": "text", "text": "Compare these two images"},
+                        {"type": "image_url", "image_url": {"url": "image1.jpg"}},
+                        {"type": "image_url", "image_url": {"url": "image2.jpg"}}
+                    ]
+                )
+            ]
+
+        Base64 encoded image::
+
+            messages = [
+                VisionMessage(
+                    role="user",
+                    content=[
+                        {"type": "text", "text": "Describe this"},
+                        {
+                            "type": "image_url",
+                            "image_url": {
+                                "url": "data:image/jpeg;base64,/9j/4AAQ...",
+                                "detail": "high"
+                            }
+                        }
+                    ]
+                )
+            ]
+        """
+        ...
+
+    async def aresponse_with_vision_and_tools(
+        self,
+        messages: list[VisionMessage],
+        tools: list[dict[str, Any]],
+        tool_choice: str | dict[str, Any] = "auto",
+        max_tokens: int | None = None,
+    ) -> LLMResponse:
+        """Generate response with both vision and tool calling capabilities (optional).
+
+        Combines vision and function calling for advanced multimodal workflows,
+        allowing the LLM to analyze images and invoke tools based on visual content.
+
+        This method is optional. Not all vision models support native tool calling.
+
+        Args
+        ----
+        messages: Messages with optional image content
+        tools: Tool definitions in provider-specific format
+        tool_choice: Tool selection strategy ("auto", "none", or specific tool)
+        max_tokens: Optional maximum tokens in response
+
+        Returns
+        -------
+        LLMResponse
+            Response with content and optional tool calls
+
+        Examples
+        --------
+        Image analysis with tool calls::
+
+            tools = [{
+                "type": "function",
+                "function": {
+                    "name": "identify_product",
+                    "description": "Look up product details",
+                    "parameters": {
+                        "type": "object",
+                        "properties": {"product_name": {"type": "string"}},
+                        "required": ["product_name"]
+                    }
+                }
+            }]
+
+            messages = [
+                VisionMessage(
+                    role="user",
+                    content=[
+                        {"type": "text", "text": "What product is this and what's its price?"},
+                        {"type": "image_url", "image_url": {"url": "product.jpg"}}
+                    ]
+                )
+            ]
+
+            response = await llm.aresponse_with_vision_and_tools(messages, tools)
+            # LLM sees image, identifies product, and calls identify_product tool
+        """
+        ...
+
+
+type ImageInput = str | bytes
+
+
+@runtime_checkable
+class SupportsEmbedding(Protocol):
+    """Optional protocol for LLMs that support embedding generation.
+
+    This protocol enables LLM providers to also serve as embedding adapters,
+    useful for unified API management when using services like OpenAI or Azure
+    that provide both text generation and embedding capabilities.
+
+    This allows a single adapter to implement both LLM and embedding functionality.
+    """
+
+    @abstractmethod
+    async def aembed(self, text: str) -> list[float]:
+        """Generate embedding vector for a single text input.
+
+        Args
+        ----
+        text: Text string to embed
+
+        Returns
+        -------
+        List of floats representing the embedding vector
+
+        Examples
+        --------
+        Single text embedding::
+
+            embedding = await llm.aembed("Hello, world!")
+            # Returns: [0.123, -0.456, 0.789, ...]
+        """
+        ...
+
+    async def aembed_batch(self, texts: list[str]) -> list[list[float]]:
+        """Generate embeddings for multiple texts efficiently (optional).
+
+        This method enables batch processing for improved performance when
+        embedding multiple texts. If not implemented, the framework will
+        fall back to sequential calls to aembed().
+
+        Args
+        ----
+        texts: List of text strings to embed
+
+        Returns
+        -------
+        List of embedding vectors, one per input text
+
+        Examples
+        --------
+        Batch embedding::
+
+            texts = ["Hello", "World", "AI"]
+            embeddings = await llm.aembed_batch(texts)
+            # Returns: [[0.1, 0.2, ...], [0.3, 0.4, ...], [0.5, 0.6, ...]]
+        """
+        ...
+
+    async def aembed_image(self, image: ImageInput) -> list[float]:
+        """Generate embedding vector for a single image input (optional).
+
+        This method enables image embedding for multimodal LLM providers
+        that support vision embeddings (e.g., OpenAI CLIP models).
+
+        Args
+        ----
+        image: Image to embed, either as:
+            - str: File path to image or base64-encoded image data
+            - bytes: Raw image bytes
+
+        Returns
+        -------
+        List of floats representing the embedding vector
+
+        Examples
+        --------
+        Image embedding from file path::
+
+            embedding = await llm.aembed_image("/path/to/image.jpg")
+            # Returns: [0.123, -0.456, 0.789, ...]
+
+        Image embedding from bytes::
+
+            with open("image.jpg", "rb") as f:
+                image_bytes = f.read()
+            embedding = await llm.aembed_image(image_bytes)
+        """
+        ...
+
+    async def aembed_image_batch(self, images: list[ImageInput]) -> list[list[float]]:
+        """Generate embeddings for multiple images efficiently (optional).
+
+        This method enables batch processing for improved performance when
+        embedding multiple images.
+
+        Args
+        ----
+        images: List of images to embed, each can be:
+            - str: File path to image or base64-encoded image data
+            - bytes: Raw image bytes
+
+        Returns
+        -------
+        List of embedding vectors, one per input image
+
+        Examples
+        --------
+        Batch image embedding::
+
+            images = [
+                "/path/to/image1.jpg",
+                "/path/to/image2.png",
+                image_bytes
+            ]
+            embeddings = await llm.aembed_image_batch(images)
+            # Returns: [[0.1, 0.2, ...], [0.3, 0.4, ...], [0.5, 0.6, ...]]
+        """
+        ...
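
Editorial note: the port above is consumed structurally, so an adapter only has to provide the methods of the capability protocols it claims. A minimal sketch in the spirit of the UnifiedAdapter docstring example; the ToyUnifiedAdapter class and its echo/pseudo-embedding logic are invented here for illustration (the @adapter registration decorator is omitted), and only the imported names come from the llm.py module added above:

    import asyncio

    from hexdag.core.ports.llm import (
        LLM,
        Message,
        MessageList,
        SupportsEmbedding,
        SupportsGeneration,
        SupportsVision,
    )


    class ToyUnifiedAdapter(LLM, SupportsGeneration, SupportsEmbedding):
        """Toy adapter: echoes the last message and fakes an embedding."""

        async def aresponse(self, messages: MessageList) -> str | None:
            # SupportsGeneration: no real backend, just echo the last turn
            return messages[-1].content if messages else None

        async def aembed(self, text: str) -> list[float]:
            # SupportsEmbedding: deterministic pseudo-embedding, not a real model
            return [ord(ch) / 255.0 for ch in text[:8]]


    async def main() -> None:
        adapter = ToyUnifiedAdapter()
        # runtime_checkable protocols let callers probe capabilities at runtime
        print(isinstance(adapter, SupportsGeneration))  # True
        print(isinstance(adapter, SupportsVision))      # False: no vision methods
        print(await adapter.aresponse([Message(role="user", content="ping")]))
        print(await adapter.aembed("ping"))


    asyncio.run(main())

The isinstance checks mirror how a caller can branch on optional capabilities (native tool calling, vision, embeddings) instead of assuming every adapter implements the full surface.
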
hexdag/core/ports/memory.py
ADDED
@@ -0,0 +1,70 @@
+"""Port interface for Long Term Memory."""
+
+from abc import abstractmethod
+from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable
+
+if TYPE_CHECKING:
+    from hexdag.core.ports.healthcheck import HealthStatus
+
+
+@runtime_checkable
+class Memory(Protocol):
+    """Protocol for long-term memory storage and retrieval.
+
+    Optional Methods
+    ----------------
+    Adapters may optionally implement:
+    - ahealth_check(): Verify storage backend connectivity and availability
+    """
+
+    @abstractmethod
+    async def aget(self, key: str) -> Any:
+        """Retrieve a value from long-term memory asynchronously.
+
+        Args
+        ----
+        key: The key to retrieve
+
+        Returns
+        -------
+        The stored value, or None if key doesn't exist
+        """
+        pass
+
+    @abstractmethod
+    async def aset(self, key: str, value: Any) -> None:
+        """Store a value in long-term memory asynchronously.
+
+        Args
+        ----
+        key: The key to store under
+        value: The value to store
+        """
+        pass
+
+    async def ahealth_check(self) -> "HealthStatus":
+        """Check memory storage backend health (optional).
+
+        Adapters should verify:
+        - Storage backend connectivity (database, file system, Redis, etc.)
+        - Read/write operations
+        - Storage capacity/availability
+
+        This method is optional. If not implemented, the adapter will be
+        considered healthy by default.
+
+        Returns
+        -------
+        HealthStatus
+            Current health status with details about storage backend
+
+        Examples
+        --------
+        Example usage::
+
+            # Redis memory adapter health check
+            status = await redis_memory.ahealth_check()
+            status.status  # "healthy", "degraded", or "unhealthy"
+            status.details  # {"connected_clients": 5, "used_memory_mb": 128}
+        """
+        ...
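
Editorial note: the Memory port only requires aget and aset. A minimal, hypothetical in-memory adapter sketched against the protocol above (DictMemory is an invented name, not one of the shipped adapters such as in_memory_memory.py in the listing):

    import asyncio
    from typing import Any

    from hexdag.core.ports.memory import Memory


    class DictMemory(Memory):
        """Toy long-term memory backed by a plain dict (no persistence)."""

        def __init__(self) -> None:
            self._store: dict[str, Any] = {}

        async def aget(self, key: str) -> Any:
            # Protocol contract: return None when the key does not exist
            return self._store.get(key)

        async def aset(self, key: str, value: Any) -> None:
            self._store[key] = value


    async def main() -> None:
        memory = DictMemory()
        await memory.aset("last_run", {"status": "ok"})
        print(await memory.aget("last_run"))  # {'status': 'ok'}
        print(await memory.aget("missing"))   # None


    asyncio.run(main())

Because ahealth_check is optional, an adapter like this one is treated as healthy by default, as the docstring above notes.
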