hammad-python 0.0.24__py3-none-any.whl → 0.0.26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
hammad/genai/a2a/__init__.py (new file)
@@ -0,0 +1,32 @@
+"""hammad.genai.a2a"""
+
+from typing import TYPE_CHECKING
+from ..._internal import create_getattr_importer
+
+
+if TYPE_CHECKING:
+    from fasta2a import (
+        FastA2A
+    )
+    from .workers import (
+        as_a2a_app,
+        GraphWorker,
+        AgentWorker,
+    )
+
+
+__all__ = (
+    # fasta2a
+    "FastA2A",
+    # hammad.genai.a2a.workers
+    "as_a2a_app",
+    "GraphWorker",
+    "AgentWorker",
+)
+
+
+__getattr__ = create_getattr_importer(__all__)
+
+
+def __dir__() -> list[str]:
+    return list(__all__)
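
Both of the added modules rely on the package's internal `create_getattr_importer` helper: the `TYPE_CHECKING` block gives static type checkers real imports, while at runtime nothing is loaded until an exported name is first accessed (PEP 562 module `__getattr__`). A rough sketch of the pattern, assuming the helper simply resolves each exported name against the submodule that defines it (its actual internals are not shown in this diff):

```python
# Hypothetical stand-in for hammad's create_getattr_importer; the real
# helper's internals are not part of this diff.
import importlib
from typing import Any, Callable, Iterable


def create_lazy_importer(all_names: Iterable[str]) -> Callable[[str], Any]:
    exported = set(all_names)

    def __getattr__(name: str) -> Any:  # PEP 562 module-level hook
        if name not in exported:
            raise AttributeError(f"module has no attribute {name!r}")
        # Import the submodule only on first attribute access; workers.py
        # re-exports FastA2A, so every name here resolves against it.
        module = importlib.import_module(".workers", __package__)
        return getattr(module, name)

    return __getattr__
```

The practical effect is that `import hammad.genai.a2a` stays cheap; `fasta2a` is only pulled in once `FastA2A` or one of the worker classes is actually used.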
hammad/genai/a2a/workers.py (new file)
@@ -0,0 +1,552 @@
+"""hammad.genai.a2a.workers"""
+
+from typing import Union, Optional, Any, Dict, List, TYPE_CHECKING
+from contextlib import asynccontextmanager
+from collections.abc import AsyncIterator
+import uuid
+
+from fasta2a import FastA2A, Worker
+from fasta2a.broker import InMemoryBroker
+from fasta2a.storage import InMemoryStorage
+from fasta2a.schema import Artifact, Message, TaskIdParams, TaskSendParams, TextPart
+
+if TYPE_CHECKING:
+    from ..agents.agent import Agent
+    from ..graphs.base import BaseGraph
+
+__all__ = [
+    "as_a2a_app",
+    "GraphWorker",
+    "AgentWorker",
+]
+
+
+Context = List[Message]
+"""The shape of the context stored in the storage."""
+
+
+class GraphWorker(Worker[Context]):
+    """Worker implementation for BaseGraph instances."""
+
+    def __init__(
+        self,
+        graph: "BaseGraph",
+        storage: InMemoryStorage,
+        broker: InMemoryBroker,
+        state: Optional[Any] = None,
+        **kwargs,
+    ):
+        """Initialize the GraphWorker.
+
+        Args:
+            graph: The BaseGraph instance to run
+            storage: Storage backend for tasks and context
+            broker: Broker for task scheduling
+            state: Optional initial state for the graph
+            **kwargs: Additional arguments passed to Worker
+        """
+        super().__init__(storage=storage, broker=broker, **kwargs)
+        self.graph = graph
+        self.state = state
+
+    async def run_task(self, params: TaskSendParams) -> None:
+        """Execute a task using the graph."""
+        task = await self.storage.load_task(params["id"])
+        assert task is not None
+
+        await self.storage.update_task(task["id"], state="working")
+
+        # Load context
+        context = await self.storage.load_context(task["context_id"]) or []
+        context.extend(task.get("history", []))
+
+        # Build message history for the graph
+        history = self.build_message_history(context)
+
+        # Extract the user's message from the task
+        user_message = ""
+        for msg in task.get("history", []):
+            if msg.get("role") == "user":
+                # Get the text content from the message parts
+                for part in msg.get("parts", []):
+                    if part.get("kind") == "text":
+                        user_message = part.get("text", "")
+                        break
+                if user_message:
+                    break
+
+        try:
+            # Run the graph with the user message and history
+            result = await self.graph.async_run(
+                user_message, state=self.state, history=history
+            )
+
+            # Create response message
+            message = Message(
+                role="assistant",
+                parts=[TextPart(text=str(result.output), kind="text")],
+                kind="message",
+                message_id=str(uuid.uuid4()),
+            )
+
+            # Update context with new message
+            context.append(message)
+
+            # Build artifacts from the result
+            artifacts = self.build_artifacts(result)
+
+            # Update storage
+            await self.storage.update_context(task["context_id"], context)
+            await self.storage.update_task(
+                task["id"],
+                state="completed",
+                new_messages=[message],
+                new_artifacts=artifacts,
+            )
+
+        except Exception as e:
+            # Handle errors
+            error_message = Message(
+                role="assistant",
+                parts=[TextPart(text=f"Error: {str(e)}", kind="text")],
+                kind="message",
+                message_id=str(uuid.uuid4()),
+            )
+
+            context.append(error_message)
+            await self.storage.update_context(task["context_id"], context)
+            await self.storage.update_task(
+                task["id"],
+                state="failed",
+                new_messages=[error_message],
+                new_artifacts=[],
+            )
+
+    async def cancel_task(self, params: TaskIdParams) -> None:
+        """Cancel a running task."""
+        # For now, just mark the task as cancelled
+        await self.storage.update_task(params["id"], state="cancelled")
+
+    def build_message_history(self, history: List[Message]) -> List[Dict[str, Any]]:
+        """Convert A2A messages to graph message format."""
+        messages = []
+        for msg in history:
+            role = msg.get("role", "user")
+            content = ""
+
+            # Extract text content from message parts
+            for part in msg.get("parts", []):
+                if part.get("kind") == "text":
+                    content = part.get("text", "")
+                    break
+
+            if content:
+                messages.append({"role": role, "content": content})
+
+        return messages
+
+    def build_artifacts(self, result: Any) -> List[Artifact]:
+        """Build artifacts from graph execution result."""
+        artifacts = []
+
+        # Add the main output as an artifact
+        if hasattr(result, "output"):
+            artifacts.append(
+                {
+                    "id": str(uuid.uuid4()),
+                    "type": "text",
+                    "data": str(result.output),
+                    "metadata": {
+                        "source": "graph_output",
+                        "model": getattr(result, "model", "unknown"),
+                    },
+                }
+            )
+
+        # Add state as an artifact if available
+        if hasattr(result, "state") and result.state is not None:
+            artifacts.append(
+                {
+                    "id": str(uuid.uuid4()),
+                    "type": "state",
+                    "data": str(result.state),
+                    "metadata": {"source": "graph_state"},
+                }
+            )
+
+        # Add execution metadata
+        if hasattr(result, "nodes_executed"):
+            artifacts.append(
+                {
+                    "id": str(uuid.uuid4()),
+                    "type": "metadata",
+                    "data": {
+                        "nodes_executed": result.nodes_executed,
+                        "start_node": getattr(result, "start_node", None),
+                    },
+                    "metadata": {"source": "graph_execution"},
+                }
+            )
+
+        return artifacts
+
+
+class AgentWorker(Worker[Context]):
+    """Worker implementation for Agent instances."""
+
+    def __init__(
+        self,
+        agent: "Agent",
+        storage: InMemoryStorage,
+        broker: InMemoryBroker,
+        context: Optional[Any] = None,
+        **kwargs,
+    ):
+        """Initialize the AgentWorker.
+
+        Args:
+            agent: The Agent instance to run
+            storage: Storage backend for tasks and context
+            broker: Broker for task scheduling
+            context: Optional initial context for the agent
+            **kwargs: Additional arguments passed to Worker
+        """
+        super().__init__(storage=storage, broker=broker, **kwargs)
+        self.agent = agent
+        self.agent_context = context
+
+    async def run_task(self, params: TaskSendParams) -> None:
+        """Execute a task using the agent."""
+        task = await self.storage.load_task(params["id"])
+        assert task is not None
+
+        await self.storage.update_task(task["id"], state="working")
+
+        # Load context
+        context = await self.storage.load_context(task["context_id"]) or []
+        context.extend(task.get("history", []))
+
+        # Build message history for the agent
+        history = self.build_message_history(context)
+
+        # Extract the user's message from the task
+        user_message = ""
+        for msg in task.get("history", []):
+            if msg.get("role") == "user":
+                # Get the text content from the message parts
+                for part in msg.get("parts", []):
+                    if part.get("kind") == "text":
+                        user_message = part.get("text", "")
+                        break
+                if user_message:
+                    break
+
+        try:
+            # Prepare messages for the agent
+            messages = history if history else []
+            if user_message:
+                messages.append({"role": "user", "content": user_message})
+
+            # Run the agent
+            result = await self.agent.async_run(
+                messages=messages, context=self.agent_context
+            )
+
+            # Create response message
+            message = Message(
+                role="assistant",
+                parts=[TextPart(text=str(result.output), kind="text")],
+                kind="message",
+                message_id=str(uuid.uuid4()),
+            )
+
+            # Update context with new message
+            context.append(message)
+
+            # Build artifacts from the result
+            artifacts = self.build_artifacts(result)
+
+            # Update the agent context if it was modified
+            if hasattr(result, "context") and result.context is not None:
+                self.agent_context = result.context
+
+            # Update storage
+            await self.storage.update_context(task["context_id"], context)
+            await self.storage.update_task(
+                task["id"],
+                state="completed",
+                new_messages=[message],
+                new_artifacts=artifacts,
+            )
+
+        except Exception as e:
+            # Handle errors
+            error_message = Message(
+                role="assistant",
+                parts=[TextPart(text=f"Error: {str(e)}", kind="text")],
+                kind="message",
+                message_id=str(uuid.uuid4()),
+            )
+
+            context.append(error_message)
+            await self.storage.update_context(task["context_id"], context)
+            await self.storage.update_task(
+                task["id"],
+                state="failed",
+                new_messages=[error_message],
+                new_artifacts=[],
+            )
+
+    async def cancel_task(self, params: TaskIdParams) -> None:
+        """Cancel a running task."""
+        # For now, just mark the task as cancelled
+        await self.storage.update_task(params["id"], state="cancelled")
+
+    def build_message_history(self, history: List[Message]) -> List[Dict[str, Any]]:
+        """Convert A2A messages to agent message format."""
+        messages = []
+        for msg in history:
+            role = msg.get("role", "user")
+            content = ""
+
+            # Extract text content from message parts
+            for part in msg.get("parts", []):
+                if part.get("kind") == "text":
+                    content = part.get("text", "")
+                    break
+
+            if content:
+                messages.append({"role": role, "content": content})
+
+        return messages
+
+    def build_artifacts(self, result: Any) -> List[Artifact]:
+        """Build artifacts from agent execution result."""
+        artifacts = []
+
+        # Add the main output as an artifact
+        if hasattr(result, "output"):
+            artifacts.append(
+                {
+                    "id": str(uuid.uuid4()),
+                    "type": "text",
+                    "data": str(result.output),
+                    "metadata": {
+                        "source": "agent_output",
+                        "model": getattr(result, "model", self.agent.model),
+                    },
+                }
+            )
+
+        # Add context as an artifact if available
+        if hasattr(result, "context") and result.context is not None:
+            artifacts.append(
+                {
+                    "id": str(uuid.uuid4()),
+                    "type": "context",
+                    "data": str(result.context),
+                    "metadata": {"source": "agent_context"},
+                }
+            )
+
+        # Add steps/tool calls as artifacts
+        if hasattr(result, "steps") and result.steps:
+            for i, step in enumerate(result.steps):
+                if hasattr(step, "tool_calls") and step.tool_calls:
+                    for tool_call in step.tool_calls:
+                        artifacts.append(
+                            {
+                                "id": str(uuid.uuid4()),
+                                "type": "tool_call",
+                                "data": {
+                                    "tool": tool_call.function.name,
+                                    "arguments": tool_call.function.arguments,
+                                    "step": i + 1,
+                                },
+                                "metadata": {"source": "agent_tool_call"},
+                            }
+                        )
+
+        return artifacts
+
+
+def as_a2a_app(
+    instance: Union["Agent", "BaseGraph"],
+    *,
+    # Worker configuration
+    state: Optional[Any] = None,
+    context: Optional[Any] = None,
+    # Storage and broker configuration
+    storage: Optional[Any] = None,
+    broker: Optional[Any] = None,
+    # Server configuration
+    host: str = "0.0.0.0",
+    port: int = 8000,
+    reload: bool = False,
+    workers: int = 1,
+    log_level: str = "info",
+    # A2A configuration
+    name: Optional[str] = None,
+    url: Optional[str] = None,
+    version: str = "1.0.0",
+    description: Optional[str] = None,
+    # Advanced configuration
+    lifespan_timeout: int = 30,
+    **uvicorn_kwargs: Any,
+) -> FastA2A:
+    """
+    Launch an Agent or BaseGraph as an A2A server.
+
+    This function creates a fully parameterized A2A server that can handle
+    requests for either Agent or BaseGraph instances. It sets up the necessary
+    Worker, Storage, and Broker components automatically.
+
+    Args:
+        instance: Either an Agent or BaseGraph instance to serve
+        state: Initial state for graphs (ignored for agents)
+        context: Initial context for agents (ignored for graphs)
+        storage: Custom storage backend (defaults to InMemoryStorage)
+        broker: Custom broker backend (defaults to InMemoryBroker)
+        host: Host to bind the server to
+        port: Port to bind the server to
+        reload: Enable auto-reload for development
+        workers: Number of worker processes
+        log_level: Logging level
+        name: Agent name for the A2A server
+        url: URL where the agent is hosted
+        version: API version
+        description: API description for the A2A server
+        lifespan_timeout: Timeout for lifespan events
+        **uvicorn_kwargs: Additional arguments passed to uvicorn
+
+    Returns:
+        FastA2A application instance
+
+    Examples:
+        Launch an agent as A2A server:
+        ```python
+        from hammad import Agent
+
+        agent = Agent(
+            name="assistant",
+            instructions="You are a helpful assistant",
+            model="openai/gpt-4"
+        )
+
+        app = as_a2a_app(agent, port=8080)
+        # Run with: uvicorn module:app
+        ```
+
+        Launch a graph as A2A server:
+        ```python
+        from hammad import BaseGraph, action
+
+        class MyGraph(BaseGraph):
+            @action.start()
+            def process(self, message: str) -> str:
+                return f"Processed: {message}"
+
+        graph = MyGraph()
+        app = as_a2a_app(graph, name="My Graph API")
+        # Run with: uvicorn module:app
+        ```
+
+        Run directly with uvicorn:
+        ```python
+        import uvicorn
+
+        app = as_a2a_app(agent)
+        uvicorn.run(app, host="0.0.0.0", port=8000)
+        ```
+    """
+    # Import here to avoid circular imports
+    from ..agents.agent import Agent
+    from ..graphs.base import BaseGraph
+
+    # Create storage and broker if not provided
+    if storage is None:
+        storage = InMemoryStorage()
+    if broker is None:
+        broker = InMemoryBroker()
+
+    # Determine instance type and create appropriate worker
+    if isinstance(instance, Agent):
+        worker = AgentWorker(
+            agent=instance, storage=storage, broker=broker, context=context
+        )
+        default_name = instance.name
+        default_description = (
+            instance.description or f"A2A server for {instance.name} agent"
+        )
+    elif isinstance(instance, BaseGraph):
+        worker = GraphWorker(
+            graph=instance, storage=storage, broker=broker, state=state
+        )
+        default_name = instance.__class__.__name__
+        default_description = (
+            instance.__class__.__doc__
+            or f"A2A server for {instance.__class__.__name__} graph"
+        )
+    else:
+        raise ValueError(
+            f"Instance must be either an Agent or BaseGraph, got {type(instance)}"
+        )
+
+    # Use provided values or defaults
+    agent_name = name or default_name
+    agent_url = url or f"http://{host}:{port}"
+    agent_description = description or default_description
+
+    # Create lifespan context manager
+    @asynccontextmanager
+    async def lifespan(app: FastA2A) -> AsyncIterator[None]:
+        """Lifespan context manager for the A2A server."""
+        # Start the task manager
+        async with app.task_manager:
+            # Start the worker
+            async with worker.run():
+                yield
+
+    # Create the FastA2A application with correct parameters
+    app = FastA2A(
+        storage=storage,
+        broker=broker,
+        lifespan=lifespan,
+        name=agent_name,
+        url=agent_url,
+        version=version,
+        description=agent_description,
+    )
+
+    # Store configuration for potential runtime access
+    app.state.instance = instance
+    app.state.worker = worker
+    app.state.host = host
+    app.state.port = port
+    app.state.reload = reload
+    app.state.workers = workers
+    app.state.log_level = log_level
+    app.state.uvicorn_kwargs = uvicorn_kwargs
+
+    # Add a helper method to run the server directly
+    def run_server():
+        """Run the A2A server using uvicorn."""
+        import uvicorn
+
+        uvicorn_config = {
+            "host": host,
+            "port": port,
+            "reload": reload,
+            "workers": workers
+            if not reload
+            else 1,  # Can't use multiple workers with reload
+            "log_level": log_level,
+            **uvicorn_kwargs,
+        }
+
+        uvicorn.run(app, **uvicorn_config)
+
+    # Attach the run method to the app for convenience
+    app.run_server = run_server
+
+    return app
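
Taken together, `as_a2a_app` builds the storage, broker, and worker, wires them into a `FastA2A` app whose lifespan starts the task manager and the worker, and attaches a blocking `run_server()` helper. A usage sketch (the file name is hypothetical and the `Agent` arguments mirror the docstring examples above rather than a verified configuration):

```python
# serve_assistant.py -- illustrative entry point.
from hammad import Agent
from hammad.genai.a2a import as_a2a_app

agent = Agent(
    name="assistant",
    instructions="You are a helpful assistant",
    model="openai/gpt-4",
)

# host/port are captured on app.state and reused by run_server().
app = as_a2a_app(agent, host="127.0.0.1", port=8080)

if __name__ == "__main__":
    app.run_server()  # blocking; runs uvicorn with the captured settings
```

Note that the helper forces a single worker process whenever `reload=True`, matching uvicorn's constraint that auto-reload and multiple workers are mutually exclusive.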
hammad/genai/agents/__init__.py
@@ -7,6 +7,7 @@ from ..._internal import create_getattr_importer
 if TYPE_CHECKING:
     from .agent import (
         Agent,
+        AgentSettings,
         create_agent,
     )
     from .run import run_agent, run_agent_iter, async_run_agent, async_run_agent_iter
@@ -27,6 +28,7 @@ __all__ = [
     # hammad.genai.agents.agent
     "Agent",
     "create_agent",
+    "AgentSettings",
     # hammad.genai.agents.run
     "run_agent",
     "run_agent_iter",