hammad-python 0.0.30__py3-none-any.whl → 0.0.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. ham/__init__.py +10 -0
  2. {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/METADATA +6 -32
  3. hammad_python-0.0.31.dist-info/RECORD +6 -0
  4. hammad/__init__.py +0 -84
  5. hammad/_internal.py +0 -256
  6. hammad/_main.py +0 -226
  7. hammad/cache/__init__.py +0 -40
  8. hammad/cache/base_cache.py +0 -181
  9. hammad/cache/cache.py +0 -169
  10. hammad/cache/decorators.py +0 -261
  11. hammad/cache/file_cache.py +0 -80
  12. hammad/cache/ttl_cache.py +0 -74
  13. hammad/cli/__init__.py +0 -33
  14. hammad/cli/animations.py +0 -573
  15. hammad/cli/plugins.py +0 -867
  16. hammad/cli/styles/__init__.py +0 -55
  17. hammad/cli/styles/settings.py +0 -139
  18. hammad/cli/styles/types.py +0 -358
  19. hammad/cli/styles/utils.py +0 -634
  20. hammad/data/__init__.py +0 -90
  21. hammad/data/collections/__init__.py +0 -49
  22. hammad/data/collections/collection.py +0 -326
  23. hammad/data/collections/indexes/__init__.py +0 -37
  24. hammad/data/collections/indexes/qdrant/__init__.py +0 -1
  25. hammad/data/collections/indexes/qdrant/index.py +0 -723
  26. hammad/data/collections/indexes/qdrant/settings.py +0 -94
  27. hammad/data/collections/indexes/qdrant/utils.py +0 -210
  28. hammad/data/collections/indexes/tantivy/__init__.py +0 -1
  29. hammad/data/collections/indexes/tantivy/index.py +0 -426
  30. hammad/data/collections/indexes/tantivy/settings.py +0 -40
  31. hammad/data/collections/indexes/tantivy/utils.py +0 -176
  32. hammad/data/configurations/__init__.py +0 -35
  33. hammad/data/configurations/configuration.py +0 -564
  34. hammad/data/models/__init__.py +0 -50
  35. hammad/data/models/extensions/__init__.py +0 -4
  36. hammad/data/models/extensions/pydantic/__init__.py +0 -42
  37. hammad/data/models/extensions/pydantic/converters.py +0 -759
  38. hammad/data/models/fields.py +0 -546
  39. hammad/data/models/model.py +0 -1078
  40. hammad/data/models/utils.py +0 -280
  41. hammad/data/sql/__init__.py +0 -24
  42. hammad/data/sql/database.py +0 -576
  43. hammad/data/sql/types.py +0 -127
  44. hammad/data/types/__init__.py +0 -75
  45. hammad/data/types/file.py +0 -431
  46. hammad/data/types/multimodal/__init__.py +0 -36
  47. hammad/data/types/multimodal/audio.py +0 -200
  48. hammad/data/types/multimodal/image.py +0 -182
  49. hammad/data/types/text.py +0 -1308
  50. hammad/formatting/__init__.py +0 -33
  51. hammad/formatting/json/__init__.py +0 -27
  52. hammad/formatting/json/converters.py +0 -158
  53. hammad/formatting/text/__init__.py +0 -63
  54. hammad/formatting/text/converters.py +0 -723
  55. hammad/formatting/text/markdown.py +0 -131
  56. hammad/formatting/yaml/__init__.py +0 -26
  57. hammad/formatting/yaml/converters.py +0 -5
  58. hammad/genai/__init__.py +0 -217
  59. hammad/genai/a2a/__init__.py +0 -32
  60. hammad/genai/a2a/workers.py +0 -552
  61. hammad/genai/agents/__init__.py +0 -59
  62. hammad/genai/agents/agent.py +0 -1973
  63. hammad/genai/agents/run.py +0 -1024
  64. hammad/genai/agents/types/__init__.py +0 -42
  65. hammad/genai/agents/types/agent_context.py +0 -13
  66. hammad/genai/agents/types/agent_event.py +0 -128
  67. hammad/genai/agents/types/agent_hooks.py +0 -220
  68. hammad/genai/agents/types/agent_messages.py +0 -31
  69. hammad/genai/agents/types/agent_response.py +0 -125
  70. hammad/genai/agents/types/agent_stream.py +0 -327
  71. hammad/genai/graphs/__init__.py +0 -125
  72. hammad/genai/graphs/_utils.py +0 -190
  73. hammad/genai/graphs/base.py +0 -1828
  74. hammad/genai/graphs/plugins.py +0 -316
  75. hammad/genai/graphs/types.py +0 -638
  76. hammad/genai/models/__init__.py +0 -1
  77. hammad/genai/models/embeddings/__init__.py +0 -43
  78. hammad/genai/models/embeddings/model.py +0 -226
  79. hammad/genai/models/embeddings/run.py +0 -163
  80. hammad/genai/models/embeddings/types/__init__.py +0 -37
  81. hammad/genai/models/embeddings/types/embedding_model_name.py +0 -75
  82. hammad/genai/models/embeddings/types/embedding_model_response.py +0 -76
  83. hammad/genai/models/embeddings/types/embedding_model_run_params.py +0 -66
  84. hammad/genai/models/embeddings/types/embedding_model_settings.py +0 -47
  85. hammad/genai/models/language/__init__.py +0 -57
  86. hammad/genai/models/language/model.py +0 -1098
  87. hammad/genai/models/language/run.py +0 -878
  88. hammad/genai/models/language/types/__init__.py +0 -40
  89. hammad/genai/models/language/types/language_model_instructor_mode.py +0 -47
  90. hammad/genai/models/language/types/language_model_messages.py +0 -28
  91. hammad/genai/models/language/types/language_model_name.py +0 -239
  92. hammad/genai/models/language/types/language_model_request.py +0 -127
  93. hammad/genai/models/language/types/language_model_response.py +0 -217
  94. hammad/genai/models/language/types/language_model_response_chunk.py +0 -56
  95. hammad/genai/models/language/types/language_model_settings.py +0 -89
  96. hammad/genai/models/language/types/language_model_stream.py +0 -600
  97. hammad/genai/models/language/utils/__init__.py +0 -28
  98. hammad/genai/models/language/utils/requests.py +0 -421
  99. hammad/genai/models/language/utils/structured_outputs.py +0 -135
  100. hammad/genai/models/model_provider.py +0 -4
  101. hammad/genai/models/multimodal.py +0 -47
  102. hammad/genai/models/reranking.py +0 -26
  103. hammad/genai/types/__init__.py +0 -1
  104. hammad/genai/types/base.py +0 -215
  105. hammad/genai/types/history.py +0 -290
  106. hammad/genai/types/tools.py +0 -507
  107. hammad/logging/__init__.py +0 -35
  108. hammad/logging/decorators.py +0 -834
  109. hammad/logging/logger.py +0 -1018
  110. hammad/mcp/__init__.py +0 -53
  111. hammad/mcp/client/__init__.py +0 -35
  112. hammad/mcp/client/client.py +0 -624
  113. hammad/mcp/client/client_service.py +0 -400
  114. hammad/mcp/client/settings.py +0 -178
  115. hammad/mcp/servers/__init__.py +0 -26
  116. hammad/mcp/servers/launcher.py +0 -1161
  117. hammad/runtime/__init__.py +0 -32
  118. hammad/runtime/decorators.py +0 -142
  119. hammad/runtime/run.py +0 -299
  120. hammad/service/__init__.py +0 -49
  121. hammad/service/create.py +0 -527
  122. hammad/service/decorators.py +0 -283
  123. hammad/types.py +0 -288
  124. hammad/typing/__init__.py +0 -435
  125. hammad/web/__init__.py +0 -43
  126. hammad/web/http/__init__.py +0 -1
  127. hammad/web/http/client.py +0 -944
  128. hammad/web/models.py +0 -275
  129. hammad/web/openapi/__init__.py +0 -1
  130. hammad/web/openapi/client.py +0 -740
  131. hammad/web/search/__init__.py +0 -1
  132. hammad/web/search/client.py +0 -1023
  133. hammad/web/utils.py +0 -472
  134. hammad_python-0.0.30.dist-info/RECORD +0 -135
  135. {hammad → ham}/py.typed +0 -0
  136. {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/WHEEL +0 -0
  137. {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/licenses/LICENSE +0 -0
@@ -1,552 +0,0 @@
1
- """hammad.genai.a2a.workers"""
2
-
3
- from typing import Union, Optional, Any, Dict, List, TYPE_CHECKING
4
- from contextlib import asynccontextmanager
5
- from collections.abc import AsyncIterator
6
- import uuid
7
-
8
- from fasta2a import FastA2A, Worker
9
- from fasta2a.broker import InMemoryBroker
10
- from fasta2a.storage import InMemoryStorage
11
- from fasta2a.schema import Artifact, Message, TaskIdParams, TaskSendParams, TextPart
12
-
13
- if TYPE_CHECKING:
14
- from ..agents.agent import Agent
15
- from ..graphs.base import BaseGraph
16
-
17
- __all__ = [
18
- "as_a2a_app",
19
- "GraphWorker",
20
- "AgentWorker",
21
- ]
22
-
23
-
24
- Context = List[Message]
25
- """The shape of the context stored in the storage."""
26
-
27
-
28
class GraphWorker(Worker[Context]):
    """Worker implementation that executes A2A tasks with a ``BaseGraph``."""

    def __init__(
        self,
        graph: "BaseGraph",
        storage: InMemoryStorage,
        broker: InMemoryBroker,
        state: Optional[Any] = None,
        **kwargs,
    ):
        """Create a worker bound to a single graph instance.

        Args:
            graph: The BaseGraph instance to run.
            storage: Storage backend for tasks and context.
            broker: Broker used for task scheduling.
            state: Optional initial state passed to every graph run.
            **kwargs: Extra arguments forwarded to the base ``Worker``.
        """
        super().__init__(storage=storage, broker=broker, **kwargs)
        self.graph = graph
        self.state = state

    @staticmethod
    def _first_text(msg: Message) -> str:
        """Return the text of the first ``text`` part of *msg*, or ``""``."""
        for part in msg.get("parts", []):
            if part.get("kind") == "text":
                return part.get("text", "")
        return ""

    async def run_task(self, params: TaskSendParams) -> None:
        """Run the graph for one task and persist the outcome to storage."""
        task = await self.storage.load_task(params["id"])
        assert task is not None

        await self.storage.update_task(task["id"], state="working")

        # Stored conversation context plus whatever history the task carries.
        context = await self.storage.load_context(task["context_id"]) or []
        context.extend(task.get("history", []))

        history = self.build_message_history(context)

        # The prompt is the first non-empty user-authored text part.
        user_message = ""
        for msg in task.get("history", []):
            if msg.get("role") == "user":
                user_message = self._first_text(msg)
                if user_message:
                    break

        try:
            run_result = await self.graph.async_run(
                user_message, state=self.state, history=history
            )

            reply = Message(
                role="assistant",
                parts=[TextPart(text=str(run_result.output), kind="text")],
                kind="message",
                message_id=str(uuid.uuid4()),
            )
            context.append(reply)

            artifacts = self.build_artifacts(run_result)

            await self.storage.update_context(task["context_id"], context)
            await self.storage.update_task(
                task["id"],
                state="completed",
                new_messages=[reply],
                new_artifacts=artifacts,
            )

        except Exception as exc:
            # Surface the failure to the caller as an assistant message and
            # mark the task failed.
            failure = Message(
                role="assistant",
                parts=[TextPart(text=f"Error: {str(exc)}", kind="text")],
                kind="message",
                message_id=str(uuid.uuid4()),
            )
            context.append(failure)
            await self.storage.update_context(task["context_id"], context)
            await self.storage.update_task(
                task["id"],
                state="failed",
                new_messages=[failure],
                new_artifacts=[],
            )

    async def cancel_task(self, params: TaskIdParams) -> None:
        """Cancel a running task by flagging it cancelled in storage."""
        await self.storage.update_task(params["id"], state="cancelled")

    def build_message_history(self, history: List[Message]) -> List[Dict[str, Any]]:
        """Convert A2A messages into the plain dict format the graph expects."""
        converted: List[Dict[str, Any]] = []
        for msg in history:
            text = self._first_text(msg)
            if text:
                converted.append({"role": msg.get("role", "user"), "content": text})
        return converted

    def build_artifacts(self, result: Any) -> List[Artifact]:
        """Translate a graph run result into a list of A2A artifacts."""
        collected: List[Artifact] = []

        # Main textual output of the graph run.
        if hasattr(result, "output"):
            collected.append(
                {
                    "id": str(uuid.uuid4()),
                    "type": "text",
                    "data": str(result.output),
                    "metadata": {
                        "source": "graph_output",
                        "model": getattr(result, "model", "unknown"),
                    },
                }
            )

        # Final graph state, when the run produced one.
        if getattr(result, "state", None) is not None:
            collected.append(
                {
                    "id": str(uuid.uuid4()),
                    "type": "state",
                    "data": str(result.state),
                    "metadata": {"source": "graph_state"},
                }
            )

        # Execution metadata (which nodes ran, and from where).
        if hasattr(result, "nodes_executed"):
            collected.append(
                {
                    "id": str(uuid.uuid4()),
                    "type": "metadata",
                    "data": {
                        "nodes_executed": result.nodes_executed,
                        "start_node": getattr(result, "start_node", None),
                    },
                    "metadata": {"source": "graph_execution"},
                }
            )

        return collected
192
-
193
-
194
class AgentWorker(Worker[Context]):
    """Worker implementation that executes A2A tasks with an ``Agent``."""

    def __init__(
        self,
        agent: "Agent",
        storage: InMemoryStorage,
        broker: InMemoryBroker,
        context: Optional[Any] = None,
        **kwargs,
    ):
        """Create a worker bound to a single agent instance.

        Args:
            agent: The Agent instance to run.
            storage: Storage backend for tasks and context.
            broker: Broker used for task scheduling.
            context: Optional initial context passed to every agent run.
            **kwargs: Extra arguments forwarded to the base ``Worker``.
        """
        super().__init__(storage=storage, broker=broker, **kwargs)
        self.agent = agent
        self.agent_context = context

    @staticmethod
    def _first_text(msg: Message) -> str:
        """Return the text of the first ``text`` part of *msg*, or ``""``."""
        for part in msg.get("parts", []):
            if part.get("kind") == "text":
                return part.get("text", "")
        return ""

    async def run_task(self, params: TaskSendParams) -> None:
        """Run the agent for one task and persist the outcome to storage."""
        task = await self.storage.load_task(params["id"])
        assert task is not None

        await self.storage.update_task(task["id"], state="working")

        # Stored conversation context plus whatever history the task carries.
        context = await self.storage.load_context(task["context_id"]) or []
        context.extend(task.get("history", []))

        history = self.build_message_history(context)

        # The prompt is the first non-empty user-authored text part.
        user_message = ""
        for msg in task.get("history", []):
            if msg.get("role") == "user":
                user_message = self._first_text(msg)
                if user_message:
                    break

        try:
            # NOTE(review): the prompt may already be present in ``history``
            # (it is derived from the same context) and is appended again
            # here; this mirrors the original behavior — confirm intended.
            messages = history or []
            if user_message:
                messages.append({"role": "user", "content": user_message})

            run_result = await self.agent.async_run(
                messages=messages, context=self.agent_context
            )

            reply = Message(
                role="assistant",
                parts=[TextPart(text=str(run_result.output), kind="text")],
                kind="message",
                message_id=str(uuid.uuid4()),
            )
            context.append(reply)

            artifacts = self.build_artifacts(run_result)

            # Carry any context the agent produced into subsequent runs.
            if getattr(run_result, "context", None) is not None:
                self.agent_context = run_result.context

            await self.storage.update_context(task["context_id"], context)
            await self.storage.update_task(
                task["id"],
                state="completed",
                new_messages=[reply],
                new_artifacts=artifacts,
            )

        except Exception as exc:
            # Surface the failure to the caller as an assistant message and
            # mark the task failed.
            failure = Message(
                role="assistant",
                parts=[TextPart(text=f"Error: {str(exc)}", kind="text")],
                kind="message",
                message_id=str(uuid.uuid4()),
            )
            context.append(failure)
            await self.storage.update_context(task["context_id"], context)
            await self.storage.update_task(
                task["id"],
                state="failed",
                new_messages=[failure],
                new_artifacts=[],
            )

    async def cancel_task(self, params: TaskIdParams) -> None:
        """Cancel a running task by flagging it cancelled in storage."""
        await self.storage.update_task(params["id"], state="cancelled")

    def build_message_history(self, history: List[Message]) -> List[Dict[str, Any]]:
        """Convert A2A messages into the plain dict format the agent expects."""
        converted: List[Dict[str, Any]] = []
        for msg in history:
            text = self._first_text(msg)
            if text:
                converted.append({"role": msg.get("role", "user"), "content": text})
        return converted

    def build_artifacts(self, result: Any) -> List[Artifact]:
        """Translate an agent run result into a list of A2A artifacts."""
        collected: List[Artifact] = []

        # Main textual output of the agent run.
        if hasattr(result, "output"):
            collected.append(
                {
                    "id": str(uuid.uuid4()),
                    "type": "text",
                    "data": str(result.output),
                    "metadata": {
                        "source": "agent_output",
                        "model": getattr(result, "model", self.agent.model),
                    },
                }
            )

        # Resulting agent context, when available.
        if getattr(result, "context", None) is not None:
            collected.append(
                {
                    "id": str(uuid.uuid4()),
                    "type": "context",
                    "data": str(result.context),
                    "metadata": {"source": "agent_context"},
                }
            )

        # One artifact per tool call, tagged with its 1-based step number.
        for step_number, step in enumerate(
            getattr(result, "steps", None) or [], start=1
        ):
            for call in getattr(step, "tool_calls", None) or []:
                collected.append(
                    {
                        "id": str(uuid.uuid4()),
                        "type": "tool_call",
                        "data": {
                            "tool": call.function.name,
                            "arguments": call.function.arguments,
                            "step": step_number,
                        },
                        "metadata": {"source": "agent_tool_call"},
                    }
                )

        return collected
371
-
372
-
373
def as_a2a_app(
    instance: Union["Agent", "BaseGraph"],
    *,
    # Worker configuration
    state: Optional[Any] = None,
    context: Optional[Any] = None,
    # Storage and broker configuration
    storage: Optional[Any] = None,
    broker: Optional[Any] = None,
    # Server configuration
    host: str = "0.0.0.0",
    port: int = 8000,
    reload: bool = False,
    workers: int = 1,
    log_level: str = "info",
    # A2A configuration
    name: Optional[str] = None,
    url: Optional[str] = None,
    version: str = "1.0.0",
    description: Optional[str] = None,
    # Advanced configuration
    lifespan_timeout: int = 30,
    **uvicorn_kwargs: Any,
) -> FastA2A:
    """Expose an ``Agent`` or ``BaseGraph`` as an A2A (FastA2A) ASGI app.

    Wires up the appropriate worker plus storage/broker backends and
    returns a ready-to-serve application. The app also gains a
    ``run_server()`` helper that launches uvicorn with the captured
    server settings.

    Args:
        instance: The Agent or BaseGraph to serve.
        state: Initial state for graph runs (ignored for agents).
        context: Initial context for agent runs (ignored for graphs).
        storage: Storage backend; defaults to ``InMemoryStorage``.
        broker: Broker backend; defaults to ``InMemoryBroker``.
        host: Interface used by ``run_server()`` and the default URL.
        port: Port used by ``run_server()`` and the default URL.
        reload: Enable uvicorn auto-reload (forces a single worker).
        workers: Number of uvicorn worker processes.
        log_level: Uvicorn log level.
        name: Served agent name; defaults from the instance.
        url: Public URL; defaults to ``http://{host}:{port}``.
        version: API version string.
        description: API description; defaults from the instance.
        lifespan_timeout: Timeout for lifespan events.
        **uvicorn_kwargs: Extra keyword arguments forwarded to uvicorn.

    Returns:
        A configured ``FastA2A`` application instance.

    Raises:
        ValueError: If *instance* is neither an Agent nor a BaseGraph.

    Example:
        ```python
        app = as_a2a_app(agent, port=8080)
        # Run with ``uvicorn module:app`` or simply ``app.run_server()``.
        ```
    """
    # Local imports avoid a circular dependency with the agent/graph modules.
    from ..agents.agent import Agent
    from ..graphs.base import BaseGraph

    # Fall back to in-memory backends when none are supplied.
    if storage is None:
        storage = InMemoryStorage()
    if broker is None:
        broker = InMemoryBroker()

    # Pick the worker type matching the served instance and derive the
    # default identity for the A2A card.
    if isinstance(instance, Agent):
        task_worker = AgentWorker(
            agent=instance, storage=storage, broker=broker, context=context
        )
        fallback_name = instance.name
        fallback_description = (
            instance.description or f"A2A server for {instance.name} agent"
        )
    elif isinstance(instance, BaseGraph):
        task_worker = GraphWorker(
            graph=instance, storage=storage, broker=broker, state=state
        )
        fallback_name = instance.__class__.__name__
        fallback_description = (
            instance.__class__.__doc__
            or f"A2A server for {instance.__class__.__name__} graph"
        )
    else:
        raise ValueError(
            f"Instance must be either an Agent or BaseGraph, got {type(instance)}"
        )

    resolved_name = name or fallback_name
    resolved_url = url or f"http://{host}:{port}"
    resolved_description = description or fallback_description

    # NOTE(review): ``lifespan_timeout`` is accepted but not currently
    # applied anywhere — retained for API compatibility; confirm intent.

    @asynccontextmanager
    async def lifespan(app: FastA2A) -> AsyncIterator[None]:
        """Keep the task manager and worker running for the app lifetime."""
        async with app.task_manager:
            async with task_worker.run():
                yield

    app = FastA2A(
        storage=storage,
        broker=broker,
        lifespan=lifespan,
        name=resolved_name,
        url=resolved_url,
        version=version,
        description=resolved_description,
    )

    # Expose the configuration for runtime introspection by callers.
    app.state.instance = instance
    app.state.worker = task_worker
    app.state.host = host
    app.state.port = port
    app.state.reload = reload
    app.state.workers = workers
    app.state.log_level = log_level
    app.state.uvicorn_kwargs = uvicorn_kwargs

    def run_server():
        """Serve this app with uvicorn using the captured settings."""
        import uvicorn

        server_config = {
            "host": host,
            "port": port,
            "reload": reload,
            # uvicorn cannot combine auto-reload with multiple workers.
            "workers": 1 if reload else workers,
            "log_level": log_level,
            **uvicorn_kwargs,
        }
        uvicorn.run(app, **server_config)

    # Convenience handle so callers can simply do ``app.run_server()``.
    app.run_server = run_server

    return app
@@ -1,59 +0,0 @@
1
- """hammad.genai.agents"""
2
-
3
- from typing import TYPE_CHECKING
4
- from ..._internal import create_getattr_importer
5
-
6
-
7
- if TYPE_CHECKING:
8
- from .agent import (
9
- Agent,
10
- AgentSettings,
11
- create_agent,
12
- )
13
- from .run import run_agent, run_agent_iter, async_run_agent, async_run_agent_iter
14
-
15
- # Types
16
- from .types.agent_context import AgentContext
17
- from .types.agent_event import AgentEvent
18
- from .types.agent_hooks import HookManager, HookDecorator
19
- from .types.agent_messages import AgentMessages
20
- from .types.agent_response import (
21
- AgentResponse,
22
- _create_agent_response_from_language_model_response,
23
- )
24
- from .types.agent_stream import AgentStream, AgentResponseChunk
25
-
26
-
27
- __all__ = [
28
- # hammad.genai.agents.agent
29
- "Agent",
30
- "create_agent",
31
- "AgentSettings",
32
- # hammad.genai.agents.run
33
- "run_agent",
34
- "run_agent_iter",
35
- "async_run_agent",
36
- "async_run_agent_iter",
37
- # hammad.genai.agents.types.agent_context
38
- "AgentContext",
39
- # hammad.genai.agents.types.agent_event
40
- "AgentEvent",
41
- # hammad.genai.agents.types.agent_hooks
42
- "HookManager",
43
- "HookDecorator",
44
- # hammad.genai.agents.types.agent_messages
45
- "AgentMessages",
46
- # hammad.genai.agents.types.agent_response
47
- "AgentResponse",
48
- "_create_agent_response_from_language_model_response",
49
- # hammad.genai.agents.types.agent_stream
50
- "AgentStream",
51
- "AgentResponseChunk",
52
- ]
53
-
54
-
55
- __getattr__ = create_getattr_importer(__all__)
56
-
57
-
58
- def __dir__() -> list[str]:
59
- return __all__