devduck 0.1.0__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of devduck might be problematic. Click here for more details.

devduck/tools/tcp.py CHANGED
@@ -1,38 +1,60 @@
1
- """TCP tool for Strands Agents to function as both server and client.
1
+ """TCP tool for DevDuck agents with real-time streaming support.
2
2
 
3
- This module provides TCP server and client functionality for Strands Agents,
4
- allowing them to communicate over TCP/IP networks. The tool runs server operations
5
- in background threads, enabling concurrent communication without blocking the main agent.
3
+ This module provides TCP server and client functionality for DevDuck agents,
4
+ allowing them to communicate over TCP/IP networks with real-time response streaming.
5
+ The tool runs server operations in background threads, enabling concurrent
6
+ communication without blocking the main agent.
6
7
 
7
8
  Key Features:
8
- 1. TCP Server: Listen for incoming connections and process them with an agent
9
- 2. TCP Client: Connect to remote TCP servers and exchange messages
10
- 3. Background Processing: Server runs in a background thread
11
- 4. Per-Connection Agents: Creates a fresh agent for each client connection
9
+ 1. TCP Server: Listen for incoming connections and process them with a DevDuck agent
10
+ 2. Real-time Streaming: Responses stream to clients as they're generated (non-blocking)
11
+ 3. TCP Client: Connect to remote TCP servers and exchange messages
12
+ 4. Background Processing: Server runs in a background thread
13
+ 5. Per-Connection DevDuck: Creates a fresh DevDuck instance for each client connection
14
+ 6. Callback Handler: Uses Strands callback system for efficient streaming
12
15
 
13
- Usage with Strands Agent:
16
+ How Streaming Works:
17
+ -------------------
18
+ Instead of blocking until the full response is ready, this implementation uses
19
+ Strands' callback_handler mechanism to stream data as it's generated:
14
20
 
15
- ```python
16
- from strands import Agent
17
- from strands_tools import tcp
21
+ - Text chunks stream immediately as the model generates them
22
+ - Tool invocations are announced in real-time
23
+ - Tool results are sent as they complete
24
+ - No intermediate buffering - chunks are forwarded as soon as they arrive
25
+
26
+ Usage with DevDuck Agent:
18
27
 
19
- agent = Agent(tools=[tcp])
28
+ ```python
29
+ from devduck import devduck
20
30
 
21
- # Start a TCP server
22
- result = agent.tool.tcp(
31
+ # Start a streaming TCP server (each connection gets its own DevDuck instance)
32
+ result = devduck.agent.tool.tcp(
23
33
  action="start_server",
24
34
  host="127.0.0.1",
25
35
  port=8000,
26
36
  system_prompt="You are a helpful TCP server assistant.",
27
37
  )
28
38
 
29
- # Connect to a TCP server as client
30
- result = agent.tool.tcp(
31
- action="client_send", host="127.0.0.1", port=8000, message="Hello, server!"
39
+ # Connect as a client and receive streaming responses
40
+ result = devduck.agent.tool.tcp(
41
+ action="client_send",
42
+ host="127.0.0.1",
43
+ port=8000,
44
+ message="What's 2+2?"
32
45
  )
33
46
 
34
47
  # Stop the TCP server
35
- result = agent.tool.tcp(action="stop_server", port=8000)
48
+ result = devduck.agent.tool.tcp(action="stop_server", port=8000)
49
+ ```
50
+
51
+ For testing with netcat:
52
+ ```bash
53
+ # Start server from devduck
54
+ devduck "start a tcp server on port 8000"
55
+
56
+ # Connect with netcat and chat in real-time
57
+ nc localhost 8000
36
58
  ```
37
59
 
38
60
  See the tcp function docstring for more details on configuration options and parameters.
@@ -42,6 +64,7 @@ import logging
42
64
  import socket
43
65
  import threading
44
66
  import time
67
+ import os
45
68
  from typing import Any
46
69
 
47
70
  from strands import Agent, tool
@@ -52,44 +75,154 @@ logger = logging.getLogger(__name__)
52
75
  SERVER_THREADS: dict[int, dict[str, Any]] = {}
53
76
 
54
77
 
78
+ class TCPStreamingCallbackHandler:
79
+ """Callback handler that streams agent responses directly over TCP socket.
80
+
81
+ This handler implements real-time streaming of:
82
+ - Assistant responses (text chunks as they're generated)
83
+ - Tool invocations (names and status)
84
+ - Reasoning text (if enabled)
85
+ - Tool results (success/error status)
86
+
87
+ All data is sent immediately to the TCP client without buffering.
88
+ """
89
+
90
+ def __init__(self, client_socket: socket.socket):
91
+ """Initialize the streaming handler.
92
+
93
+ Args:
94
+ client_socket: The TCP socket to stream data to
95
+ """
96
+ self.socket = client_socket
97
+ self.tool_count = 0
98
+ self.previous_tool_use = None
99
+
100
+ def _send(self, data: str) -> None:
101
+ """Safely send data over TCP socket.
102
+
103
+ Args:
104
+ data: String data to send
105
+ """
106
+ try:
107
+ self.socket.sendall(data.encode())
108
+ except (BrokenPipeError, ConnectionResetError, OSError) as e:
109
+ logger.warning(f"Failed to send data over TCP: {e}")
110
+
111
+ def __call__(self, **kwargs: Any) -> None:
112
+ """Stream events to TCP socket in real-time.
113
+
114
+ Args:
115
+ **kwargs: Callback event data including:
116
+ - reasoningText (Optional[str]): Reasoning text to stream
117
+ - data (str): Text content to stream
118
+ - complete (bool): Whether this is the final chunk
119
+ - current_tool_use (dict): Current tool being invoked
120
+ - message (dict): Full message objects (for tool results)
121
+ """
122
+ reasoningText = kwargs.get("reasoningText", False)
123
+ data = kwargs.get("data", "")
124
+ complete = kwargs.get("complete", False)
125
+ current_tool_use = kwargs.get("current_tool_use", {})
126
+ message = kwargs.get("message", {})
127
+
128
+ # Stream reasoning text to the client as it is generated
129
+ if reasoningText:
130
+ self._send(reasoningText)
131
+
132
+ # Stream response text chunks
133
+ if data:
134
+ self._send(data)
135
+ if complete:
136
+ self._send("\n")
137
+
138
+ # Stream tool invocation notifications
139
+ if current_tool_use and current_tool_use.get("name"):
140
+ tool_name = current_tool_use.get("name", "Unknown tool")
141
+ if self.previous_tool_use != current_tool_use:
142
+ self.previous_tool_use = current_tool_use
143
+ self.tool_count += 1
144
+ self._send(f"\n🛠️ Tool #{self.tool_count}: {tool_name}\n")
145
+
146
+ # Stream tool results
147
+ if isinstance(message, dict) and message.get("role") == "user":
148
+ for content in message.get("content", []):
149
+ if isinstance(content, dict):
150
+ tool_result = content.get("toolResult")
151
+ if tool_result:
152
+ status = tool_result.get("status", "unknown")
153
+ if status == "success":
154
+ self._send(f"✅ Tool completed successfully\n")
155
+ else:
156
+ self._send(f"❌ Tool failed\n")
157
+
158
+
55
159
  def handle_client(
56
160
  client_socket: socket.socket,
57
161
  client_address: tuple,
58
162
  system_prompt: str,
59
163
  buffer_size: int,
60
- model: Any,
61
- parent_tools: list | None = None,
62
- callback_handler: Any = None,
63
- trace_attributes: dict | None = None,
64
164
  ) -> None:
65
- """Handle a client connection in the TCP server.
165
+ """Handle a client connection in the TCP server with streaming responses.
66
166
 
67
167
  Args:
68
168
  client_socket: The socket for the client connection
69
169
  client_address: The address of the client
70
170
  system_prompt: System prompt for creating a new agent for this connection
71
171
  buffer_size: Size of the message buffer
72
- model: Model instance from parent agent
73
- parent_tools: Tools inherited from the parent agent
74
- callback_handler: Callback handler from parent agent
75
- trace_attributes: Trace attributes from the parent agent
76
172
  """
77
173
  logger.info(f"Connection established with {client_address}")
78
174
 
79
- # Create a fresh agent instance for this client connection
80
- connection_agent = Agent(
81
- model=model,
82
- messages=[],
83
- tools=parent_tools or [],
84
- callback_handler=callback_handler,
85
- system_prompt=system_prompt,
86
- trace_attributes=trace_attributes or {},
87
- )
175
+ # Create a streaming callback handler for this connection
176
+ streaming_handler = TCPStreamingCallbackHandler(client_socket)
177
+
178
+ # Import DevDuck and create a new instance for this connection
179
+ # This gives us full DevDuck capabilities: system prompt building, self-healing, etc.
180
+ try:
181
+ from devduck import DevDuck
182
+
183
+ # Create a new DevDuck instance with auto_start_servers=False to avoid recursion
184
+ connection_devduck = DevDuck(auto_start_servers=False)
185
+
186
+ # Override the callback handler to enable streaming
187
+ if connection_devduck.agent:
188
+ connection_devduck.agent.callback_handler = streaming_handler
189
+
190
+ # Optionally override system prompt if provided
191
+ if system_prompt:
192
+ connection_devduck.agent.system_prompt += (
193
+ "\nCustom system prompt:" + system_prompt
194
+ )
195
+
196
+ connection_agent = connection_devduck.agent
197
+
198
+ except Exception as e:
199
+ logger.error(f"Failed to create DevDuck instance: {e}", exc_info=True)
200
+ # Fallback to basic Agent if DevDuck fails
201
+ from strands import Agent
202
+ from strands.models.ollama import OllamaModel
203
+
204
+ agent_model = OllamaModel(
205
+ host=os.getenv("OLLAMA_HOST", "http://localhost:11434"),
206
+ model_id=os.getenv("OLLAMA_MODEL", "qwen3:1.7b"),
207
+ temperature=1,
208
+ keep_alive="5m",
209
+ )
210
+
211
+ connection_agent = Agent(
212
+ model=agent_model,
213
+ tools=[],
214
+ system_prompt=system_prompt or "You are a helpful TCP server assistant.",
215
+ callback_handler=streaming_handler,
216
+ )
88
217
 
89
218
  try:
90
219
  # Send welcome message
91
- welcome_msg = "Welcome to Strands TCP Server! Send a message or 'exit' to close the connection.\n"
92
- client_socket.sendall(welcome_msg.encode())
220
+ welcome_msg = "🦆 Welcome to DevDuck TCP Server!\n"
221
+ welcome_msg += (
222
+ "Real-time streaming enabled - responses stream as they're generated.\n"
223
+ )
224
+ welcome_msg += "Send a message or 'exit' to close the connection.\n\n"
225
+ streaming_handler._send(welcome_msg)
93
226
 
94
227
  while True:
95
228
  # Receive data from the client
@@ -103,16 +236,24 @@ def handle_client(
103
236
  logger.info(f"Received from {client_address}: {message}")
104
237
 
105
238
  if message.lower() == "exit":
106
- client_socket.sendall(b"Connection closed by client request.\n")
239
+ streaming_handler._send("Connection closed by client request.\n")
107
240
  logger.info(f"Client {client_address} requested to exit")
108
241
  break
109
242
 
110
- # Process the message with the connection-specific agent
111
- response = connection_agent(message)
112
- response_text = str(response)
243
+ # Process the message - responses stream automatically via callback_handler
244
+ try:
245
+ streaming_handler._send(f"\n\n🦆: {message}\n\n")
246
+
247
+ # The agent call will stream responses directly to the socket
248
+ # through the callback_handler - no need to collect the response
249
+ connection_agent(message)
113
250
 
114
- # Send the response back to the client
115
- client_socket.sendall((response_text + "\n").encode())
251
+ # Send completion marker
252
+ streaming_handler._send("\n\n🦆\n\n")
253
+
254
+ except Exception as e:
255
+ logger.error(f"Error processing message: {e}")
256
+ streaming_handler._send(f"\n❌ Error processing message: {e}\n\n")
116
257
 
117
258
  except Exception as e:
118
259
  logger.error(f"Error handling client {client_address}: {e}")
@@ -127,9 +268,8 @@ def run_server(
127
268
  system_prompt: str,
128
269
  max_connections: int,
129
270
  buffer_size: int,
130
- parent_agent: Agent | None = None,
131
271
  ) -> None:
132
- """Run a TCP server that processes client requests with per-connection Strands agents.
272
+ """Run a TCP server that processes client requests with per-connection DevDuck instances.
133
273
 
134
274
  Args:
135
275
  host: Host address to bind the server
@@ -137,24 +277,12 @@ def run_server(
137
277
  system_prompt: System prompt for the server agents
138
278
  max_connections: Maximum number of concurrent connections
139
279
  buffer_size: Size of the message buffer
140
- parent_agent: Parent agent to inherit tools from
141
280
  """
142
281
  # Store server state
143
282
  SERVER_THREADS[port]["running"] = True
144
283
  SERVER_THREADS[port]["connections"] = 0
145
284
  SERVER_THREADS[port]["start_time"] = time.time()
146
285
 
147
- # Get model, tools, callback_handler and trace attributes from parent agent
148
- model = None
149
- callback_handler = None
150
- parent_tools = []
151
- trace_attributes = {}
152
- if parent_agent:
153
- model = parent_agent.model
154
- callback_handler = parent_agent.callback_handler
155
- parent_tools = list(parent_agent.tool_registry.registry.values())
156
- trace_attributes = parent_agent.trace_attributes
157
-
158
286
  # Create server socket
159
287
  server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
160
288
  server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
@@ -175,7 +303,7 @@ def run_server(
175
303
  client_socket, client_address = server_socket.accept()
176
304
  SERVER_THREADS[port]["connections"] += 1
177
305
 
178
- # Handle client in a new thread with a fresh agent
306
+ # Handle client in a new thread with a fresh DevDuck instance
179
307
  client_thread = threading.Thread(
180
308
  target=handle_client,
181
309
  args=(
@@ -183,10 +311,6 @@ def run_server(
183
311
  client_address,
184
312
  system_prompt,
185
313
  buffer_size,
186
- model,
187
- parent_tools,
188
- callback_handler,
189
- trace_attributes,
190
314
  ),
191
315
  )
192
316
  client_thread.daemon = True
@@ -221,21 +345,29 @@ def tcp(
221
345
  timeout: int = 90,
222
346
  buffer_size: int = 4096,
223
347
  max_connections: int = 5,
224
- agent: Any = None,
225
348
  ) -> dict:
226
- """Create and manage TCP servers and clients for network communication with connection handling.
349
+ """Create and manage TCP servers and clients with real-time streaming for DevDuck instances.
227
350
 
228
- This function provides TCP server and client functionality for Strands agents,
351
+ This function provides TCP server and client functionality for DevDuck agents,
229
352
  allowing them to communicate over TCP/IP networks. Servers run in background
230
- threads with a new, fresh agent instance for each client connection.
353
+ threads with a new, fresh DevDuck instance for each client connection.
354
+
355
+ **Real-time Streaming:** Unlike traditional blocking TCP responses, this
356
+ implementation streams data as it's generated using Strands' callback_handler
357
+ mechanism. Clients receive:
358
+ - Text chunks as the model generates them (no buffering)
359
+ - Tool invocation notifications in real-time
360
+ - Tool completion status immediately
361
+ - Reasoning text (if enabled)
231
362
 
232
363
  How It Works:
233
364
  ------------
234
365
  1. Server Mode:
235
366
  - Starts a TCP server in a background thread
236
- - Creates a dedicated agent for EACH client connection
237
- - Inherits tools from the parent agent
238
- - Processes client messages and returns responses
367
+ - Creates a dedicated DevDuck instance for EACH client connection
368
+ - Attaches a streaming callback handler to send data immediately
369
+ - Each DevDuck has full self-healing, hot-reload, and all tools
370
+ - Processes client messages with non-blocking streaming responses
239
371
 
240
372
  2. Client Mode:
241
373
  - Connects to a TCP server
@@ -249,17 +381,18 @@ def tcp(
249
381
 
250
382
  Common Use Cases:
251
383
  ---------------
252
- - Network service automation
253
- - Inter-agent communication
254
- - Remote command and control
255
- - API gateway implementation
256
- - IoT device management
384
+ - Network service automation with real-time feedback
385
+ - Inter-agent communication with streaming
386
+ - Remote command and control (instant responsiveness)
387
+ - API gateway implementation with SSE-like behavior
388
+ - IoT device management with live updates
389
+ - Interactive chat services over raw TCP
257
390
 
258
391
  Args:
259
392
  action: Action to perform (start_server, stop_server, get_status, client_send)
260
393
  host: Host address for server or client connection
261
394
  port: Port number for server or client connection
262
- system_prompt: System prompt for the server agent (for start_server)
395
+ system_prompt: System prompt for the server DevDuck instances (for start_server)
263
396
  message: Message to send to the TCP server (for client_send action)
264
397
  timeout: Connection timeout in seconds (default: 90)
265
398
  buffer_size: Size of the message buffer in bytes (default: 4096)
@@ -270,19 +403,32 @@ def tcp(
270
403
 
271
404
  Notes:
272
405
  - Server instances persist until explicitly stopped
273
- - Each client connection gets its own agent instance
274
- - Connection agents inherit tools from the parent agent
406
+ - Each client connection gets its own DevDuck instance
407
+ - Connection DevDuck instances have all standard DevDuck capabilities
408
+ - Streaming is automatic via callback_handler (no configuration needed)
275
409
  - Client connections are stateless
276
- """
277
- # Get parent agent from tool context if available
278
- parent_agent = agent
410
+ - Compatible with any TCP client (netcat, telnet, custom clients)
411
+
412
+ Examples:
413
+ # Start a streaming server
414
+ devduck("start a tcp server on port 9000")
279
415
 
416
+ # Test with netcat
417
+ nc localhost 9000
418
+ > what is 2+2?
419
+ [Streaming response appears in real-time]
420
+
421
+ # Send message from another devduck instance
422
+ devduck("send 'hello world' to tcp server at localhost:9000")
423
+ """
280
424
  if action == "start_server":
281
425
  # Check if server already running on this port
282
426
  if port in SERVER_THREADS and SERVER_THREADS[port].get("running", False):
283
427
  return {
284
428
  "status": "error",
285
- "content": [{"text": f"❌ Error: TCP Server already running on port {port}"}],
429
+ "content": [
430
+ {"text": f"❌ Error: TCP Server already running on port {port}"}
431
+ ],
286
432
  }
287
433
 
288
434
  # Create server thread
@@ -295,7 +441,6 @@ def tcp(
295
441
  system_prompt,
296
442
  max_connections,
297
443
  buffer_size,
298
- parent_agent,
299
444
  ),
300
445
  )
301
446
  server_thread.daemon = True
@@ -307,7 +452,9 @@ def tcp(
307
452
  if not SERVER_THREADS[port].get("running", False):
308
453
  return {
309
454
  "status": "error",
310
- "content": [{"text": f"❌ Error: Failed to start TCP Server on {host}:{port}"}],
455
+ "content": [
456
+ {"text": f"❌ Error: Failed to start TCP Server on {host}:{port}"}
457
+ ],
311
458
  }
312
459
 
313
460
  return {
@@ -315,7 +462,14 @@ def tcp(
315
462
  "content": [
316
463
  {"text": f"✅ TCP Server started successfully on {host}:{port}"},
317
464
  {"text": f"System prompt: {system_prompt}"},
318
- {"text": "Server creates a new agent instance for each connection"},
465
+ {"text": "🌊 Real-time streaming enabled (non-blocking responses)"},
466
+ {
467
+ "text": "🦆 Server creates a new DevDuck instance for each connection"
468
+ },
469
+ {
470
+ "text": "🛠️ Each DevDuck has full self-healing, hot-reload, and all tools"
471
+ },
472
+ {"text": f"📝 Test with: nc localhost {port}"},
319
473
  ],
320
474
  }
321
475
 
@@ -323,7 +477,9 @@ def tcp(
323
477
  if port not in SERVER_THREADS or not SERVER_THREADS[port].get("running", False):
324
478
  return {
325
479
  "status": "error",
326
- "content": [{"text": f"❌ Error: No TCP Server running on port {port}"}],
480
+ "content": [
481
+ {"text": f"❌ Error: No TCP Server running on port {port}"}
482
+ ],
327
483
  }
328
484
 
329
485
  # Stop the server
@@ -350,7 +506,9 @@ def tcp(
350
506
  "status": "success",
351
507
  "content": [
352
508
  {"text": f"✅ TCP Server on port {port} stopped successfully"},
353
- {"text": f"Statistics: {connections} connections handled, uptime {uptime:.2f} seconds"},
509
+ {
510
+ "text": f"Statistics: {connections} connections handled, uptime {uptime:.2f} seconds"
511
+ },
354
512
  ],
355
513
  }
356
514
 
@@ -366,7 +524,9 @@ def tcp(
366
524
  if data.get("running", False):
367
525
  uptime = time.time() - data.get("start_time", time.time())
368
526
  connections = data.get("connections", 0)
369
- status_info.append(f"Port {port}: Running - {connections} connections, uptime {uptime:.2f}s")
527
+ status_info.append(
528
+ f"Port {port}: Running - {connections} connections, uptime {uptime:.2f}s"
529
+ )
370
530
  else:
371
531
  status_info.append(f"Port {port}: Stopped")
372
532
 
@@ -388,7 +548,9 @@ def tcp(
388
548
  if not message:
389
549
  return {
390
550
  "status": "error",
391
- "content": [{"text": "Error: No message provided for client_send action"}],
551
+ "content": [
552
+ {"text": "Error: No message provided for client_send action"}
553
+ ],
392
554
  }
393
555
 
394
556
  # Create client socket
@@ -426,12 +588,20 @@ def tcp(
426
588
  except TimeoutError:
427
589
  return {
428
590
  "status": "error",
429
- "content": [{"text": f"Error: Connection to {host}:{port} timed out after {timeout} seconds"}],
591
+ "content": [
592
+ {
593
+ "text": f"Error: Connection to {host}:{port} timed out after {timeout} seconds"
594
+ }
595
+ ],
430
596
  }
431
597
  except ConnectionRefusedError:
432
598
  return {
433
599
  "status": "error",
434
- "content": [{"text": f"Error: Connection to {host}:{port} refused - no server running on that port"}],
600
+ "content": [
601
+ {
602
+ "text": f"Error: Connection to {host}:{port} refused - no server running on that port"
603
+ }
604
+ ],
435
605
  }
436
606
  except Exception as e:
437
607
  return {
@@ -454,4 +624,4 @@ def tcp(
454
624
  f"start_server, stop_server, get_status, client_send"
455
625
  }
456
626
  ],
457
- }
627
+ }