agent-event-stream 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,29 @@
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *.egg-info/
5
+ dist/
6
+ build/
7
+ .venv/
8
+ venv/
9
+ *.egg
10
+ .pytest_cache/
11
+ .mypy_cache/
12
+ .ruff_cache/
13
+
14
+ # Node / React
15
+ node_modules/
16
+ packages/react/dist/
17
+
18
+ # OS
19
+ .DS_Store
20
+ Thumbs.db
21
+
22
+ # Env
23
+ .env
24
+ .env.local
25
+
26
+ # IDE
27
+ .vscode/
28
+ .idea/
29
+ *.swp
@@ -0,0 +1,19 @@
1
+ Metadata-Version: 2.4
2
+ Name: agent-event-stream
3
+ Version: 0.1.0
4
+ Summary: Typed SSE event protocol for AI agents — token, tool_use, thinking, progress, done
5
+ License: MIT
6
+ Keywords: ai-agents,anthropic,fastapi,llm,openai,sse,streaming
7
+ Classifier: Development Status :: 4 - Beta
8
+ Classifier: Intended Audience :: Developers
9
+ Classifier: License :: OSI Approved :: MIT License
10
+ Classifier: Programming Language :: Python :: 3.11
11
+ Classifier: Programming Language :: Python :: 3.12
12
+ Classifier: Topic :: Software Development :: Libraries
13
+ Requires-Python: >=3.11
14
+ Provides-Extra: dev
15
+ Requires-Dist: fastapi>=0.100; extra == 'dev'
16
+ Requires-Dist: httpx>=0.27; extra == 'dev'
17
+ Requires-Dist: pytest>=8; extra == 'dev'
18
+ Provides-Extra: fastapi
19
+ Requires-Dist: fastapi>=0.100; extra == 'fastapi'
@@ -0,0 +1,5 @@
1
"""Public API for the ``agent_stream`` package.

Re-exports the two user-facing classes; everything else is internal.
"""
from .emitter import AgentStreamEmitter
from .batcher import TokenBatcher

# Explicit public surface for `from agent_stream import *`.
__all__ = ["AgentStreamEmitter", "TokenBatcher"]
# NOTE(review): duplicated with pyproject.toml's version — keep in sync on release.
__version__ = "0.1.0"
@@ -0,0 +1,50 @@
1
+ """TokenBatcher — accumulates text tokens and flushes every `interval_ms` milliseconds.
2
+
3
+ Prevents excessive SSE emissions (and client re-renders) by batching
4
+ tokens that arrive faster than the flush interval.
5
+
6
+ Usage::
7
+
8
+ batcher = TokenBatcher(interval_ms=50)
9
+
10
+ for chunk in llm_stream:
11
+ if batched := batcher.add(chunk.text):
12
+ yield emitter.token(batched)
13
+
14
+ # Always flush at end of stream
15
+ if remaining := batcher.flush():
16
+ yield emitter.token(remaining)
17
+ """
18
+ from __future__ import annotations
19
+
20
+ import time
21
+
22
+
23
class TokenBatcher:
    """Collects incoming text tokens and releases them in timed batches.

    Tokens accumulate in an internal buffer; ``add`` returns the joined
    buffer once at least ``interval_ms`` milliseconds have passed since the
    previous flush, otherwise ``None``. Call ``flush`` at end of stream to
    drain whatever remains.
    """

    def __init__(self, interval_ms: int = 50) -> None:
        # Interval is kept in seconds to match time.monotonic() arithmetic.
        self._interval = interval_ms / 1000.0
        self._pending: list[str] = []
        self._last_flush = time.monotonic()

    def add(self, text: str) -> str | None:
        """Buffer *text*; return the accumulated batch if the interval elapsed, else None."""
        self._pending.append(text)
        elapsed = time.monotonic() - self._last_flush
        if elapsed < self._interval:
            return None
        return self.flush()

    def flush(self) -> str | None:
        """Drain the buffer and return the joined text, or None when nothing is buffered."""
        if not self._pending:
            return None
        joined = "".join(self._pending)
        self._pending = []
        self._last_flush = time.monotonic()
        return joined

    @property
    def has_content(self) -> bool:
        """Whether any tokens are waiting to be flushed."""
        return bool(self._pending)
@@ -0,0 +1,136 @@
1
+ """AgentStreamEmitter — formats SSE event strings for AI agent streaming.
2
+
3
+ No external dependencies. Each method returns a ready-to-yield SSE string:
4
+
5
+ event: <type>
6
+ data: <json>
7
+
8
+ (blank line terminates the event)
9
+
10
+ Usage::
11
+
12
+ from agent_stream import AgentStreamEmitter
13
+
14
+ emitter = AgentStreamEmitter()
15
+
16
+ async def generate():
17
+ yield emitter.token("Hello")
18
+ yield emitter.tool_use("search", "tu_1", "query=foo")
19
+ yield emitter.tool_result("search", "tu_1", "3 results", 120)
20
+ yield emitter.done(message_id="msg_1", num_turns=1, tool_count=1)
21
+ """
22
+ from __future__ import annotations
23
+
24
+ import json
25
+ from typing import Any
26
+
27
+
28
+ def _fmt(event_type: str, data: dict[str, Any]) -> str:
29
+ return f"event: {event_type}\ndata: {json.dumps(data, default=str)}\n\n"
30
+
31
+
32
class AgentStreamEmitter:
    """Builds typed server-sent-event strings for streaming AI-agent output.

    Every method is a pure formatter: it takes plain values and returns a
    ready-to-yield SSE string (``event:`` line, ``data:`` JSON line, blank
    line). The emitter itself holds no state.
    """

    def token(self, text: str) -> str:
        """Incremental text chunk produced by the agent."""
        return _fmt("token", {"text": text})

    def thinking(self, text: str) -> str:
        """Extended reasoning / chain-of-thought block."""
        return _fmt("thinking", {"text": text})

    def tool_use(
        self,
        tool_name: str,
        tool_use_id: str,
        input_summary: str = "",
    ) -> str:
        """Announce that a tool invocation has started (status is always "running")."""
        payload = {
            "tool_name": tool_name,
            "tool_use_id": tool_use_id,
            "input_summary": input_summary,
            "status": "running",
        }
        return _fmt("tool_use", payload)

    def tool_result(
        self,
        tool_name: str,
        tool_use_id: str,
        output_summary: str = "",
        duration_ms: int = 0,
        is_error: bool = False,
    ) -> str:
        """Report a completed tool execution; status reflects *is_error*."""
        status = "error" if is_error else "done"
        payload = {
            "tool_name": tool_name,
            "tool_use_id": tool_use_id,
            "output_summary": output_summary,
            "duration_ms": duration_ms,
            "status": status,
        }
        return _fmt("tool_result", payload)

    def turn(self, turn_number: int, total_tools: int = 0) -> str:
        """Mark an agentic turn boundary."""
        payload = {"turn_number": turn_number, "total_tools": total_tools}
        return _fmt("turn", payload)

    def progress(
        self,
        step: str,
        percentage: int,
        message: str,
        sub_progress: dict[str, Any] | None = None,
    ) -> str:
        """Pipeline step progress update; *sub_progress* is included only when given."""
        payload: dict[str, Any] = {
            "step": step,
            "percentage": percentage,
            "message": message,
        }
        if sub_progress is not None:
            payload["sub_progress"] = sub_progress
        return _fmt("progress", payload)

    def creation(
        self,
        creation_type: str,
        count: int,
        tool_use_id: str = "",
        items: list[dict[str, Any]] | None = None,
    ) -> str:
        """Announce a persistent artifact the agent created."""
        payload = {
            "creation_type": creation_type,
            "count": count,
            "tool_use_id": tool_use_id,
            # Normalize a missing list to [] so clients always see the key.
            "items": items or [],
        }
        return _fmt("creation", payload)

    def error(
        self,
        error_type: str,
        message: str,
        details: dict[str, Any] | None = None,
    ) -> str:
        """Structured error raised during execution."""
        payload = {
            "error_type": error_type,
            "message": message,
            "details": details or {},
        }
        return _fmt("error", payload)

    def done(
        self,
        message_id: str = "",
        num_turns: int = 0,
        tool_count: int = 0,
        duration_ms: int = 0,
        model: str = "",
        total_cost_usd: float = 0.0,
    ) -> str:
        """Terminal event — always the last one emitted on a stream."""
        payload = {
            "message_id": message_id,
            "num_turns": num_turns,
            "tool_count": tool_count,
            "duration_ms": duration_ms,
            "model": model,
            "total_cost_usd": total_cost_usd,
        }
        return _fmt("done", payload)
@@ -0,0 +1,52 @@
1
+ """FastAPI integration for agent-stream.
2
+
3
+ Optional module — only import if FastAPI is installed.
4
+
5
+ Usage::
6
+
7
+ from agent_stream import AgentStreamEmitter, TokenBatcher
8
+ from agent_stream.fastapi import agent_stream_response
9
+
10
+ @app.post("/api/chat")
11
+ async def chat(req: ChatRequest):
12
+ async def generate():
13
+ emitter = AgentStreamEmitter()
14
+ batcher = TokenBatcher()
15
+ async for chunk in your_llm_call(req.message):
16
+ if batched := batcher.add(chunk.text):
17
+ yield emitter.token(batched)
18
+ if remaining := batcher.flush():
19
+ yield emitter.token(remaining)
20
+ yield emitter.done()
21
+
22
+ return agent_stream_response(generate())
23
+ """
24
+ from __future__ import annotations
25
+
26
+ from typing import AsyncGenerator
27
+
28
# Fail fast with an actionable hint when the optional dependency is absent.
try:
    from fastapi.responses import StreamingResponse
except ImportError as e:
    # BUG FIX: the distribution is named "agent-event-stream" (see
    # pyproject.toml [project].name); the old hint "pip install
    # agent-stream[fastapi]" would install an unrelated package.
    raise ImportError(
        "fastapi is required to use agent_stream.fastapi. "
        "Install it with: pip install agent-event-stream[fastapi]"
    ) from e
35
+
36
+
37
def agent_stream_response(
    generator: AsyncGenerator[str, None],
) -> StreamingResponse:
    """Wrap an SSE generator in a FastAPI ``StreamingResponse``.

    Uses the ``text/event-stream`` media type and sets headers that stop
    caches and reverse proxies (``X-Accel-Buffering`` targets nginx) from
    buffering, so each event reaches the client as soon as it is yielded.
    """
    sse_headers = {
        "Cache-Control": "no-cache",
        "X-Accel-Buffering": "no",
    }
    return StreamingResponse(
        generator,
        media_type="text/event-stream",
        headers=sse_headers,
    )
@@ -0,0 +1,29 @@
1
+ [build-system]
2
+ requires = ["hatchling"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [project]
6
+ name = "agent-event-stream"
7
+ version = "0.1.0"
8
+ description = "Typed SSE event protocol for AI agents — token, tool_use, thinking, progress, done"
9
+ license = { text = "MIT" }
10
+ requires-python = ">=3.11"
11
+ keywords = ["llm", "ai-agents", "sse", "streaming", "fastapi", "anthropic", "openai"]
12
+ classifiers = [
13
+ "Development Status :: 4 - Beta",
14
+ "Intended Audience :: Developers",
15
+ "License :: OSI Approved :: MIT License",
16
+ "Programming Language :: Python :: 3.11",
17
+ "Programming Language :: Python :: 3.12",
18
+ "Topic :: Software Development :: Libraries",
19
+ ]
20
+
21
+ [project.optional-dependencies]
22
+ fastapi = ["fastapi>=0.100"]
23
+ dev = ["pytest>=8", "fastapi>=0.100", "httpx>=0.27"]
24
+
25
+ [tool.hatch.build.targets.wheel]
26
+ packages = ["agent_stream"]
27
+
28
+ [tool.pytest.ini_options]
29
+ testpaths = ["tests"]
File without changes
@@ -0,0 +1,53 @@
1
import time
import pytest
from agent_stream.batcher import TokenBatcher


def test_add_below_interval_returns_none():
    # A long interval means the very first add cannot trigger a flush.
    b = TokenBatcher(interval_ms=200)
    assert b.add("hello") is None


def test_add_above_interval_returns_batched():
    # Zero interval: every add flushes immediately.
    b = TokenBatcher(interval_ms=0)
    assert b.add("hello") == "hello"


def test_flush_returns_accumulated_text():
    b = TokenBatcher(interval_ms=200)
    b.add("foo")
    b.add("bar")
    assert b.flush() == "foobar"


def test_flush_empty_returns_none():
    assert TokenBatcher().flush() is None


def test_flush_clears_buffer():
    b = TokenBatcher(interval_ms=200)
    b.add("x")
    b.flush()
    # A second flush finds nothing left.
    assert b.flush() is None


def test_has_content_false_when_empty():
    assert TokenBatcher().has_content is False


def test_has_content_true_after_add():
    b = TokenBatcher(interval_ms=200)
    b.add("x")
    assert b.has_content is True


def test_multiple_adds_batched():
    b = TokenBatcher(interval_ms=200)
    for token in ("a", "b", "c"):
        b.add(token)
    assert b.flush() == "abc"
@@ -0,0 +1,113 @@
1
import json
import pytest
from agent_stream.emitter import AgentStreamEmitter


@pytest.fixture
def emitter():
    # Fresh emitter per test; the emitter is stateless, so this is cheap.
    return AgentStreamEmitter()


def _parse(sse: str) -> tuple[str, dict]:
    """Parse 'event: X\ndata: {...}\n\n' into (event_type, data)."""
    lines = sse.strip().splitlines()
    event_type = lines[0].removeprefix("event: ")
    data = json.loads(lines[1].removeprefix("data: "))
    return event_type, data


def test_token(emitter):
    ev, data = _parse(emitter.token("hello"))
    assert ev == "token"
    assert data["text"] == "hello"


def test_thinking(emitter):
    ev, data = _parse(emitter.thinking("step 1: consider X"))
    assert ev == "thinking"
    assert data["text"] == "step 1: consider X"


def test_tool_use(emitter):
    ev, data = _parse(emitter.tool_use("search", "tu_123", "query=foo"))
    assert ev == "tool_use"
    assert data["tool_name"] == "search"
    assert data["tool_use_id"] == "tu_123"
    assert data["input_summary"] == "query=foo"
    # tool_use events always report status "running".
    assert data["status"] == "running"


def test_tool_result_success(emitter):
    ev, data = _parse(emitter.tool_result("search", "tu_123", "3 results", 42))
    assert ev == "tool_result"
    assert data["status"] == "done"
    assert data["duration_ms"] == 42


def test_tool_result_error(emitter):
    # is_error flips the status field from "done" to "error".
    ev, data = _parse(emitter.tool_result("search", "tu_123", is_error=True))
    assert ev == "tool_result"
    assert data["status"] == "error"


def test_turn(emitter):
    ev, data = _parse(emitter.turn(2, total_tools=5))
    assert ev == "turn"
    assert data["turn_number"] == 2
    assert data["total_tools"] == 5


def test_progress(emitter):
    ev, data = _parse(emitter.progress("synthesis", 40, "Extracting insights"))
    assert ev == "progress"
    assert data["percentage"] == 40


def test_progress_with_sub(emitter):
    # sub_progress is only present when explicitly passed.
    ev, data = _parse(emitter.progress(
        "synthesis", 50, "Processing",
        sub_progress={"current": 3, "total": 10, "item_title": "Interview 3"}
    ))
    assert data["sub_progress"]["current"] == 3


def test_creation(emitter):
    ev, data = _parse(emitter.creation("insights", 5, tool_use_id="tu_abc"))
    assert ev == "creation"
    assert data["count"] == 5
    assert data["creation_type"] == "insights"
    assert data["tool_use_id"] == "tu_abc"
    assert data["items"] == []  # default empty list always emitted


def test_creation_defaults(emitter):
    ev, data = _parse(emitter.creation("document", 1))
    assert data["tool_use_id"] == ""
    assert data["items"] == []


def test_error(emitter):
    ev, data = _parse(emitter.error("timeout", "Request timed out"))
    assert ev == "error"
    assert data["error_type"] == "timeout"
    assert data["message"] == "Request timed out"
    assert data["details"] == {}  # default empty dict always emitted


def test_error_with_details(emitter):
    ev, data = _parse(emitter.error("auth", "Unauthorized", details={"code": 401}))
    assert data["details"] == {"code": 401}


def test_done(emitter):
    ev, data = _parse(emitter.done(
        message_id="msg_1", num_turns=3, tool_count=7,
        duration_ms=4200, model="claude-sonnet-4-6"
    ))
    assert ev == "done"
    assert data["num_turns"] == 3
    assert data["model"] == "claude-sonnet-4-6"


def test_sse_format_ends_with_double_newline(emitter):
    # A blank line terminates each SSE event, hence the trailing "\n\n".
    assert emitter.token("x").endswith("\n\n")
@@ -0,0 +1,30 @@
1
import pytest

pytest.importorskip("fastapi")  # Skip if FastAPI not installed

from fastapi.responses import StreamingResponse
from agent_stream.fastapi import agent_stream_response


async def mock_generator():
    # Minimal async generator yielding one pre-formatted SSE event.
    yield "event: token\ndata: {\"text\": \"hi\"}\n\n"


def test_returns_streaming_response():
    response = agent_stream_response(mock_generator())
    assert isinstance(response, StreamingResponse)


def test_content_type_is_event_stream():
    response = agent_stream_response(mock_generator())
    assert response.media_type == "text/event-stream"


def test_no_cache_header():
    response = agent_stream_response(mock_generator())
    assert response.headers.get("Cache-Control") == "no-cache"


def test_no_buffering_header():
    # X-Accel-Buffering: no disables nginx proxy buffering for SSE.
    response = agent_stream_response(mock_generator())
    assert response.headers.get("X-Accel-Buffering") == "no"