haiku.rag-slim 0.16.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- haiku/rag/__init__.py +0 -0
- haiku/rag/app.py +542 -0
- haiku/rag/chunker.py +65 -0
- haiku/rag/cli.py +466 -0
- haiku/rag/client.py +731 -0
- haiku/rag/config/__init__.py +74 -0
- haiku/rag/config/loader.py +94 -0
- haiku/rag/config/models.py +99 -0
- haiku/rag/embeddings/__init__.py +49 -0
- haiku/rag/embeddings/base.py +25 -0
- haiku/rag/embeddings/ollama.py +28 -0
- haiku/rag/embeddings/openai.py +26 -0
- haiku/rag/embeddings/vllm.py +29 -0
- haiku/rag/embeddings/voyageai.py +27 -0
- haiku/rag/graph/__init__.py +26 -0
- haiku/rag/graph/agui/__init__.py +53 -0
- haiku/rag/graph/agui/cli_renderer.py +135 -0
- haiku/rag/graph/agui/emitter.py +197 -0
- haiku/rag/graph/agui/events.py +254 -0
- haiku/rag/graph/agui/server.py +310 -0
- haiku/rag/graph/agui/state.py +34 -0
- haiku/rag/graph/agui/stream.py +86 -0
- haiku/rag/graph/common/__init__.py +5 -0
- haiku/rag/graph/common/models.py +42 -0
- haiku/rag/graph/common/nodes.py +265 -0
- haiku/rag/graph/common/prompts.py +46 -0
- haiku/rag/graph/common/utils.py +44 -0
- haiku/rag/graph/deep_qa/__init__.py +1 -0
- haiku/rag/graph/deep_qa/dependencies.py +27 -0
- haiku/rag/graph/deep_qa/graph.py +243 -0
- haiku/rag/graph/deep_qa/models.py +20 -0
- haiku/rag/graph/deep_qa/prompts.py +59 -0
- haiku/rag/graph/deep_qa/state.py +56 -0
- haiku/rag/graph/research/__init__.py +3 -0
- haiku/rag/graph/research/common.py +87 -0
- haiku/rag/graph/research/dependencies.py +151 -0
- haiku/rag/graph/research/graph.py +295 -0
- haiku/rag/graph/research/models.py +166 -0
- haiku/rag/graph/research/prompts.py +107 -0
- haiku/rag/graph/research/state.py +85 -0
- haiku/rag/logging.py +56 -0
- haiku/rag/mcp.py +245 -0
- haiku/rag/monitor.py +194 -0
- haiku/rag/qa/__init__.py +33 -0
- haiku/rag/qa/agent.py +93 -0
- haiku/rag/qa/prompts.py +60 -0
- haiku/rag/reader.py +135 -0
- haiku/rag/reranking/__init__.py +63 -0
- haiku/rag/reranking/base.py +13 -0
- haiku/rag/reranking/cohere.py +34 -0
- haiku/rag/reranking/mxbai.py +28 -0
- haiku/rag/reranking/vllm.py +44 -0
- haiku/rag/reranking/zeroentropy.py +59 -0
- haiku/rag/store/__init__.py +4 -0
- haiku/rag/store/engine.py +309 -0
- haiku/rag/store/models/__init__.py +4 -0
- haiku/rag/store/models/chunk.py +17 -0
- haiku/rag/store/models/document.py +17 -0
- haiku/rag/store/repositories/__init__.py +9 -0
- haiku/rag/store/repositories/chunk.py +442 -0
- haiku/rag/store/repositories/document.py +261 -0
- haiku/rag/store/repositories/settings.py +165 -0
- haiku/rag/store/upgrades/__init__.py +62 -0
- haiku/rag/store/upgrades/v0_10_1.py +64 -0
- haiku/rag/store/upgrades/v0_9_3.py +112 -0
- haiku/rag/utils.py +211 -0
- haiku_rag_slim-0.16.0.dist-info/METADATA +128 -0
- haiku_rag_slim-0.16.0.dist-info/RECORD +71 -0
- haiku_rag_slim-0.16.0.dist-info/WHEEL +4 -0
- haiku_rag_slim-0.16.0.dist-info/entry_points.txt +2 -0
- haiku_rag_slim-0.16.0.dist-info/licenses/LICENSE +7 -0
haiku/rag/graph/agui/emitter.py
@@ -0,0 +1,197 @@
+"""Generic AG-UI event emitter for any graph execution."""
+
+import asyncio
+import hashlib
+from collections.abc import AsyncIterator
+from uuid import uuid4
+
+from pydantic import BaseModel
+
+from haiku.rag.graph.agui.events import (
+    AGUIEvent,
+    emit_activity,
+    emit_run_error,
+    emit_run_finished,
+    emit_run_started,
+    emit_state_delta,
+    emit_state_snapshot,
+    emit_step_finished,
+    emit_step_started,
+    emit_text_message,
+)
+
+
+class AGUIEmitter[StateT: BaseModel, ResultT]:
+    """Generic queue-backed AG-UI event emitter for any graph.
+
+    Manages the lifecycle of AG-UI events including:
+    - Run lifecycle (start, finish, error)
+    - Step lifecycle (start, finish)
+    - Text messages
+    - State synchronization (snapshots and deltas)
+    - Activity updates
+
+    Type parameters:
+        StateT: The Pydantic BaseModel type for graph state
+        ResultT: The result type returned by the graph
+    """
+
+    def __init__(
+        self,
+        thread_id: str | None = None,
+        run_id: str | None = None,
+        use_deltas: bool = True,
+    ):
+        """Initialize the emitter.
+
+        Args:
+            thread_id: Optional thread ID (generated from input hash if not provided)
+            run_id: Optional run ID (random UUID if not provided)
+            use_deltas: Whether to emit state deltas instead of full snapshots (default: True)
+        """
+        self._queue: asyncio.Queue[AGUIEvent | None] = asyncio.Queue()
+        self._closed = False
+        self._thread_id = thread_id or str(uuid4())
+        self._run_id = run_id or str(uuid4())
+        self._last_state: StateT | None = None
+        self._current_step: str | None = None
+        self._use_deltas = use_deltas
+
+    @property
+    def thread_id(self) -> str:
+        """Get the thread ID for this emitter."""
+        return self._thread_id
+
+    @property
+    def run_id(self) -> str:
+        """Get the run ID for this emitter."""
+        return self._run_id
+
+    def start_run(self, initial_state: StateT) -> None:
+        """Emit RunStarted and initial StateSnapshot.
+
+        Args:
+            initial_state: The initial state of the graph
+        """
+        # If thread_id wasn't provided, generate from state hash
+        if not self._thread_id or self._thread_id == str(uuid4()):
+            state_json = initial_state.model_dump_json()
+            self._thread_id = self._generate_thread_id(state_json)
+
+        # RunStarted (state snapshot follows immediately with full state)
+        self._emit(emit_run_started(self._thread_id, self._run_id))
+        self._emit(emit_state_snapshot(initial_state))
+        # Store a deep copy to detect future changes
+        self._last_state = initial_state.model_copy(deep=True)
+
+    def start_step(self, step_name: str) -> None:
+        """Emit StepStarted event.
+
+        Args:
+            step_name: Name of the step being started
+        """
+        self._current_step = step_name
+        self._emit(emit_step_started(step_name))
+
+    def finish_step(self) -> None:
+        """Emit StepFinished event for the current step."""
+        if self._current_step:
+            self._emit(emit_step_finished(self._current_step))
+            self._current_step = None
+
+    def log(self, message: str, role: str = "assistant") -> None:
+        """Emit a text message event.
+
+        Args:
+            message: The message content
+            role: The role of the sender (default: assistant)
+        """
+        self._emit(emit_text_message(message, role))
+
+    def update_state(self, new_state: StateT) -> None:
+        """Emit StateDelta or StateSnapshot for state change.
+
+        Args:
+            new_state: The updated state
+        """
+        if self._use_deltas and self._last_state is not None:
+            # Emit delta for incremental updates
+            self._emit(emit_state_delta(self._last_state, new_state))
+        else:
+            # Emit full snapshot for initial state or when deltas disabled
+            self._emit(emit_state_snapshot(new_state))
+        # Store a deep copy to detect future changes
+        self._last_state = new_state.model_copy(deep=True)
+
+    def update_activity(
+        self, activity_type: str, content: str, message_id: str | None = None
+    ) -> None:
+        """Emit ActivitySnapshot event.
+
+        Args:
+            activity_type: Type of activity (e.g., "planning", "searching")
+            content: Description of the activity
+            message_id: Optional message ID to associate activity with (auto-generated if None)
+        """
+        if message_id is None:
+            message_id = str(uuid4())
+        self._emit(emit_activity(message_id, activity_type, content))
+
+    def finish_run(self, result: ResultT) -> None:
+        """Emit RunFinished event.
+
+        Args:
+            result: The final result from the graph
+        """
+        self._emit(emit_run_finished(self._thread_id, self._run_id, result))
+
+    def error(self, error: Exception, code: str | None = None) -> None:
+        """Emit RunError event.
+
+        Args:
+            error: The exception that occurred
+            code: Optional error code
+        """
+        self._emit(emit_run_error(str(error), code))
+
+    def _emit(self, event: AGUIEvent) -> None:
+        """Put event in queue.
+
+        Args:
+            event: The event to emit
+        """
+        if not self._closed:
+            self._queue.put_nowait(event)
+
+    async def close(self) -> None:
+        """Close the emitter and stop event iteration."""
+        if self._closed:
+            return
+        self._closed = True
+        await self._queue.put(None)
+
+    def __aiter__(self) -> AsyncIterator[AGUIEvent]:
+        """Enable async iteration over events."""
+        return self._iter_events()
+
+    async def _iter_events(self) -> AsyncIterator[AGUIEvent]:
+        """Iterate over events from the queue."""
+        while True:
+            event = await self._queue.get()
+            if event is None:
+                break
+            yield event
+
+    @staticmethod
+    def _generate_thread_id(input_data: str) -> str:
+        """Generate a deterministic thread ID from input data.
+
+        Args:
+            input_data: The input data (e.g., question, prompt)
+
+        Returns:
+            A stable thread ID based on input hash
+        """
+        # Use hash of input for deterministic thread ID
+        hash_obj = hashlib.sha256(input_data.encode("utf-8"))
+        return hash_obj.hexdigest()[:16]
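For orientation, a minimal sketch of how a caller might drive this emitter from a producer task while a consumer drains the event queue. QAState, the question, and the answer string are illustrative placeholders and not part of the package; only the AGUIEmitter API shown in the diff above is assumed.

import asyncio

from pydantic import BaseModel

from haiku.rag.graph.agui.emitter import AGUIEmitter


class QAState(BaseModel):
    # Hypothetical graph state used only for this illustration.
    question: str
    answer: str | None = None


async def run_graph(emitter: AGUIEmitter[QAState, str]) -> None:
    state = QAState(question="What is AG-UI?")
    emitter.start_run(state)          # RUN_STARTED + initial STATE_SNAPSHOT
    emitter.start_step("answer")      # STEP_STARTED
    state = state.model_copy(update={"answer": "An event protocol for agent UIs."})
    emitter.update_state(state)       # STATE_DELTA (deltas are the default)
    emitter.log("Answer ready.")      # TEXT_MESSAGE_CHUNK
    emitter.finish_step()             # STEP_FINISHED
    emitter.finish_run(state.answer)  # RUN_FINISHED
    await emitter.close()             # sentinel stops the consumer loop


async def main() -> None:
    emitter: AGUIEmitter[QAState, str] = AGUIEmitter()
    producer = asyncio.create_task(run_graph(emitter))
    async for event in emitter:       # yields event dicts until close()
        print(event["type"])
    await producer


asyncio.run(main())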
haiku/rag/graph/agui/events.py
@@ -0,0 +1,254 @@
+"""Generic AG-UI event creation utilities for any graph."""
+
+from typing import Any
+from uuid import uuid4
+
+from pydantic import BaseModel
+
+from haiku.rag.graph.agui.state import compute_state_delta
+
+# Type aliases for AG-UI events (actual types from ag_ui.core will be used at runtime)
+AGUIEvent = dict[str, Any]
+
+
+def emit_run_started(
+    thread_id: str, run_id: str, input_data: str | None = None
+) -> dict[str, Any]:
+    """Create a RunStarted event.
+
+    Args:
+        thread_id: Unique identifier for the conversation thread
+        run_id: Unique identifier for this run
+        input_data: Optional input that started the run
+
+    Returns:
+        RunStarted event dict
+    """
+    event: dict[str, Any] = {
+        "type": "RUN_STARTED",
+        "threadId": thread_id,
+        "runId": run_id,
+    }
+    if input_data:
+        event["input"] = input_data
+    return event
+
+
+def emit_run_finished(thread_id: str, run_id: str, result: Any) -> dict[str, Any]:
+    """Create a RunFinished event.
+
+    Args:
+        thread_id: Unique identifier for the conversation thread
+        run_id: Unique identifier for this run
+        result: The final result of the run
+
+    Returns:
+        RunFinished event dict
+    """
+    # Convert result to dict if it's a Pydantic model
+    if hasattr(result, "model_dump"):
+        result = result.model_dump()
+
+    return {
+        "type": "RUN_FINISHED",
+        "threadId": thread_id,
+        "runId": run_id,
+        "result": result,
+    }
+
+
+def emit_run_error(message: str, code: str | None = None) -> dict[str, Any]:
+    """Create a RunError event.
+
+    Args:
+        message: Error message
+        code: Optional error code
+
+    Returns:
+        RunError event dict
+    """
+    event: dict[str, Any] = {
+        "type": "RUN_ERROR",
+        "message": message,
+    }
+    if code:
+        event["code"] = code
+    return event
+
+
+def emit_step_started(step_name: str) -> dict[str, Any]:
+    """Create a StepStarted event.
+
+    Args:
+        step_name: Name of the step being started
+
+    Returns:
+        StepStarted event dict
+    """
+    return {
+        "type": "STEP_STARTED",
+        "stepName": step_name,
+    }
+
+
+def emit_step_finished(step_name: str) -> dict[str, Any]:
+    """Create a StepFinished event.
+
+    Args:
+        step_name: Name of the step that finished
+
+    Returns:
+        StepFinished event dict
+    """
+    return {
+        "type": "STEP_FINISHED",
+        "stepName": step_name,
+    }
+
+
+def emit_text_message(content: str, role: str = "assistant") -> dict[str, Any]:
+    """Create a TextMessageChunk event (convenience wrapper).
+
+    This creates a complete text message in one event.
+
+    Args:
+        content: The message content
+        role: The role of the sender (default: assistant)
+
+    Returns:
+        TextMessageChunk event dict
+    """
+    message_id = str(uuid4())
+    return {
+        "type": "TEXT_MESSAGE_CHUNK",
+        "messageId": message_id,
+        "role": role,
+        "delta": content,
+    }
+
+
+def emit_text_message_start(message_id: str, role: str = "assistant") -> dict[str, Any]:
+    """Create a TextMessageStart event.
+
+    Args:
+        message_id: Unique identifier for this message
+        role: The role of the sender
+
+    Returns:
+        TextMessageStart event dict
+    """
+    return {
+        "type": "TEXT_MESSAGE_START",
+        "messageId": message_id,
+        "role": role,
+    }
+
+
+def emit_text_message_content(message_id: str, delta: str) -> dict[str, Any]:
+    """Create a TextMessageContent event.
+
+    Args:
+        message_id: Identifier for the message being streamed
+        delta: Content chunk to append
+
+    Returns:
+        TextMessageContent event dict
+    """
+    return {
+        "type": "TEXT_MESSAGE_CONTENT",
+        "messageId": message_id,
+        "delta": delta,
+    }
+
+
+def emit_text_message_end(message_id: str) -> dict[str, Any]:
+    """Create a TextMessageEnd event.
+
+    Args:
+        message_id: Identifier for the message being completed
+
+    Returns:
+        TextMessageEnd event dict
+    """
+    return {
+        "type": "TEXT_MESSAGE_END",
+        "messageId": message_id,
+    }
+
+
+def emit_state_snapshot(state: BaseModel) -> dict[str, Any]:
+    """Create a StateSnapshot event.
+
+    Args:
+        state: The complete state to snapshot (any Pydantic BaseModel)
+
+    Returns:
+        StateSnapshot event dict
+    """
+    return {
+        "type": "STATE_SNAPSHOT",
+        "snapshot": state.model_dump(),
+    }
+
+
+def emit_state_delta(old_state: BaseModel, new_state: BaseModel) -> dict[str, Any]:
+    """Create a StateDelta event with JSON Patch operations.
+
+    Args:
+        old_state: Previous state (any Pydantic BaseModel)
+        new_state: Current state (same type as old_state)
+
+    Returns:
+        StateDelta event dict
+    """
+    delta = compute_state_delta(old_state, new_state)
+    return {
+        "type": "STATE_DELTA",
+        "delta": delta,
+    }
+
+
+def emit_activity(
+    message_id: str,
+    activity_type: str,
+    content: str,
+) -> dict[str, Any]:
+    """Create an ActivitySnapshot event.
+
+    Args:
+        message_id: Message ID to associate activity with (required)
+        activity_type: Type of activity (e.g., "planning", "searching")
+        content: Description of the activity
+
+    Returns:
+        ActivitySnapshot event dict
+    """
+    return {
+        "type": "ACTIVITY_SNAPSHOT",
+        "messageId": message_id,
+        "activityType": activity_type,
+        "content": content,
+    }
+
+
+def emit_activity_delta(
+    message_id: str,
+    activity_type: str,
+    patch: list[dict[str, Any]],
+) -> dict[str, Any]:
+    """Create an ActivityDelta event with JSON Patch operations.
+
+    Args:
+        message_id: Message ID of the activity being updated
+        activity_type: Type of activity being updated
+        patch: JSON Patch operations to apply
+
+    Returns:
+        ActivityDelta event dict
+    """
+    return {
+        "type": "ACTIVITY_DELTA",
+        "messageId": message_id,
+        "activityType": activity_type,
+        "patch": patch,
+    }
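The start/content/end helpers above are not used by AGUIEmitter (which sends whole messages via emit_text_message), but they allow streaming one message in pieces. A minimal sketch of the dicts they produce, using placeholder text:

from uuid import uuid4

from haiku.rag.graph.agui.events import (
    emit_text_message_content,
    emit_text_message_end,
    emit_text_message_start,
)

message_id = str(uuid4())
stream = [
    emit_text_message_start(message_id),             # {"type": "TEXT_MESSAGE_START", ...}
    emit_text_message_content(message_id, "Hello"),  # {"type": "TEXT_MESSAGE_CONTENT", "delta": "Hello", ...}
    emit_text_message_content(message_id, ", world"),
    emit_text_message_end(message_id),               # {"type": "TEXT_MESSAGE_END", ...}
]

for event in stream:
    assert event["messageId"] == message_id  # all chunks share one message ID
    print(event)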