chuk-ai-session-manager 0.7.1__py3-none-any.whl → 0.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chuk_ai_session_manager/__init__.py +84 -40
- chuk_ai_session_manager/api/__init__.py +1 -1
- chuk_ai_session_manager/api/simple_api.py +53 -59
- chuk_ai_session_manager/exceptions.py +31 -17
- chuk_ai_session_manager/guards/__init__.py +118 -0
- chuk_ai_session_manager/guards/bindings.py +217 -0
- chuk_ai_session_manager/guards/cache.py +163 -0
- chuk_ai_session_manager/guards/manager.py +819 -0
- chuk_ai_session_manager/guards/models.py +498 -0
- chuk_ai_session_manager/guards/ungrounded.py +159 -0
- chuk_ai_session_manager/infinite_conversation.py +86 -79
- chuk_ai_session_manager/memory/__init__.py +247 -0
- chuk_ai_session_manager/memory/artifacts_bridge.py +469 -0
- chuk_ai_session_manager/memory/context_packer.py +347 -0
- chuk_ai_session_manager/memory/fault_handler.py +507 -0
- chuk_ai_session_manager/memory/manifest.py +307 -0
- chuk_ai_session_manager/memory/models.py +1084 -0
- chuk_ai_session_manager/memory/mutation_log.py +186 -0
- chuk_ai_session_manager/memory/pack_cache.py +206 -0
- chuk_ai_session_manager/memory/page_table.py +275 -0
- chuk_ai_session_manager/memory/prefetcher.py +192 -0
- chuk_ai_session_manager/memory/tlb.py +247 -0
- chuk_ai_session_manager/memory/vm_prompts.py +238 -0
- chuk_ai_session_manager/memory/working_set.py +574 -0
- chuk_ai_session_manager/models/__init__.py +21 -9
- chuk_ai_session_manager/models/event_source.py +3 -1
- chuk_ai_session_manager/models/event_type.py +10 -1
- chuk_ai_session_manager/models/session.py +103 -68
- chuk_ai_session_manager/models/session_event.py +69 -68
- chuk_ai_session_manager/models/session_metadata.py +9 -10
- chuk_ai_session_manager/models/session_run.py +21 -22
- chuk_ai_session_manager/models/token_usage.py +76 -76
- chuk_ai_session_manager/procedural_memory/__init__.py +70 -0
- chuk_ai_session_manager/procedural_memory/formatter.py +407 -0
- chuk_ai_session_manager/procedural_memory/manager.py +523 -0
- chuk_ai_session_manager/procedural_memory/models.py +371 -0
- chuk_ai_session_manager/sample_tools.py +79 -46
- chuk_ai_session_manager/session_aware_tool_processor.py +27 -16
- chuk_ai_session_manager/session_manager.py +238 -197
- chuk_ai_session_manager/session_prompt_builder.py +163 -111
- chuk_ai_session_manager/session_storage.py +45 -52
- {chuk_ai_session_manager-0.7.1.dist-info → chuk_ai_session_manager-0.8.dist-info}/METADATA +79 -3
- chuk_ai_session_manager-0.8.dist-info/RECORD +45 -0
- {chuk_ai_session_manager-0.7.1.dist-info → chuk_ai_session_manager-0.8.dist-info}/WHEEL +1 -1
- chuk_ai_session_manager-0.7.1.dist-info/RECORD +0 -22
- {chuk_ai_session_manager-0.7.1.dist-info → chuk_ai_session_manager-0.8.dist-info}/top_level.txt +0 -0
chuk_ai_session_manager/models/session_event.py

```diff
@@ -2,6 +2,7 @@
 """
 Session event model for the chuk session manager with improved async support.
 """
+
 from __future__ import annotations
 from datetime import datetime, timezone
 from typing import Any, Dict, Generic, Optional, TypeVar
@@ -12,32 +13,34 @@ from chuk_ai_session_manager.models.event_source import EventSource
 from chuk_ai_session_manager.models.event_type import EventType
 from chuk_ai_session_manager.models.token_usage import TokenUsage

-MessageT = TypeVar(
+MessageT = TypeVar("MessageT")
+

 class SessionEvent(BaseModel, Generic[MessageT]):
     """
     A single event within a session.
-
+
     Events track all interactions in a session including messages,
     tool calls, summaries, and other activities.
     """
+
     model_config = ConfigDict(arbitrary_types_allowed=True)
-
+
     id: str = Field(default_factory=lambda: str(uuid4()))
     message: MessageT
     timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
-
+
     # Make source and type have defaults for backward compatibility with tests
     source: EventSource = Field(default=EventSource.SYSTEM)
     type: EventType = Field(default=EventType.MESSAGE)
-
+
     metadata: Dict[str, Any] = Field(default_factory=dict)
     parent_event_id: Optional[str] = None
     task_id: Optional[str] = None
-
+
     # Token tracking
     token_usage: Optional[TokenUsage] = None
-
+
     @classmethod
     async def create_with_tokens(
         cls,
@@ -47,11 +50,11 @@ class SessionEvent(BaseModel, Generic[MessageT]):
         model: str = "gpt-3.5-turbo",
         source: EventSource = EventSource.SYSTEM,
         type: EventType = EventType.MESSAGE,
-        **kwargs
+        **kwargs,
     ) -> SessionEvent[MessageT]:
         """
         Create a new SessionEvent with automatic token counting.
-
+
         Args:
             message: The message content
             prompt: The prompt text for token counting
@@ -60,43 +63,37 @@ class SessionEvent(BaseModel, Generic[MessageT]):
             source: The event source
             type: The event type
             **kwargs: Additional fields for the event
-
+
         Returns:
             A new SessionEvent instance with token usage calculated
         """
         # Create token usage
         token_usage = await TokenUsage.from_text(
-            prompt=prompt,
-            completion=completion,
-            model=model
+            prompt=prompt, completion=completion, model=model
         )
-
+
         # Create the event
         event = cls(
-            message=message,
-            source=source,
-            type=type,
-            token_usage=token_usage,
-            **kwargs
+            message=message, source=source, type=type, token_usage=token_usage, **kwargs
         )
-
+
         return event
-
+
     async def update_token_usage(
-        self,
+        self,
         prompt: Optional[str] = None,
         completion: Optional[str] = None,
         prompt_tokens: Optional[int] = None,
         completion_tokens: Optional[int] = None,
-        model: str = "gpt-3.5-turbo"
+        model: str = "gpt-3.5-turbo",
     ) -> None:
         """
         Update the token usage for this event.
-
+
         This method supports two modes:
         1. Pass prompt/completion strings to calculate tokens
         2. Pass prompt_tokens/completion_tokens directly
-
+
         Args:
             prompt: Optional prompt text to calculate tokens from
             completion: Optional completion text to calculate tokens from
@@ -107,139 +104,143 @@ class SessionEvent(BaseModel, Generic[MessageT]):
         if prompt is not None or completion is not None:
             # Calculate tokens from text
             self.token_usage = await TokenUsage.from_text(
-                prompt=prompt or "",
-                completion=completion,
-                model=model
+                prompt=prompt or "", completion=completion, model=model
             )
         elif prompt_tokens is not None or completion_tokens is not None:
             # Use provided token counts
             if not self.token_usage:
                 self.token_usage = TokenUsage(model=model)
-
+
             if prompt_tokens is not None:
                 self.token_usage.prompt_tokens = prompt_tokens
            if completion_tokens is not None:
                 self.token_usage.completion_tokens = completion_tokens
-
+
             # Update total
-            self.token_usage.total_tokens =
-
+            self.token_usage.total_tokens = (
+                self.token_usage.prompt_tokens + self.token_usage.completion_tokens
+            )
+
             # Recalculate cost
-            self.token_usage.estimated_cost_usd =
-
+            self.token_usage.estimated_cost_usd = (
+                self.token_usage._calculate_cost_sync()
+            )
+
     async def set_metadata(self, key: str, value: Any) -> None:
         """
         Set a metadata value asynchronously.
-
+
         Args:
             key: The metadata key
             value: The value to set
         """
         self.metadata[key] = value
-
+
     async def get_metadata(self, key: str, default: Any = None) -> Any:
         """
         Get a metadata value asynchronously.
-
+
         Args:
             key: The metadata key
             default: Default value if key not found
-
+
         Returns:
             The metadata value or default
         """
         return self.metadata.get(key, default)
-
+
     async def has_metadata(self, key: str) -> bool:
         """
         Check if a metadata key exists asynchronously.
-
+
         Args:
             key: The metadata key to check
-
+
         Returns:
             True if the key exists
         """
         return key in self.metadata
-
+
     async def remove_metadata(self, key: str) -> None:
         """
         Remove a metadata key-value pair asynchronously.
-
+
         Args:
             key: The metadata key to remove
         """
         if key in self.metadata:
             del self.metadata[key]
-
+
     async def update_metadata(self, key: str, value: Any) -> None:
         """
         Update or add a metadata value asynchronously.
-
+
         Args:
             key: The metadata key
             value: The new value
         """
         self.metadata[key] = value
-
+
     async def merge_metadata(self, new_metadata: Dict[str, Any]) -> None:
         """
         Merge new metadata with existing metadata asynchronously.
-
+
         Args:
             new_metadata: Dictionary of metadata to merge
         """
         self.metadata.update(new_metadata)
-
+
     async def clear_metadata(self) -> None:
         """Clear all metadata asynchronously."""
         self.metadata.clear()
-
+
     async def calculate_tokens(self, model: str = "gpt-3.5-turbo") -> int:
         """
         Calculate tokens for this event's message asynchronously.
-
+
         Args:
             model: The model to use for token counting
-
+
         Returns:
             The number of tokens in the message
         """
         if self.token_usage:
             return self.token_usage.total_tokens
-
+
         # Calculate tokens from message
-        message_str =
+        message_str = (
+            str(self.message) if not isinstance(self.message, str) else self.message
+        )
         return await TokenUsage.count_tokens(message_str, model)
-
+
     def is_child_of(self, parent_event_id: str) -> bool:
         """
         Check if this event is a child of another event.
-
+
         Args:
             parent_event_id: The ID of the potential parent event
-
+
         Returns:
             True if this event is a child of the specified event
         """
         return self.parent_event_id == parent_event_id
-
+
     def is_part_of_task(self, task_id: str) -> bool:
         """
         Check if this event is part of a specific task.
-
+
         Args:
             task_id: The task ID to check
-
+
         Returns:
             True if this event is part of the specified task
         """
         return self.task_id == task_id
-
+
     async def to_dict(self) -> Dict[str, Any]:
         """
         Convert the event to a dictionary asynchronously.
-
+
         Returns:
             Dictionary representation of the event
         """
@@ -249,22 +250,22 @@ class SessionEvent(BaseModel, Generic[MessageT]):
             "timestamp": self.timestamp.isoformat(),
             "source": self.source.value,
             "type": self.type.value,
-            "metadata": self.metadata
+            "metadata": self.metadata,
         }
-
+
         if self.parent_event_id:
             result["parent_event_id"] = self.parent_event_id
-
+
         if self.task_id:
             result["task_id"] = self.task_id
-
+
         if self.token_usage:
             result["token_usage"] = {
                 "prompt_tokens": self.token_usage.prompt_tokens,
                 "completion_tokens": self.token_usage.completion_tokens,
                 "total_tokens": self.token_usage.total_tokens,
                 "model": self.token_usage.model,
-                "estimated_cost_usd": self.token_usage.estimated_cost_usd
+                "estimated_cost_usd": self.token_usage.estimated_cost_usd,
             }
-
-        return result
+
+        return result
```
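The reformatted `create_with_tokens` and `update_token_usage` signatures above keep the same keyword-driven API, so a caller sketch looks roughly like the following. This is a minimal, unverified example based only on the signatures visible in this diff; the `session_event` import path is assumed from the file list above.

```python
import asyncio

from chuk_ai_session_manager.models.event_source import EventSource
from chuk_ai_session_manager.models.event_type import EventType
from chuk_ai_session_manager.models.session_event import SessionEvent  # assumed path


async def demo() -> None:
    # Token usage is counted up front from the prompt/completion text.
    event = await SessionEvent.create_with_tokens(
        message="Hello, world!",
        prompt="Say hello",
        completion="Hello, world!",
        model="gpt-3.5-turbo",
        source=EventSource.SYSTEM,
        type=EventType.MESSAGE,
    )

    # Mode 2 of update_token_usage: pass raw token counts instead of text.
    await event.update_token_usage(prompt_tokens=12, completion_tokens=4)

    # Metadata helpers and serialization are async in this model.
    await event.set_metadata("trace_id", "abc-123")
    print(await event.to_dict())


asyncio.run(demo())
```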
chuk_ai_session_manager/models/session_metadata.py

```diff
@@ -7,12 +7,13 @@ from pydantic import BaseModel, Field

 class SessionMetadata(BaseModel):
     """Core metadata associated with a session."""
+
     created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
     updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
-
+
     # Free-form properties for session-level identifiers and custom data
     properties: Dict[str, Any] = Field(default_factory=dict)
-
+
     async def set_property(self, key: str, value: Any) -> None:
         """Add or update a custom metadata property asynchronously."""
         self.properties[key] = value
@@ -21,17 +22,15 @@ class SessionMetadata(BaseModel):
     async def get_property(self, key: str) -> Any:
         """Retrieve a metadata property by key asynchronously."""
         return self.properties.get(key)
-
+
     async def update_timestamp(self) -> None:
         """Update the updated_at timestamp asynchronously."""
         self.updated_at = datetime.now(timezone.utc)
-
+
     @classmethod
-    async def create(
+    async def create(
+        cls, properties: Optional[Dict[str, Any]] = None
+    ) -> SessionMetadata:
         """Create a new SessionMetadata instance asynchronously."""
         now = datetime.now(timezone.utc)
-        return cls(
-            created_at=now,
-            updated_at=now,
-            properties=properties or {}
-        )
+        return cls(created_at=now, updated_at=now, properties=properties or {})
```
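`SessionMetadata.create()` now has its signature wrapped across lines, but it still takes only an optional `properties` dict. A short usage sketch, assuming the import path mirrors the file layout above:

```python
import asyncio

from chuk_ai_session_manager.models.session_metadata import SessionMetadata  # assumed path


async def demo() -> None:
    # created_at/updated_at are stamped automatically; properties is free-form.
    meta = await SessionMetadata.create(properties={"user_id": "u-42"})

    await meta.set_property("channel", "web")
    print(await meta.get_property("user_id"), meta.created_at.isoformat())

    # Refresh updated_at after changing something elsewhere in the session.
    await meta.update_timestamp()


asyncio.run(demo())
```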
chuk_ai_session_manager/models/session_run.py

```diff
@@ -2,6 +2,7 @@
 """
 Session run model for the chuk session manager with improved async support.
 """
+
 from __future__ import annotations
 from datetime import datetime, timezone
 from enum import Enum
@@ -12,6 +13,7 @@ from pydantic import BaseModel, Field, ConfigDict

 class RunStatus(str, Enum):
     """Status of a session run."""
+
     PENDING = "pending"
     RUNNING = "running"
     COMPLETED = "completed"
@@ -21,6 +23,7 @@ class RunStatus(str, Enum):

 class SessionRun(BaseModel):
     """A single execution or "run" within a session."""
+
     model_config = ConfigDict(arbitrary_types_allowed=True)

     id: str = Field(default_factory=lambda: str(uuid4()))
@@ -28,15 +31,14 @@ class SessionRun(BaseModel):
     ended_at: Optional[datetime] = None
     status: RunStatus = RunStatus.PENDING
     metadata: Dict[str, Any] = Field(default_factory=dict)
-    tool_calls: List[str] = Field(
+    tool_calls: List[str] = Field(
+        default_factory=list
+    )  # IDs of associated tool call events

     @classmethod
     async def create(cls, metadata: Optional[Dict[str, Any]] = None) -> SessionRun:
         """Create a new session run asynchronously."""
-        return cls(
-            status=RunStatus.PENDING,
-            metadata=metadata or {}
-        )
+        return cls(status=RunStatus.PENDING, metadata=metadata or {})

     async def mark_running(self) -> None:
         """Mark the run as started/running asynchronously."""
@@ -61,43 +63,40 @@ class SessionRun(BaseModel):
         self.ended_at = datetime.now(timezone.utc)
         if reason:
             await self.set_metadata("cancel_reason", reason)
-
+
     async def set_metadata(self, key: str, value: Any) -> None:
         """Set a metadata value asynchronously."""
         self.metadata[key] = value
-
+
     async def get_metadata(self, key: str, default: Any = None) -> Any:
         """Get a metadata value asynchronously."""
         return self.metadata.get(key, default)
-
+
     async def has_metadata(self, key: str) -> bool:
         """Check if a metadata key exists asynchronously."""
         return key in self.metadata
-
+
     async def remove_metadata(self, key: str) -> None:
         """Remove a metadata key-value pair asynchronously."""
         if key in self.metadata:
             del self.metadata[key]
-
+
     async def get_duration(self) -> Optional[float]:
         """Get the duration of the run in seconds asynchronously."""
         if self.ended_at is None:
             return None
         return (self.ended_at - self.started_at).total_seconds()
-
+
     async def add_tool_call(self, tool_call_id: str) -> None:
         """Associate a tool call event with this run asynchronously."""
         if tool_call_id not in self.tool_calls:
             self.tool_calls.append(tool_call_id)
-
-    async def get_tool_calls(self, session: Any) -> List[Any]:
+
+    async def get_tool_calls(self, session: Any) -> List[Any]:
         """Get all tool call events associated with this run asynchronously."""
         # We use Any type to avoid circular imports
-        return [
-
-            if event.id in self.tool_calls
-        ]
-
+        return [event for event in session.events if event.id in self.tool_calls]
+
     async def to_dict(self) -> Dict[str, Any]:
         """Convert the run to a dictionary asynchronously."""
         result = {
@@ -105,11 +104,11 @@ class SessionRun(BaseModel):
             "status": self.status.value,
             "started_at": self.started_at.isoformat(),
             "metadata": self.metadata,
-            "tool_calls": self.tool_calls
+            "tool_calls": self.tool_calls,
         }
-
+
         if self.ended_at:
             result["ended_at"] = self.ended_at.isoformat()
             result["duration"] = await self.get_duration()
-
-        return result
+
+        return result
```
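The `SessionRun` hunks above are formatting-only, so the lifecycle API they show is unchanged. A hedged sketch of how those methods fit together, using only what is visible in this diff (the import path is assumed):

```python
import asyncio

from chuk_ai_session_manager.models.session_run import RunStatus, SessionRun  # assumed path


async def demo() -> None:
    # New runs start out PENDING with whatever metadata you pass in.
    run = await SessionRun.create(metadata={"trigger": "user_message"})
    assert run.status == RunStatus.PENDING

    await run.mark_running()
    await run.add_tool_call("tool-event-123")  # associate a tool-call event id
    await run.set_metadata("model", "gpt-3.5-turbo")

    # get_duration() returns None until ended_at has been set.
    print(await run.get_duration())
    print(await run.to_dict())


asyncio.run(demo())
```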