chuk-ai-session-manager 0.1.1__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chuk_ai_session_manager/__init__.py +57 -0
- chuk_ai_session_manager/exceptions.py +129 -0
- chuk_ai_session_manager/infinite_conversation.py +316 -0
- chuk_ai_session_manager/models/__init__.py +44 -0
- chuk_ai_session_manager/models/event_source.py +8 -0
- chuk_ai_session_manager/models/event_type.py +9 -0
- chuk_ai_session_manager/models/session.py +316 -0
- chuk_ai_session_manager/models/session_event.py +166 -0
- chuk_ai_session_manager/models/session_metadata.py +37 -0
- chuk_ai_session_manager/models/session_run.py +115 -0
- chuk_ai_session_manager/models/token_usage.py +316 -0
- chuk_ai_session_manager/sample_tools.py +194 -0
- chuk_ai_session_manager/session_aware_tool_processor.py +178 -0
- chuk_ai_session_manager/session_prompt_builder.py +474 -0
- chuk_ai_session_manager/storage/__init__.py +44 -0
- chuk_ai_session_manager/storage/base.py +50 -0
- chuk_ai_session_manager/storage/providers/__init__.py +0 -0
- chuk_ai_session_manager/storage/providers/file.py +348 -0
- chuk_ai_session_manager/storage/providers/memory.py +96 -0
- chuk_ai_session_manager/storage/providers/redis.py +295 -0
- chuk_ai_session_manager-0.1.1.dist-info/METADATA +501 -0
- chuk_ai_session_manager-0.1.1.dist-info/RECORD +24 -0
- chuk_ai_session_manager-0.1.1.dist-info/WHEEL +5 -0
- chuk_ai_session_manager-0.1.1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,57 @@
# chuk_ai_session_manager/__init__.py
"""
chuk session manager package.

This package provides session management capabilities.
"""
# Import core components for easier access
try:
    from chuk_ai_session_manager.models.event_source import EventSource
    from chuk_ai_session_manager.models.event_type import EventType
    from chuk_ai_session_manager.models.session import Session
    from chuk_ai_session_manager.models.session_event import SessionEvent
    from chuk_ai_session_manager.models.session_metadata import SessionMetadata
    from chuk_ai_session_manager.models.session_run import SessionRun, RunStatus
except ImportError:
    # During package setup or circular imports, these might not be available
    pass

# Import storage components
try:
    from chuk_ai_session_manager.storage.base import SessionStoreInterface, SessionStoreProvider
except ImportError:
    # During package setup or circular imports, these might not be available
    pass

# Import exceptions
try:
    from chuk_ai_session_manager.exceptions import (
        SessionManagerError,
        SessionNotFound,
        SessionAlreadyExists,
        InvalidSessionOperation,
    )
except ImportError:
    # During package setup or circular imports, these might not be available
    pass

__version__ = "0.1.0"

# Define __all__ only if imports succeeded
__all__ = []

# Check which imports succeeded and add them to __all__
for name in [
    # Models
    'EventSource', 'EventType', 'Session', 'SessionEvent',
    'SessionMetadata', 'SessionRun', 'RunStatus',

    # Storage
    'SessionStoreInterface', 'SessionStoreProvider',

    # Exceptions
    'SessionManagerError', 'SessionNotFound',
    'SessionAlreadyExists', 'InvalidSessionOperation',
]:
    if name in globals():
        __all__.append(name)
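Usage sketch (not part of the package): because the imports above are guarded, the top-level namespace only exposes the names whose imports succeeded, and __all__ is rebuilt from whatever is actually present. Note that the module still ships __version__ = "0.1.0" even though this wheel is published as 0.1.1. The snippet below is illustrative only.

import chuk_ai_session_manager as casm

print(casm.__version__)      # prints "0.1.0" as shipped in this 0.1.1 wheel
print(sorted(casm.__all__))  # only the names that imported successfully appear here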
@@ -0,0 +1,129 @@
# chuk_ai_session_manager/exceptions.py
"""
Exception classes for the chuk session manager.

This module defines the exception hierarchy used throughout the
session manager to provide specific, informative error conditions
for various failure modes.
"""

class SessionManagerError(Exception):
    """
    Base exception for all session manager errors.

    All other session manager exceptions inherit from this class,
    making it easy to catch all session-related errors with a single
    except clause if needed.
    """
    pass


class SessionNotFound(SessionManagerError):
    """
    Raised when the requested session ID is not found in storage.

    This exception is typically raised when:
    - Attempting to retrieve a session with an invalid ID
    - Accessing a session that has been deleted
    - Using an ID that does not conform to expected format
    """
    def __init__(self, session_id=None, message=None):
        self.session_id = session_id
        default_message = f"Session not found: {session_id}" if session_id else "Session not found"
        super().__init__(message or default_message)


class SessionAlreadyExists(SessionManagerError):
    """
    Raised when attempting to create a session with an ID that already exists.

    This exception is typically raised during session creation when:
    - Explicitly setting an ID that conflicts with an existing session
    - A UUID collision occurs (extremely rare)
    """
    def __init__(self, session_id=None, message=None):
        self.session_id = session_id
        default_message = f"Session already exists: {session_id}" if session_id else "Session already exists"
        super().__init__(message or default_message)


class InvalidSessionOperation(SessionManagerError):
    """
    Raised when attempting an invalid operation on a session.

    This exception is typically raised when:
    - Performing operations on a closed or archived session
    - Adding events with incorrect sequencing or relationships
    - Attempting unsupported operations in the current session state
    """
    def __init__(self, operation=None, reason=None, message=None):
        self.operation = operation
        self.reason = reason

        if message:
            default_message = message
        elif operation and reason:
            default_message = f"Invalid operation '{operation}': {reason}"
        elif operation:
            default_message = f"Invalid operation: {operation}"
        else:
            default_message = "Invalid session operation"

        super().__init__(default_message)


class TokenLimitExceeded(SessionManagerError):
    """
    Raised when a token limit is exceeded in a session operation.

    This exception is typically raised when:
    - Adding content that would exceed configured token limits
    - Attempting to generate a prompt that exceeds model token limits
    """
    def __init__(self, limit=None, actual=None, message=None):
        self.limit = limit
        self.actual = actual

        if message:
            default_message = message
        elif limit and actual:
            default_message = f"Token limit exceeded: {actual} > {limit}"
        else:
            default_message = "Token limit exceeded"

        super().__init__(default_message)


class StorageError(SessionManagerError):
    """
    Raised when a session storage operation fails.

    This is a base class for more specific storage errors.
    It can be raised directly for general storage failures.
    """
    pass


class ToolProcessingError(SessionManagerError):
    """
    Raised when tool processing fails in a session.

    This exception is typically raised when:
    - A tool execution fails after all retries
    - Invalid tool parameters are provided
    - Tool results cannot be properly processed
    """
    def __init__(self, tool_name=None, reason=None, message=None):
        self.tool_name = tool_name
        self.reason = reason

        if message:
            default_message = message
        elif tool_name and reason:
            default_message = f"Tool '{tool_name}' processing error: {reason}"
        elif tool_name:
            default_message = f"Tool '{tool_name}' processing error"
        else:
            default_message = "Tool processing error"

        super().__init__(default_message)
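Usage sketch (not part of the package): since every exception above derives from SessionManagerError, callers can handle specific failures first and fall back to the base class in a single final clause. The lookup below is a stand-in raise, not real package behaviour.

from chuk_ai_session_manager.exceptions import (
    SessionManagerError, SessionNotFound, TokenLimitExceeded
)

def load_or_report(session_id: str) -> None:
    try:
        # Stand-in for a real store lookup that fails to find the session.
        raise SessionNotFound(session_id=session_id)
    except TokenLimitExceeded as exc:
        print(f"over budget: {exc.actual} > {exc.limit}")
    except SessionNotFound as exc:
        print(f"missing session: {exc.session_id}")
    except SessionManagerError as exc:
        print(f"other session error: {exc}")

load_or_report("abc123")  # prints "missing session: abc123"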
@@ -0,0 +1,316 @@
# chuk_ai_session_manager/infinite_conversation.py
"""
InfiniteConversationManager for handling conversations that exceed token limits.

This module provides support for managing conversations that span multiple
session segments, with automatic summarization and context building.
"""
from __future__ import annotations
from enum import Enum
from typing import List, Dict, Any, Optional, Callable, Tuple, Union
import logging

from chuk_ai_session_manager.models.session import Session
from chuk_ai_session_manager.models.session_event import SessionEvent
from chuk_ai_session_manager.models.event_type import EventType
from chuk_ai_session_manager.models.event_source import EventSource
from chuk_ai_session_manager.storage import SessionStoreProvider

# Type for LLM function callbacks
LLMCallbackAsync = Callable[[List[Dict[str, str]], str], Any]

logger = logging.getLogger(__name__)


class SummarizationStrategy(str, Enum):
    """Different strategies for summarizing conversation segments."""
    BASIC = "basic"                  # General overview of the conversation
    KEY_POINTS = "key_points"        # Focus on key information points
    TOPIC_BASED = "topic_based"      # Organize by topics discussed
    QUERY_FOCUSED = "query_focused"  # Focus on user's questions


class InfiniteConversationManager:
    """
    Manages conversations that can theoretically be infinite in length.

    This manager automatically segments conversations that exceed token limits
    by creating a chain of sessions with summaries that provide context.
    """

    def __init__(
        self,
        token_threshold: int = 3000,
        max_turns_per_segment: int = 20,
        summarization_strategy: SummarizationStrategy = SummarizationStrategy.BASIC
    ):
        """
        Initialize the infinite conversation manager.

        Args:
            token_threshold: Maximum tokens before creating a new segment
            max_turns_per_segment: Maximum conversation turns per segment
            summarization_strategy: Strategy to use for summarization
        """
        self.token_threshold = token_threshold
        self.max_turns_per_segment = max_turns_per_segment
        self.summarization_strategy = summarization_strategy

    async def process_message(
        self,
        session_id: str,
        message: str,
        source: EventSource,
        llm_callback: LLMCallbackAsync,
        model: str = "gpt-3.5-turbo"
    ) -> str:
        """
        Process a new message in the conversation.

        This method:
        1. Adds the message to the current session
        2. Checks if token threshold is exceeded
        3. If needed, creates a summary and starts a new session

        Args:
            session_id: ID of the current session
            message: The message content
            source: Source of the message (USER or LLM)
            llm_callback: Async callback for LLM calls
            model: The model to use for token counting

        Returns:
            The current session ID (may be a new one if threshold was exceeded)
        """
        # Get the store
        store = SessionStoreProvider.get_store()

        # Get the current session
        session = await store.get(session_id)
        if not session:
            raise ValueError(f"Session {session_id} not found")

        # Add the message to the session
        event = await SessionEvent.create_with_tokens(
            message=message,
            prompt=message if source == EventSource.USER else "",
            completion=message if source == EventSource.LLM else "",
            model=model,
            source=source,
            type=EventType.MESSAGE
        )
        await session.add_event_and_save(event)

        # Check if we've exceeded the token threshold
        if await self._should_create_new_segment(session):
            logger.info(f"Token threshold exceeded for session {session_id}. Creating new segment.")

            # Create a summary of the current session
            summary = await self._create_summary(session, llm_callback)

            # Add the summary to the current session
            summary_event = SessionEvent(
                message=summary,
                source=EventSource.SYSTEM,
                type=EventType.SUMMARY
            )
            await session.add_event_and_save(summary_event)

            # Create a new session with the current as parent
            new_session = await Session.create(parent_id=session_id)

            # Return the new session ID
            return new_session.id

        # No new segment needed, return the current session ID
        return session_id

    async def _should_create_new_segment(self, session: Session) -> bool:
        """
        Determine if we should create a new session segment.

        Args:
            session: The current session

        Returns:
            True if a new segment should be created
        """
        # Check token count
        if session.total_tokens >= self.token_threshold:
            return True

        # Check turn count
        message_events = [e for e in session.events if e.type == EventType.MESSAGE]
        if len(message_events) >= self.max_turns_per_segment:
            return True

        return False

    async def _create_summary(
        self,
        session: Session,
        llm_callback: LLMCallbackAsync
    ) -> str:
        """
        Create a summary of the session.

        Args:
            session: The session to summarize
            llm_callback: Async callback for LLM calls

        Returns:
            A summary string
        """
        # Get message events
        message_events = [e for e in session.events if e.type == EventType.MESSAGE]

        # Create a conversation history for the LLM
        messages = []

        # Add system prompt based on summarization strategy
        system_prompt = self._get_summarization_prompt()
        messages.append({"role": "system", "content": system_prompt})

        # Add the conversation history
        for event in message_events:
            role = "user" if event.source == EventSource.USER else "assistant"
            content = event.message
            messages.append({"role": role, "content": content})

        # Call the LLM to generate a summary
        summary = await llm_callback(messages)
        return summary

    def _get_summarization_prompt(self) -> str:
        """
        Get the prompt for summarization based on the selected strategy.

        Returns:
            A prompt string
        """
        if self.summarization_strategy == SummarizationStrategy.BASIC:
            return "Please provide a concise summary of this conversation. Focus on the main topic and key information exchanged."

        elif self.summarization_strategy == SummarizationStrategy.KEY_POINTS:
            return "Summarize this conversation by identifying and listing the key points discussed. Focus on the most important information exchanged."

        elif self.summarization_strategy == SummarizationStrategy.TOPIC_BASED:
            return "Create a summary of this conversation organized by topics discussed. Identify the main subject areas and the key points within each."

        elif self.summarization_strategy == SummarizationStrategy.QUERY_FOCUSED:
            return "Summarize this conversation by focusing on the user's main questions and the key answers provided. Prioritize what the user was seeking to learn."

        else:
            return "Please provide a brief summary of this conversation."

    async def build_context_for_llm(
        self,
        session_id: str,
        max_messages: int = 10,
        include_summaries: bool = True
    ) -> List[Dict[str, str]]:
        """
        Build context for an LLM call from the current session and its ancestors.

        Args:
            session_id: ID of the current session
            max_messages: Maximum number of recent messages to include
            include_summaries: Whether to include summaries from parent sessions

        Returns:
            A list of messages suitable for an LLM call
        """
        # Get the store
        store = SessionStoreProvider.get_store()

        # Get the current session
        session = await store.get(session_id)
        if not session:
            raise ValueError(f"Session {session_id} not found")

        # Initialize context
        context = []

        # Add summaries from ancestor sessions if requested
        if include_summaries:
            # Get all ancestors
            ancestors = await session.ancestors()

            # Get summaries from ancestors (most distant to most recent)
            summaries = []
            for ancestor in ancestors:
                summary_event = next(
                    (e for e in reversed(ancestor.events) if e.type == EventType.SUMMARY),
                    None
                )
                if summary_event:
                    summaries.append(summary_event.message)

            # If we have summaries, add them as a system message
            if summaries:
                context.append({
                    "role": "system",
                    "content": "Previous conversation context: " + " ".join(summaries)
                })

        # Get recent messages from the current session
        message_events = [e for e in session.events if e.type == EventType.MESSAGE]
        recent_messages = message_events[-max_messages:] if len(message_events) > max_messages else message_events

        # Add messages to context
        for event in recent_messages:
            role = "user" if event.source == EventSource.USER else "assistant"
            content = event.message
            context.append({"role": role, "content": content})

        return context

    async def get_session_chain(self, session_id: str) -> List[Session]:
        """
        Return sessions from root → … → current.

        The `Session.ancestors()` helper usually returns the chain in
        *reverse* (closest parent first). Tests expect root-first order,
        so we reverse it and then append the current session.
        """
        store = SessionStoreProvider.get_store()
        session = await store.get(session_id)
        if not session:
            raise ValueError(f"Session {session_id} not found")

        ancestors = await session.ancestors()
        # ensure order root → … → parent
        ancestors = list(reversed(ancestors))
        return ancestors + [session]

    async def get_full_conversation_history(
        self,
        session_id: str
    ) -> List[Tuple[str, EventSource, str]]:
        """
        Get the full conversation history across all session segments.

        Args:
            session_id: ID of the current session

        Returns:
            A list of (role, source, content) tuples representing the conversation
        """
        # Get the session chain
        sessions = await self.get_session_chain(session_id)

        # Initialize history
        history = []

        # Process each session in the chain
        for session in sessions:
            # Get message events from this session
            message_events = [e for e in session.events if e.type == EventType.MESSAGE]

            # Add to history
            for event in message_events:
                role = "user" if event.source == EventSource.USER else "assistant"
                content = event.message
                history.append((role, event.source, content))

        return history
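Usage sketch (not part of the package): a minimal driver for the manager above, assuming a session store has already been configured through SessionStoreProvider and that existing_session_id refers to a session in it; the summarize_with_llm callback is a stand-in for a real async LLM call. process_message returns the session ID to keep using, which may differ from the one passed in if a new segment was started.

import asyncio

from chuk_ai_session_manager.infinite_conversation import (
    InfiniteConversationManager, SummarizationStrategy
)
from chuk_ai_session_manager.models.event_source import EventSource

async def summarize_with_llm(messages):
    # Stand-in for a real async LLM call; invoked only when a segment is summarized.
    return "Summary of the conversation so far."

async def main(existing_session_id: str) -> None:
    manager = InfiniteConversationManager(
        token_threshold=3000,
        summarization_strategy=SummarizationStrategy.KEY_POINTS,
    )
    # Record a user message; a new segment ID comes back if the threshold was hit.
    current_id = await manager.process_message(
        existing_session_id, "Hello!", EventSource.USER, summarize_with_llm
    )
    # Build an LLM-ready context (ancestor summaries plus recent messages).
    context = await manager.build_context_for_llm(current_id)
    print(context)

# asyncio.run(main("some-existing-session-id"))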
@@ -0,0 +1,44 @@
# chuk_ai_session_manager/models/__init__.py
"""
Core models for the chuk session manager.
"""
# Import each model separately to avoid circular imports
try:
    from chuk_ai_session_manager.models.event_source import EventSource
except ImportError:
    pass

try:
    from chuk_ai_session_manager.models.event_type import EventType
except ImportError:
    pass

try:
    from chuk_ai_session_manager.models.session_event import SessionEvent
except ImportError:
    pass

try:
    from chuk_ai_session_manager.models.session_metadata import SessionMetadata
except ImportError:
    pass

try:
    from chuk_ai_session_manager.models.session_run import SessionRun, RunStatus
except ImportError:
    pass

# Import Session last since it might depend on the above
try:
    from chuk_ai_session_manager.models.session import Session
except ImportError:
    pass

# Define __all__ based on what was successfully imported
__all__ = []

# Check which imports succeeded and add them to __all__
for name in ['EventSource', 'EventType', 'SessionEvent', 'SessionMetadata',
             'SessionRun', 'RunStatus', 'Session']:
    if name in globals():
        __all__.append(name)
@@ -0,0 +1,9 @@
# chuk_ai_session_manager/models/event_type.py
from enum import Enum
class EventType(str, Enum):
    """Type of the session event."""
    MESSAGE = "message"
    SUMMARY = "summary"
    TOOL_CALL = "tool_call"
    REFERENCE = "reference"
    CONTEXT_BRIDGE = "context_bridge"