chuk-ai-session-manager 0.3__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,442 +1,80 @@
- # chuk_ai_session_manager/api/simple_api.py
+ # src/chuk_ai_session_manager/api/simple_api.py
  """
- Unified SessionManager with built-in infinite context support.
+ Simple API convenience functions for the CHUK AI Session Manager.
+
+ This module provides easy-to-use functions for common session management tasks,
+ building on top of the SessionManager class.

  Usage:
-     from chuk_ai_session_manager import SessionManager
+     from chuk_ai_session_manager import track_conversation

-     # Regular session
-     sm = SessionManager()
+     # Quick tracking
+     await track_conversation("Hello!", "Hi there!")

-     # Infinite context session
-     sm = SessionManager(infinite_context=True)
+     # Track with model info
+     await track_conversation(
+         "What's the weather?",
+         "It's sunny and 72°F",
+         model="gpt-4",
+         provider="openai"
+     )

-     # Everything else is identical
-     await sm.user_says("Hello!")
-     await sm.ai_responds("Hi there!", model="gpt-4")
+     # Infinite context
+     await track_infinite_conversation(
+         "Tell me a long story",
+         "Once upon a time...",
+         token_threshold=4000
+     )
  """

- from __future__ import annotations
  import asyncio
  import logging
  from typing import Any, Dict, List, Optional, Union, Callable
- from datetime import datetime

- from chuk_ai_session_manager.models.session import Session
- from chuk_ai_session_manager.models.session_event import SessionEvent
- from chuk_ai_session_manager.models.event_source import EventSource
- from chuk_ai_session_manager.models.event_type import EventType
- from chuk_ai_session_manager.session_storage import get_backend, ChukSessionsStore
+ from chuk_ai_session_manager.session_manager import SessionManager

  logger = logging.getLogger(__name__)

- class SessionManager:
-     """
-     Unified session manager with built-in infinite context support.
-
-     Automatically handles session segmentation, summarization, and context
-     preservation when infinite_context=True is enabled.
-     """
-
-     def __init__(
-         self,
-         session_id: Optional[str] = None,
-         infinite_context: bool = False,
-         token_threshold: int = 4000,
-         max_turns_per_segment: int = 20
-     ):
-         """
-         Initialize a session manager.
-
-         Args:
-             session_id: Use existing session or create new one
-             infinite_context: Enable automatic infinite context handling
-             token_threshold: Token limit before creating new session (infinite mode)
-             max_turns_per_segment: Turn limit before creating new session (infinite mode)
-         """
-         self._session: Optional[Session] = None
-         self._session_id = session_id
-         self._is_new = session_id is None
-
-         # Infinite context settings
-         self._infinite_context = infinite_context
-         self._token_threshold = token_threshold
-         self._max_turns_per_segment = max_turns_per_segment
-
-         # Infinite context state
-         self._session_chain: List[str] = []
-         self._full_conversation: List[Dict[str, Any]] = []
-         self._total_segments = 1
-
-     @property
-     def session_id(self) -> str:
-         """Get the current session ID."""
-         if self._session:
-             return self._session.id
-         elif self._session_id:
-             return self._session_id
-         else:
-             import uuid
-             self._session_id = str(uuid.uuid4())
-             return self._session_id
-
-     @property
-     def is_infinite(self) -> bool:
-         """Check if infinite context is enabled."""
-         return self._infinite_context
-
-     async def _ensure_session(self) -> Session:
-         """Ensure we have a session, creating one if needed."""
-         if self._session is None:
-             backend = get_backend()
-             store = ChukSessionsStore(backend)
-
-             if self._is_new:
-                 self._session = await Session.create()
-                 self._session_id = self._session.id
-
-                 # Always save new sessions immediately
-                 await store.save(self._session)
-
-                 # Initialize session chain for infinite context
-                 if self._infinite_context:
-                     self._session_chain = [self._session_id]
-             else:
-                 self._session = await store.get(self._session_id)
-                 if self._session is None:
-                     raise ValueError(f"Session {self._session_id} not found")
-         return self._session
-
-     async def _should_create_new_segment(self) -> bool:
-         """Check if we should create a new session segment."""
-         if not self._infinite_context:
-             return False
-
-         session = await self._ensure_session()
-
-         # Check token threshold
-         if session.total_tokens >= self._token_threshold:
-             return True
-
-         # Check turn threshold
-         message_events = [e for e in session.events if e.type == EventType.MESSAGE]
-         if len(message_events) >= self._max_turns_per_segment:
-             return True
-
-         return False
-
-     async def _create_summary(self) -> str:
-         """Create a summary of the current session."""
-         session = await self._ensure_session()
-         message_events = [e for e in session.events if e.type == EventType.MESSAGE]
-
-         # Simple summary generation
-         user_messages = [e for e in message_events if e.source == EventSource.USER]
-
-         topics = []
-         for event in user_messages:
-             content = str(event.message)
-             if "?" in content:
-                 question = content.split("?")[0].strip()
-                 if len(question) > 10:
-                     topics.append(question[:50])
-
-         if topics:
-             summary = f"User discussed: {'; '.join(topics[:3])}"
-             if len(topics) > 3:
-                 summary += f" and {len(topics) - 3} other topics"
-         else:
-             summary = f"Conversation with {len(user_messages)} user messages and {len(message_events) - len(user_messages)} responses"
-
-         return summary
-
-     async def _create_new_segment(self) -> str:
-         """Create a new session segment with summary."""
-         # Create summary of current session
-         summary = await self._create_summary()
-
-         # Add summary to current session
-         summary_event = SessionEvent(
-             message=summary,
-             source=EventSource.SYSTEM,
-             type=EventType.SUMMARY
-         )
-         current_session = await self._ensure_session()
-         await current_session.add_event_and_save(summary_event)
-
-         # Create new session with current as parent
-         new_session = await Session.create(parent_id=self._session_id)
-
-         # Update our state
-         old_session_id = self._session_id
-         self._session_id = new_session.id
-         self._session = new_session
-         self._session_chain.append(self._session_id)
-         self._total_segments += 1
-
-         logger.info(f"Created new session segment: {old_session_id} -> {self._session_id}")
-         return self._session_id
-
-     async def user_says(self, message: str, **metadata) -> str:
-         """
-         Track a user message.
-
-         Args:
-             message: What the user said
-             **metadata: Optional metadata to attach
-
-         Returns:
-             The current session ID (may change in infinite mode)
-         """
-         # Check for segmentation before adding message
-         if await self._should_create_new_segment():
-             await self._create_new_segment()
-
-         session = await self._ensure_session()
-
-         # Create and add the event
-         event = await SessionEvent.create_with_tokens(
-             message=message,
-             prompt=message,
-             model="gpt-4o-mini",
-             source=EventSource.USER,
-             type=EventType.MESSAGE
-         )
-
-         # Add metadata
-         for key, value in metadata.items():
-             await event.set_metadata(key, value)
-
-         await session.add_event_and_save(event)
-
-         # Track in full conversation for infinite context
-         if self._infinite_context:
-             self._full_conversation.append({
-                 "role": "user",
-                 "content": message,
-                 "timestamp": event.timestamp.isoformat(),
-                 "session_id": self._session_id
-             })
-
-         return self._session_id
-
-     async def ai_responds(
-         self,
-         response: str,
-         model: str = "unknown",
-         provider: str = "unknown",
-         **metadata
-     ) -> str:
-         """
-         Track an AI response.
-
-         Args:
-             response: The AI's response
-             model: Model name
-             provider: Provider name
-             **metadata: Optional metadata
-
-         Returns:
-             The current session ID (may change in infinite mode)
-         """
-         # Check for segmentation before adding message
-         if await self._should_create_new_segment():
-             await self._create_new_segment()
-
-         session = await self._ensure_session()
-
-         # Create and add the event
-         event = await SessionEvent.create_with_tokens(
-             message=response,
-             prompt="",
-             completion=response,
-             model=model,
-             source=EventSource.LLM,
-             type=EventType.MESSAGE
-         )
-
-         # Add metadata
-         full_metadata = {
-             "model": model,
-             "provider": provider,
-             "timestamp": datetime.now().isoformat(),
-             **metadata
-         }
-
-         for key, value in full_metadata.items():
-             await event.set_metadata(key, value)
-
-         await session.add_event_and_save(event)
-
-         # Track in full conversation for infinite context
-         if self._infinite_context:
-             self._full_conversation.append({
-                 "role": "assistant",
-                 "content": response,
-                 "timestamp": event.timestamp.isoformat(),
-                 "session_id": self._session_id,
-                 "model": model,
-                 "provider": provider
-             })
-
-         return self._session_id
-
-     async def tool_used(
-         self,
-         tool_name: str,
-         arguments: Dict[str, Any],
-         result: Any,
-         error: Optional[str] = None,
-         **metadata
-     ) -> str:
-         """Track a tool call."""
-         session = await self._ensure_session()
-
-         tool_message = {
-             "tool": tool_name,
-             "arguments": arguments,
-             "result": result,
-             "error": error,
-             "success": error is None
-         }
-
-         event = SessionEvent(
-             message=tool_message,
-             source=EventSource.SYSTEM,
-             type=EventType.TOOL_CALL
-         )
-
-         for key, value in metadata.items():
-             await event.set_metadata(key, value)
-
-         await session.add_event_and_save(event)
-         return self._session_id
-
-     async def get_conversation(self, include_all_segments: bool = None) -> List[Dict[str, Any]]:
-         """
-         Get conversation history.
-
-         Args:
-             include_all_segments: Include all segments (defaults to infinite_context setting)
-
-         Returns:
-             List of conversation turns
-         """
-         if include_all_segments is None:
-             include_all_segments = self._infinite_context
-
-         if self._infinite_context and include_all_segments:
-             # Return full conversation across all segments
-             return self._full_conversation.copy()
-         else:
-             # Return current session only
-             session = await self._ensure_session()
-             conversation = []
-             for event in session.events:
-                 if event.type == EventType.MESSAGE:
-                     turn = {
-                         "role": "user" if event.source == EventSource.USER else "assistant",
-                         "content": event.message,
-                         "timestamp": event.timestamp.isoformat()
-                     }
-                     conversation.append(turn)
-
-             return conversation
-
-     async def get_session_chain(self) -> List[str]:
-         """Get the chain of session IDs (infinite context only)."""
-         if self._infinite_context:
-             return self._session_chain.copy()
-         else:
-             return [self._session_id]
-
-     async def get_stats(self, include_all_segments: bool = None) -> Dict[str, Any]:
-         """
-         Get conversation statistics.
-
-         Args:
-             include_all_segments: Include all segments (defaults to infinite_context setting)
-
-         Returns:
-             Dictionary with conversation stats
-         """
-         if include_all_segments is None:
-             include_all_segments = self._infinite_context
-
-         session = await self._ensure_session()
-
-         if self._infinite_context and include_all_segments:
-             # Calculate stats across all segments
-             user_messages = len([t for t in self._full_conversation if t["role"] == "user"])
-             ai_messages = len([t for t in self._full_conversation if t["role"] == "assistant"])
-
-             # Get token/cost stats by loading all sessions in chain
-             total_tokens = 0
-             total_cost = 0.0
-             total_events = 0
-
-             backend = get_backend()
-             store = ChukSessionsStore(backend)
-
-             for session_id in self._session_chain:
-                 try:
-                     sess = await store.get(session_id)
-                     if sess:
-                         total_tokens += sess.total_tokens
-                         total_cost += sess.total_cost
-                         total_events += len(sess.events)
-                 except Exception:
-                     # Skip if can't load session
-                     pass
-
-             return {
-                 "session_id": self._session_id,
-                 "session_segments": self._total_segments,
-                 "session_chain": self._session_chain,
-                 "total_events": total_events,
-                 "user_messages": user_messages,
-                 "ai_messages": ai_messages,
-                 "tool_calls": 0,  # TODO: Track tools in full conversation
-                 "total_tokens": total_tokens,
-                 "estimated_cost": total_cost,
-                 "created_at": session.metadata.created_at.isoformat(),
-                 "last_update": session.last_update_time.isoformat(),
-                 "infinite_context": True
-             }
-         else:
-             # Current session stats only
-             user_messages = sum(1 for e in session.events
-                                 if e.type == EventType.MESSAGE and e.source == EventSource.USER)
-             ai_messages = sum(1 for e in session.events
-                               if e.type == EventType.MESSAGE and e.source == EventSource.LLM)
-             tool_calls = sum(1 for e in session.events if e.type == EventType.TOOL_CALL)
-
-             return {
-                 "session_id": session.id,
-                 "session_segments": 1,
-                 "total_events": len(session.events),
-                 "user_messages": user_messages,
-                 "ai_messages": ai_messages,
-                 "tool_calls": tool_calls,
-                 "total_tokens": session.total_tokens,
-                 "estimated_cost": session.total_cost,
-                 "created_at": session.metadata.created_at.isoformat(),
-                 "last_update": session.last_update_time.isoformat(),
-                 "infinite_context": self._infinite_context
-             }
-
-

- # Convenience functions remain the same but simpler
  async def track_conversation(
      user_message: str,
      ai_response: str,
      model: str = "unknown",
      provider: str = "unknown",
+     session_id: Optional[str] = None,
      infinite_context: bool = False,
      token_threshold: int = 4000
  ) -> str:
-     """Quick way to track a single conversation turn."""
+     """
+     Quick way to track a single conversation turn.
+
+     This is the simplest way to track a conversation exchange between
+     a user and an AI assistant.
+
+     Args:
+         user_message: What the user said.
+         ai_response: What the AI responded.
+         model: The model used (e.g., "gpt-4", "claude-3").
+         provider: The provider (e.g., "openai", "anthropic").
+         session_id: Optional existing session ID to continue.
+         infinite_context: Enable infinite context support.
+         token_threshold: Token limit for infinite context segmentation.
+
+     Returns:
+         The session ID (useful for continuing the conversation).
+
+     Example:
+         ```python
+         session_id = await track_conversation(
+             "What's the capital of France?",
+             "The capital of France is Paris.",
+             model="gpt-3.5-turbo",
+             provider="openai"
+         )
+         ```
+     """
      sm = SessionManager(
+         session_id=session_id,
          infinite_context=infinite_context,
          token_threshold=token_threshold
      )
@@ -444,24 +82,60 @@ async def track_conversation(
      session_id = await sm.ai_responds(ai_response, model=model, provider=provider)
      return session_id

+
  async def track_llm_call(
      user_input: str,
      llm_function: Callable[[str], Union[str, Any]],
      model: str = "unknown",
      provider: str = "unknown",
      session_manager: Optional[SessionManager] = None,
+     session_id: Optional[str] = None,
      infinite_context: bool = False,
      token_threshold: int = 4000
  ) -> tuple[str, str]:
-     """Track an LLM call automatically."""
+     """
+     Track an LLM call automatically.
+
+     This function wraps your LLM call and automatically tracks both the
+     input and output in a session.
+
+     Args:
+         user_input: The user's input to the LLM.
+         llm_function: Function that calls the LLM (sync or async).
+         model: The model being used.
+         provider: The provider being used.
+         session_manager: Optional existing SessionManager to use.
+         session_id: Optional session ID if not using session_manager.
+         infinite_context: Enable infinite context support.
+         token_threshold: Token limit for infinite context.
+
+     Returns:
+         Tuple of (response_text, session_id).
+
+     Example:
+         ```python
+         async def call_openai(prompt):
+             # Your OpenAI call here
+             return response.choices[0].message.content
+
+         response, session_id = await track_llm_call(
+             "Explain quantum computing",
+             call_openai,
+             model="gpt-4",
+             provider="openai"
+         )
+         ```
+     """
      if session_manager is None:
          session_manager = SessionManager(
+             session_id=session_id,
              infinite_context=infinite_context,
              token_threshold=token_threshold
          )

      await session_manager.user_says(user_input)

+     # Call the LLM function
      if asyncio.iscoroutinefunction(llm_function):
          ai_response = await llm_function(user_input)
      else:
@@ -469,10 +143,13 @@ async def track_llm_call(

      # Handle different response formats
      if isinstance(ai_response, dict) and "choices" in ai_response:
+         # OpenAI format
          response_text = ai_response["choices"][0]["message"]["content"]
      elif hasattr(ai_response, "content"):
+         # Object with content attribute
          response_text = ai_response.content
      else:
+         # Plain string or other
          response_text = str(ai_response)

      session_id = await session_manager.ai_responds(
@@ -481,27 +158,201 @@ async def track_llm_call(

      return response_text, session_id

+
  async def quick_conversation(
      user_message: str,
      ai_response: str,
+     model: str = "unknown",
+     provider: str = "unknown",
      infinite_context: bool = False
  ) -> Dict[str, Any]:
-     """Quickest way to track a conversation and get basic stats."""
-     session_id = await track_conversation(
-         user_message, ai_response, infinite_context=infinite_context
-     )
-     sm = SessionManager(session_id, infinite_context=infinite_context)
+     """
+     Quickest way to track a conversation and get basic stats.
+
+     This is perfect for one-off tracking where you want immediate
+     statistics about the conversation.
+
+     Args:
+         user_message: What the user said.
+         ai_response: What the AI responded.
+         model: The model used.
+         provider: The provider used.
+         infinite_context: Enable infinite context support.
+
+     Returns:
+         Dictionary with conversation statistics.
+
+     Example:
+         ```python
+         stats = await quick_conversation(
+             "Hello!",
+             "Hi there! How can I help you today?",
+             model="gpt-3.5-turbo"
+         )
+         print(f"Tokens used: {stats['total_tokens']}")
+         print(f"Cost: ${stats['estimated_cost']:.4f}")
+         ```
+     """
+     # Create a new session manager
+     sm = SessionManager(infinite_context=infinite_context)
+
+     # Track the conversation
+     await sm.user_says(user_message)
+     await sm.ai_responds(ai_response, model=model, provider=provider)
+
+     # Return stats directly
      return await sm.get_stats()

+
  async def track_infinite_conversation(
      user_message: str,
      ai_response: str,
      model: str = "unknown",
      provider: str = "unknown",
-     token_threshold: int = 4000
+     session_id: Optional[str] = None,
+     token_threshold: int = 4000,
+     max_turns: int = 20
  ) -> str:
-     """Track a conversation with infinite context support."""
+     """
+     Track a conversation with infinite context support.
+
+     This automatically handles long conversations by creating new
+     session segments when limits are reached, maintaining context
+     through summaries.
+
+     Args:
+         user_message: What the user said.
+         ai_response: What the AI responded.
+         model: The model used.
+         provider: The provider used.
+         session_id: Optional existing session ID to continue.
+         token_threshold: Create new segment after this many tokens.
+         max_turns: Create new segment after this many turns.
+
+     Returns:
+         The current session ID (may be different if segmented).
+
+     Example:
+         ```python
+         # First message
+         session_id = await track_infinite_conversation(
+             "Tell me about the history of computing",
+             "Computing history begins with...",
+             model="gpt-4"
+         )
+
+         # Continue the conversation
+         session_id = await track_infinite_conversation(
+             "What about quantum computers?",
+             "Quantum computing represents...",
+             session_id=session_id,
+             model="gpt-4"
+         )
+         ```
+     """
      return await track_conversation(
-         user_message, ai_response, model=model, provider=provider,
-         infinite_context=True, token_threshold=token_threshold
-     )
+         user_message,
+         ai_response,
+         model=model,
+         provider=provider,
+         session_id=session_id,
+         infinite_context=True,
+         token_threshold=token_threshold
+     )
+
+
+ async def track_tool_use(
+     tool_name: str,
+     arguments: Dict[str, Any],
+     result: Any,
+     session_id: Optional[str] = None,
+     error: Optional[str] = None,
+     **metadata
+ ) -> str:
+     """
+     Track a tool/function call in a session.
+
+     Args:
+         tool_name: Name of the tool that was called.
+         arguments: Arguments passed to the tool.
+         result: Result returned by the tool.
+         session_id: Optional existing session ID.
+         error: Optional error if the tool failed.
+         **metadata: Additional metadata to store.
+
+     Returns:
+         The session ID.
+
+     Example:
+         ```python
+         session_id = await track_tool_use(
+             "calculator",
+             {"operation": "add", "a": 5, "b": 3},
+             {"result": 8},
+             session_id=session_id
+         )
+         ```
+     """
+     sm = SessionManager(session_id=session_id)
+     return await sm.tool_used(
+         tool_name=tool_name,
+         arguments=arguments,
+         result=result,
+         error=error,
+         **metadata
+     )
+
+
+ async def get_session_stats(
+     session_id: str,
+     include_all_segments: bool = False
+ ) -> Dict[str, Any]:
+     """
+     Get statistics for an existing session.
+
+     Args:
+         session_id: The session ID to get stats for.
+         include_all_segments: For infinite context sessions, include all segments.
+
+     Returns:
+         Dictionary with session statistics.
+
+     Example:
+         ```python
+         stats = await get_session_stats("session-123")
+         print(f"Total messages: {stats['total_messages']}")
+         print(f"Total cost: ${stats['estimated_cost']:.4f}")
+         ```
+     """
+     sm = SessionManager(session_id=session_id)
+     return await sm.get_stats(include_all_segments=include_all_segments)
+
+
+ async def get_conversation_history(
+     session_id: str,
+     include_all_segments: bool = False
+ ) -> List[Dict[str, Any]]:
+     """
+     Get the conversation history for a session.
+
+     Args:
+         session_id: The session ID to get history for.
+         include_all_segments: For infinite context sessions, include all segments.
+
+     Returns:
+         List of conversation turns.
+
+     Example:
+         ```python
+         history = await get_conversation_history("session-123")
+         for turn in history:
+             print(f"{turn['role']}: {turn['content']}")
+         ```
+     """
+     sm = SessionManager(session_id=session_id)
+     return await sm.get_conversation(include_all_segments=include_all_segments)
+
+
+ # Backwards compatibility aliases
+ track_llm_interaction = track_llm_call
+ quick_stats = quick_conversation
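
Taken together, 0.4.1 replaces the inlined `SessionManager` class with thin wrappers around `chuk_ai_session_manager.session_manager.SessionManager`, and adds `track_tool_use`, `get_session_stats`, and `get_conversation_history`. The following is a minimal end-to-end sketch of the new surface based only on the signatures and docstring examples shown in this diff; it imports from the `simple_api` module path given in the file header (the module docstring shows only `track_conversation` being imported from the package root), so treat the import path and re-exports as assumptions rather than documented API.

```python
import asyncio

# Module path taken from the diff header; root-level re-exports are not shown here.
from chuk_ai_session_manager.api.simple_api import (
    track_conversation,
    track_tool_use,
    get_session_stats,
)


async def main() -> None:
    # One conversation turn; the returned session ID lets later calls continue it.
    session_id = await track_conversation(
        "What's 5 + 3?",
        "5 + 3 is 8.",
        model="gpt-4",
        provider="openai",
    )

    # Attach a tool call to the same session (new in 0.4.1).
    await track_tool_use(
        "calculator",
        {"operation": "add", "a": 5, "b": 3},
        {"result": 8},
        session_id=session_id,
    )

    # Aggregate statistics; key names follow the docstring examples above.
    stats = await get_session_stats(session_id)
    print(stats["total_tokens"], stats["estimated_cost"])


asyncio.run(main())
```

Code that used the 0.3 pattern (`sm = SessionManager(); await sm.user_says(...); await sm.ai_responds(...)`) maps directly onto these wrappers, each of which constructs a `SessionManager` internally and passes `session_id` through to continue an existing session.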