noesium-0.1.0-py3-none-any.whl → noesium-0.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. noesium/agents/askura_agent/__init__.py +22 -0
  2. noesium/agents/askura_agent/askura_agent.py +480 -0
  3. noesium/agents/askura_agent/conversation.py +164 -0
  4. noesium/agents/askura_agent/extractor.py +175 -0
  5. noesium/agents/askura_agent/memory.py +14 -0
  6. noesium/agents/askura_agent/models.py +239 -0
  7. noesium/agents/askura_agent/prompts.py +202 -0
  8. noesium/agents/askura_agent/reflection.py +234 -0
  9. noesium/agents/askura_agent/summarizer.py +30 -0
  10. noesium/agents/askura_agent/utils.py +6 -0
  11. noesium/agents/deep_research/__init__.py +13 -0
  12. noesium/agents/deep_research/agent.py +398 -0
  13. noesium/agents/deep_research/prompts.py +84 -0
  14. noesium/agents/deep_research/schemas.py +42 -0
  15. noesium/agents/deep_research/state.py +54 -0
  16. noesium/agents/search/__init__.py +5 -0
  17. noesium/agents/search/agent.py +474 -0
  18. noesium/agents/search/state.py +28 -0
  19. noesium/core/__init__.py +1 -1
  20. noesium/core/agent/base.py +10 -2
  21. noesium/core/goalith/decomposer/llm_decomposer.py +1 -1
  22. noesium/core/llm/__init__.py +1 -1
  23. noesium/core/llm/base.py +2 -2
  24. noesium/core/llm/litellm.py +42 -21
  25. noesium/core/llm/llamacpp.py +25 -4
  26. noesium/core/llm/ollama.py +43 -22
  27. noesium/core/llm/openai.py +25 -5
  28. noesium/core/llm/openrouter.py +1 -1
  29. noesium/core/toolify/base.py +9 -2
  30. noesium/core/toolify/config.py +2 -2
  31. noesium/core/toolify/registry.py +21 -5
  32. noesium/core/tracing/opik_tracing.py +7 -7
  33. noesium/core/vector_store/__init__.py +2 -2
  34. noesium/core/vector_store/base.py +1 -1
  35. noesium/core/vector_store/pgvector.py +10 -13
  36. noesium/core/vector_store/weaviate.py +2 -1
  37. noesium/toolkits/__init__.py +1 -0
  38. noesium/toolkits/arxiv_toolkit.py +310 -0
  39. noesium/toolkits/audio_aliyun_toolkit.py +441 -0
  40. noesium/toolkits/audio_toolkit.py +370 -0
  41. noesium/toolkits/bash_toolkit.py +332 -0
  42. noesium/toolkits/document_toolkit.py +454 -0
  43. noesium/toolkits/file_edit_toolkit.py +552 -0
  44. noesium/toolkits/github_toolkit.py +395 -0
  45. noesium/toolkits/gmail_toolkit.py +575 -0
  46. noesium/toolkits/image_toolkit.py +425 -0
  47. noesium/toolkits/memory_toolkit.py +398 -0
  48. noesium/toolkits/python_executor_toolkit.py +334 -0
  49. noesium/toolkits/search_toolkit.py +451 -0
  50. noesium/toolkits/serper_toolkit.py +623 -0
  51. noesium/toolkits/tabular_data_toolkit.py +537 -0
  52. noesium/toolkits/user_interaction_toolkit.py +365 -0
  53. noesium/toolkits/video_toolkit.py +168 -0
  54. noesium/toolkits/wikipedia_toolkit.py +420 -0
  55. {noesium-0.1.0.dist-info → noesium-0.2.0.dist-info}/METADATA +56 -48
  56. {noesium-0.1.0.dist-info → noesium-0.2.0.dist-info}/RECORD +59 -23
  57. {noesium-0.1.0.dist-info → noesium-0.2.0.dist-info}/licenses/LICENSE +1 -1
  58. {noesium-0.1.0.dist-info → noesium-0.2.0.dist-info}/WHEEL +0 -0
  59. {noesium-0.1.0.dist-info → noesium-0.2.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,22 @@
+ """
+ AskuraAgent - A general-purpose dynamic conversation agent.
+
+ AskuraAgent provides a flexible, configurable framework for human-in-the-loop
+ conversations that adapt to different user communication styles and dynamically
+ collect required information through natural conversation flow.
+ """
+
+ from .askura_agent import AskuraAgent
+ from .conversation import ConversationManager
+ from .extractor import InformationExtractor
+ from .models import AskuraConfig, AskuraResponse, AskuraState, InformationSlot
+
+ __all__ = [
+     "AskuraAgent",
+     "AskuraConfig",
+     "AskuraState",
+     "AskuraResponse",
+     "ConversationManager",
+     "InformationExtractor",
+     "InformationSlot",
+ ]
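
For orientation, a minimal usage sketch of the surface exported above. The AskuraConfig and InformationSlot constructor fields are assumptions inferred from how later files in this diff reference them (models.py defines the authoritative schema but its body is not shown here), and the provider/model strings are placeholders:

from noesium.agents.askura_agent import AskuraAgent, AskuraConfig, InformationSlot

# Hypothetical configuration; field names mirror attributes read by
# askura_agent.py and conversation.py below, not the models.py definitions.
config = AskuraConfig(
    llm_api_provider="openai",   # placeholder provider
    model_name="gpt-4o-mini",    # placeholder model
    conversation_purpose="Collect the details needed to plan a trip.",
    max_conversation_turns=10,
    information_slots=[
        InformationSlot(name="destination", description="where the user wants to go",
                        required=True, priority=2),
        InformationSlot(name="travel_dates", description="when the trip should happen",
                        required=True, priority=1),
    ],
)

agent = AskuraAgent(config)
resp = agent.start_conversation(user_id="u-1", initial_message="Hi, I want to plan a trip")
while resp.requires_user_input and not resp.is_complete:
    resp = agent.process_user_message("u-1", resp.session_id, input(f"{resp.message}\n> "))
print(resp.message)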
@@ -0,0 +1,480 @@
+ """
+ AskuraAgent - A general-purpose dynamic conversation agent.
+
+ AskuraAgent provides a flexible, configurable framework for human-in-the-loop
+ conversations that adapt to different user communication styles and dynamically
+ collect required information through natural conversation flow.
+ """
+
+ import uuid
+ from typing import Any, Dict, List, Optional
+
+ try:
+     from langchain_core.messages import AIMessage, HumanMessage
+     from langchain_core.runnables import RunnableConfig
+     from langgraph.checkpoint.memory import InMemorySaver
+     from langgraph.graph import END, START, StateGraph
+     from langgraph.graph.message import add_messages
+
+     LANGCHAIN_AVAILABLE = True
+ except ImportError:
+     AIMessage = None
+     HumanMessage = None
+     RunnableConfig = None
+     InMemorySaver = None
+     StateGraph = None
+     END = None
+     START = None
+     add_messages = None
+     LANGCHAIN_AVAILABLE = False
+
+ from noesium.core.agent import BaseConversationAgent
+ from noesium.core.tracing import NodeLoggingCallback, TokenUsageCallback
+ from noesium.core.utils.logging import get_logger
+ from noesium.core.utils.typing import override
+
+ from .conversation import ConversationManager
+ from .extractor import InformationExtractor
+ from .memory import Memory
+ from .models import AskuraConfig, AskuraResponse, AskuraState, MessageRoutingDecision
+ from .prompts import get_conversation_analysis_prompts
+ from .reflection import Reflection
+ from .summarizer import Summarizer
+
+ logger = get_logger(__name__)
+
+
+ class AskuraAgent(BaseConversationAgent):
+     """
+     A general-purpose dynamic conversation agent.
+
+     AskuraAgent provides a flexible, configurable framework for human-in-the-loop
+     conversations that adapt to different user communication styles and dynamically
+     collect required information through natural conversation flow.
+     """
+
+     def __init__(self, config: AskuraConfig, extraction_tools: Optional[Dict[str, Any]] = None):
+         """Initialize the AskuraAgent."""
+         # Initialize base class with LLM configuration
+         super().__init__(llm_provider=config.llm_api_provider, model_name=config.model_name)
+
+         self.config = config
+         self.extraction_tools = extraction_tools or {}
+         self.checkpointer = InMemorySaver()
+
+         # Initialize components (pass LLM client to enable intelligent behavior)
+         self.conversation_manager = ConversationManager(config, llm_client=self.llm)
+         self.information_extractor = InformationExtractor(config, self.extraction_tools, llm_client=self.llm)
+         self.reflection = Reflection(config, llm_client=self.llm)
+         self.summarizer = Summarizer(config, llm_client=self.llm, reflection=self.reflection)
+         self.memory = Memory()
+
+         # Build the conversation graph
+         self.graph = self._build_graph()
+         self.export_graph()
+
+     @override
+     def _build_graph(self) -> StateGraph:
+         """Build the agent's graph. Required by BaseGraphicAgent."""
+         return self._build_conversation_graph()
+
+     @override
+     def get_state_class(self):
+         """Get the state class for this agent's graph. Required by BaseGraphicAgent."""
+         return AskuraState
+
+     @override
+     def start_conversation(self, user_id: str, initial_message: Optional[str] = None) -> AskuraResponse:
+         """Start a new conversation with a user. Required by BaseConversationAgent."""
+         session_id = str(uuid.uuid4())
+         now = self._now_iso()
+
+         # Create initial state
+         state = AskuraState(
+             user_id=user_id,
+             session_id=session_id,
+             messages=[],
+             conversation_context={},
+             extracted_info={},
+             turns=0,
+             created_at=now,
+             updated_at=now,
+             next_action=None,
+             requires_user_input=False,
+             is_complete=False,
+             custom_data={},
+         )
+
+         # Add initial message if provided
+         if initial_message:
+             user_msg = HumanMessage(content=initial_message)
+             state.messages = add_messages(state.messages, [user_msg])
+
+         # Store state
+         self._session_states[session_id] = state
+
+         # Run the graph to get initial response
+         response, updated_state = self._run_graph(state)
+
+         # Update stored state with the updated state from graph execution
+         self._session_states[session_id] = updated_state
+
+         logger.info(f"Started conversation for user {user_id}, session {session_id}")
+         return response
+
+     @override
+     def process_user_message(self, user_id: str, session_id: str, message: str) -> AskuraResponse:
+         """Process a user message and return the agent's response. Required by BaseConversationAgent."""
+
+         # Get the current state
+         state = self._session_states.get(session_id)
+         if not state:
+             raise ValueError(f"Session {session_id} not found")
+
+         # Add user message to state
+         user_msg = HumanMessage(content=message)
+         state.messages = add_messages(state.messages, [user_msg])
+         state.updated_at = self._now_iso()
+         # Ensure we prioritize extraction on the next turn to avoid loops
+         state.pending_extraction = True
+
+         # Run the graph to process the message
+         response, updated_state = self._run_graph(state)
+
+         # Update stored state with the updated state from graph execution
+         self._session_states[session_id] = updated_state
+
+         return response
+
+     def _run_graph(self, state: AskuraState) -> tuple[AskuraResponse, AskuraState]:
+         """Run the conversation graph with the given state."""
+         try:
+             # Create callbacks with references so we can access token usage
+             node_callback = NodeLoggingCallback(node_id="graph")
+             token_callback = TokenUsageCallback(model_name=self.config.model_name, verbose=True)
+
+             # Run the graph with per-session thread_id for checkpoints
+             config = RunnableConfig(
+                 configurable={"thread_id": state.session_id},
+                 recursion_limit=self.config.max_conversation_turns,
+                 callbacks=[node_callback, token_callback],
+             )
+             result = self.graph.invoke(state, config)
+
+             # Convert result back to AskuraState if it's a dict
+             if isinstance(result, dict):
+                 result = AskuraState(**result)
+
+             # Create response from final state
+             return self._create_response(result), result
+
+         except Exception as e:
+             logger.error(f"Error running AskuraAgent graph: {e}")
+             return self._create_error_response(state, str(e)), state
+
+     def _build_conversation_graph(self) -> StateGraph:
+         """Build the conversation graph."""
+         builder = StateGraph(AskuraState)
+
+         # Add nodes (delegated to AskuraNodes)
+         builder.add_node("context_analysis", self._context_analysis_node)
+         builder.add_node("message_dispatcher", self._message_dispatcher_node)
+         builder.add_node("start_deep_thinking", self._start_deep_thinking_node)
+         builder.add_node("information_extractor", self._information_extractor_node)
+         builder.add_node("memory_retrival", self._memory_retrival_node)
+         builder.add_node("memory_retention", self._memory_retention_node)
+         builder.add_node("reflection", self._reflection_node)
+         builder.add_node("next_action", self._next_action_node)
+         builder.add_node("response_generator", self._response_generator_node)
+         builder.add_node("human_review", self._human_review_node)
+         builder.add_node("summarizer", self._summarizer_node)
+
+         # Entry: analyze context first, then decide action
+         builder.add_edge(START, "context_analysis")
+         builder.add_edge("context_analysis", "message_dispatcher")
+         builder.add_edge("start_deep_thinking", "information_extractor")
+         builder.add_edge("start_deep_thinking", "memory_retrival")
+         builder.add_edge("information_extractor", "reflection")
+         builder.add_edge("memory_retrival", "reflection")
+         builder.add_edge("reflection", "memory_retention")
+         builder.add_edge("reflection", "next_action")
+         builder.add_edge("response_generator", "human_review")
+         builder.add_edge("human_review", "summarizer")
+         builder.add_edge("summarizer", END)
+
+         builder.add_conditional_edges(
+             "message_dispatcher",
+             self._new_message_router,
+             {
+                 "start_deep_thinking": "start_deep_thinking",
+                 "response_generator": "response_generator",
+                 "end": END,
+             },
+         )
+
+         # NextAction routing
+         builder.add_conditional_edges(
+             "next_action",
+             self._next_action_router,
+             {
+                 "response_generator": "response_generator",
+                 "summarizer": "summarizer",
+                 "end": END,
+             },
+         )
+
+         # Human review routing
+         builder.add_conditional_edges(
+             "human_review",
+             self._human_review_router,
+             {
+                 "continue": "context_analysis",
+                 "end": END,
+             },
+         )
+
+         return builder.compile(checkpointer=self.checkpointer, interrupt_before=["human_review"])
+
+     def _create_response(self, state: AskuraState) -> AskuraResponse:
+         """Create response from final state. Required by BaseConversationAgent."""
+         # Get last assistant message
+         last_message = None
+         for msg in reversed(state.messages):
+             if isinstance(msg, AIMessage):
+                 last_message = msg.content
+                 break
+
+         return AskuraResponse(
+             message=last_message or "I'm here to help!",
+             session_id=state.session_id,
+             is_complete=state.is_complete,
+             confidence=self._calculate_confidence(state),
+             next_actions=[state.next_action_plan.next_action] if state.next_action_plan else [],
+             requires_user_input=state.requires_user_input,
+             metadata={
+                 "turns": state.turns,
+                 "conversation_context": state.conversation_context,
+                 "information_slots": state.extracted_info,
+             },
+             custom_data=state.custom_data,
+         )
+
+     def _create_error_response(self, state: AskuraState, error_message: str) -> AskuraResponse:
+         """Create error response."""
+         return AskuraResponse(
+             message=f"I encountered an issue while processing your request. Please try again. Error: {error_message}",
+             session_id=state.session_id,
+             is_complete=False,
+             confidence=0.0,
+             metadata={"error": error_message},
+             requires_user_input=True,
+         )
+
+     def _calculate_confidence(self, state: AskuraState) -> float:
+         """Calculate confidence score based on gathered information."""
+         information_slots = state.extracted_info
+
+         # Count filled slots
+         filled_slots = sum(1 for slot in self.config.information_slots if information_slots.get(slot.name))
+         total_slots = len(self.config.information_slots)
+
+         if total_slots == 0:
+             return 1.0
+
+         return min(filled_slots / total_slots, 1.0)
+
+     def _start_deep_thinking_node(self, state: AskuraState, config: RunnableConfig) -> dict:
+         """Start deep thinking node - indicates deep processing is beginning."""
+         logger.info("StartDeepThinking: Beginning deep processing")
+         return {}
+
+     def _message_dispatcher_node(self, state: AskuraState, config: RunnableConfig) -> dict:
+         """Message dispatcher node - prepares state for routing decision."""
+         logger.info("MessageDispatcher: Preparing for routing decision")
+         # This is a pass-through node that could be used to set flags or prepare state
+         # Currently just passes through, routing logic is in the conditional router
+         return {}
+
+     def _new_message_router(self, state: AskuraState) -> str:
+         """Enhanced message router that uses LLM to evaluate routing decisions for fluent conversation."""
+         logger.info("MessageRouter: Evaluating routing decision using LLM")
+
+         # Get the most recent user message for evaluation
+         last_user_message = None
+         for msg in reversed(state.messages):
+             if isinstance(msg, HumanMessage):
+                 last_user_message = msg.content
+                 break
+
+         if not last_user_message:
+             logger.warning("MessageRouter: No user message found, defaulting to response_generator")
+             return "response_generator"
+
+         try:
+             # Prepare context for routing evaluation
+             conversation_context = state.conversation_context.to_dict() if state.conversation_context else {}
+             extracted_info = state.extracted_info or {}
+
+             # Get the routing evaluation prompts
+             system_prompt, user_prompt = get_conversation_analysis_prompts(
+                 "message_routing",
+                 conversation_purpose=state.conversation_context.conversation_purpose,
+                 user_message=last_user_message,
+                 conversation_context=conversation_context,
+                 extracted_info=extracted_info,
+             )
+
+             # Use LLM to make routing decision
+             routing_decision = self.llm.structured_completion(
+                 messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
+                 response_model=MessageRoutingDecision,
+                 temperature=0.2,
+                 max_tokens=300,
+             )
+             logger.info(f"MessageRouter: Routing decision: {routing_decision}")
+             return routing_decision.routing_destination
+
+         except Exception as e:
+             logger.error(f"MessageRouter: Error in LLM routing evaluation: {e}")
+             return "response_generator"
+
+     def _context_analysis_node(self, state: AskuraState, config: RunnableConfig):
+         logger.info("ContextAnalysis: Analyzing conversation context")
+         conversation_context = self.conversation_manager.analyze_conversation_context(state)
+         return {"conversation_context": conversation_context}
+
+     def _memory_retrival_node(self, state: AskuraState, config: RunnableConfig):
+         logger.info("MemoryRetrival: Retrieving memory")
+         return {"memory": self.memory.load(state.session_id)}
+
+     def _memory_retention_node(self, state: AskuraState, config: RunnableConfig):
+         logger.info("MemoryRetention: Retaining memory")
+         self.memory.save(state)
+         return None
+
+     def _reflection_node(self, state: AskuraState, config: RunnableConfig):
+         logger.info("Reflection: Evaluating knowledge completeness using LLM")
+
+         # Extract recent user messages for context
+         recent_user_messages = self._format_recent_user_messages(state.messages)
+
+         # Perform LLM-enhanced knowledge gap analysis
+         gap_analysis = self.reflection.evaluate_knowledge_gap(state, recent_user_messages)
+
+         # Update state with the enhanced analysis results
+         updated_state = {
+             "knowledge_gap": gap_analysis.knowledge_gap_summary,
+             "suggested_next_topics": gap_analysis.suggested_next_topics,
+             "custom_data": {
+                 "gap_analysis": {
+                     "critical_missing_info": gap_analysis.critical_missing_info,
+                     "readiness_to_proceed": gap_analysis.readiness_to_proceed,
+                     "reasoning": gap_analysis.reasoning,
+                 }
+             },
+         }
+
+         logger.info(f"Knowledge gap summary: {gap_analysis.knowledge_gap_summary}")
+         logger.info(f"Readiness to proceed: {gap_analysis.readiness_to_proceed}")
+
+         return updated_state
+
+     def _next_action_node(self, state: AskuraState, config: RunnableConfig) -> AskuraState:
+         logger.info("NextAction: Selecting next action")
+         conversation_context = state.conversation_context
+         # Always extract fresh recent user messages to avoid stale data - optimize for token efficiency
+         recent_user_messages = self._format_recent_user_messages(state.messages)
+
+         # Enhanced context enrichment for specific interests
+         self._enrich_context_with_suggestions(state, recent_user_messages)
+         is_ready_to_summarize = self.summarizer.is_ready_to_summarize(state)
+
+         action_result = self.reflection.next_action(
+             state=state,
+             context=conversation_context,
+             recent_messages=recent_user_messages,
+             ready_to_summarize=is_ready_to_summarize,
+         )
+         state.next_action_plan = action_result
+         state.turns += 1
+         logger.info(
+             f"Next action: {action_result.next_action} "
+             f"(intent: {action_result.intent_type}, confidence: {action_result.confidence})"
+         )
+         return state
+
+     def _next_action_router(self, state: AskuraState) -> str:
+         logger.info("NextActionRouter: Routing next action")
+         if self.summarizer.is_ready_to_summarize(state) or state.turns >= self.config.max_conversation_turns:
+             return "summarizer"
+         if state.is_complete:
+             return "end"
+         return "response_generator"
+
+     def _information_extractor_node(self, state: AskuraState, config: RunnableConfig):
+         logger.info("InformationExtractor: Extracting information from user message")
+
+         if not state.messages:
+             logger.warning("InformationExtractor: No messages to extract information from")
+             return {"pending_extraction": False}
+
+         last_user_msg = next((msg for msg in reversed(state.messages) if isinstance(msg, HumanMessage)), None)
+         if not last_user_msg:
+             logger.warning("InformationExtractor: No last user message to extract information from")
+             return {"pending_extraction": False}
+
+         extracted_info = self.information_extractor.extract_all_information(last_user_msg.content, state)
+         return {"extracted_info": extracted_info, "pending_extraction": False}
+
+     def _response_generator_node(self, state: AskuraState, config: RunnableConfig) -> AskuraState:
+         logger.info("ResponseGenerator: Generating contextual response to guide conversation")
+
+         utterance = self.conversation_manager.generate_response(state)
+         ai_message = AIMessage(content=utterance)
+         state.messages = add_messages(state.messages, [ai_message])
+         state.requires_user_input = True
+         return state
+
+     def _summarizer_node(self, state: AskuraState, config: RunnableConfig) -> AskuraState:
+         logger.info("Summarizer: Generating conversation summary")
+
+         if self.summarizer.is_ready_to_summarize(state) or state.turns >= self.config.max_conversation_turns:
+             summary = self.summarizer.summarize(state)
+             summary_message = AIMessage(content=summary)
+             state.messages = add_messages(state.messages, [summary_message])
+             state.is_complete = True
+             state.requires_user_input = False
+         else:
+             # Not ready yet; keep asking
+             state.requires_user_input = True
+         return state
+
+     def _human_review_node(self, state: AskuraState, config: RunnableConfig) -> AskuraState:
+         logger.info("HumanReview: Awaiting human input")
+         # When resumed, mark extraction needed
+         state.requires_user_input = False
+         state.pending_extraction = True
+         return state
+
+     def _human_review_router(self, state: AskuraState) -> str:
+         logger.info("HumanReviewRouter: Routing human review")
+         if state.is_complete:
+             return "end"
+         return "continue"
+
+     def _enrich_context_with_suggestions(self, state: AskuraState, recent_user_messages: List[str]) -> None:
+         """Enrich context with specific suggestions when user shows interest but lacks knowledge."""
+         # TODO: enrich context with specific suggestions when user shows interest but lacks knowledge.
+
+     def _format_recent_user_messages(self, messages) -> List[str]:
+         """Format recent user messages while preserving important context."""
+         # Take last 3 user messages, but keep more content for context
+         user_messages = []
+         recent_messages = [m for m in messages if isinstance(m, HumanMessage)][-3:]
+
+         for msg in recent_messages:
+             # Preserve full short messages, only truncate very long ones
+             content = msg.content if len(msg.content) <= 200 else msg.content[:200] + "..."
+             user_messages.append(content)
+
+         return user_messages
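
The message dispatcher above routes by asking the LLM for a structured MessageRoutingDecision and reading its routing_destination, which must match one of the keys wired into the "message_dispatcher" conditional edges. The real model lives in models.py, which this diff view does not display; a hypothetical Pydantic-style sketch of its shape, assuming structured_completion accepts Pydantic response models:

from typing import Literal
from pydantic import BaseModel, Field

# Hypothetical shape only; the authoritative definition is in
# noesium/agents/askura_agent/models.py (not shown in this excerpt).
class MessageRoutingDecision(BaseModel):
    # _new_message_router returns this value, so it must be one of the
    # conditional-edge keys: "start_deep_thinking", "response_generator", "end".
    routing_destination: Literal["start_deep_thinking", "response_generator", "end"]
    reasoning: str = Field(default="", description="Why this route was chosen")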
@@ -0,0 +1,164 @@
+ """
+ Conversation Manager for AskuraAgent - Handles dynamic conversation analysis and flow control.
+ """
+
+ from typing import List, Optional
+
+ try:
+     from langchain_core.messages import HumanMessage
+
+     LANGCHAIN_AVAILABLE = True
+ except ImportError:
+     HumanMessage = None
+     LANGCHAIN_AVAILABLE = False
+
+ from noesium.core.llm import BaseLLMClient
+ from noesium.core.utils.logging import get_logger
+
+ from .models import AskuraConfig, AskuraState, ConversationContext
+ from .prompts import get_conversation_analysis_prompts, get_response_generation_prompts
+
+ logger = get_logger(__name__)
+
+
+ class ConversationManager:
+     """Manages dynamic conversation analysis and flow control."""
+
+     def __init__(self, config: AskuraConfig, llm_client: Optional[BaseLLMClient] = None):
+         """Initialize the conversation manager."""
+         self.config = config
+         self.llm = llm_client
+
+     def analyze_conversation_context(self, state: AskuraState, message_depth: int = 3) -> ConversationContext:
+         """Analyze conversation context to understand user preferences and conversation flow."""
+         if isinstance(self.config.conversation_purpose, str):
+             context = ConversationContext(conversation_purpose=self.config.conversation_purpose)
+         else:
+             context = ConversationContext(conversation_purpose="\n".join(self.config.conversation_purpose))
+
+         if not state.messages:
+             logger.warning("No recent messages found")
+             return context
+
+         # Analyze user engagement and style
+         user_messages = [msg for msg in state.messages[-message_depth * 2 :] if isinstance(msg, HumanMessage)]
+         if not user_messages:
+             logger.warning("No user messages found")
+             state.missing_info = self._get_missing_slots(state)
+             return context
+
+         last_user_text = user_messages[-1].content
+
+         try:
+             if not self.llm or not isinstance(last_user_text, str):
+                 raise ValueError("LLM client or last user text is not valid")
+
+             # Prepare recent messages for analysis - optimize for token efficiency
+             recent_messages_text = self._format_recent_messages(user_messages[-message_depth:])
+
+             # Get structured prompts for conversation analysis
+             system_prompt, user_prompt = get_conversation_analysis_prompts(
+                 "conversation_context",
+                 conversation_purpose=context.conversation_purpose,
+                 recent_messages=recent_messages_text,
+             )
+
+             # Use structured completion with retry for reliable analysis
+             context = self.llm.structured_completion(
+                 messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
+                 response_model=ConversationContext,
+                 temperature=0.3,
+                 max_tokens=500,
+             )
+         except Exception as e:
+             logger.warning(f"Error analyzing conversation context using LLM: {e}")
+
+         # Analyze what information we have and what's missing
+         state.missing_info = self._get_missing_slots(state)
+         return context
+
+     def generate_response(self, state: AskuraState) -> str:
+         """Generate contextual responses to guide conversation flow."""
+         # This method generates strategic responses that balance natural conversation
+         # with purposeful information collection based on missing information slots
+
+         # Get conversation purpose from context or config
+         conversation_purpose = (
+             state.conversation_context.conversation_purpose
+             if state.conversation_context and state.conversation_context.conversation_purpose
+             else (
+                 self.config.conversation_purpose
+                 if isinstance(self.config.conversation_purpose, str)
+                 else "\n".join(self.config.conversation_purpose)
+             )
+         )
+
+         # Get missing information slots for strategic guidance
+         self._get_missing_slots(state)
+         missing_info_descriptions = []
+         for slot in self.config.information_slots:
+             if slot.required and not state.extracted_info.get(slot.name):
+                 missing_info_descriptions.append(f"- {slot.name}: {slot.description}")
+
+         missing_required_slots = (
+             "\n".join(missing_info_descriptions)
+             if missing_info_descriptions
+             else "All key information has been collected"
+         )
+
+         system_prompt, user_prompt = get_response_generation_prompts(
+             conversation_purpose=conversation_purpose,
+             missing_required_slots=missing_required_slots,
+             intent_type=state.next_action_plan.intent_type if state.next_action_plan else "casual conversation",
+             next_action_reasoning=(
+                 state.next_action_plan.reasoning
+                 if state.next_action_plan
+                 else "Building rapport and guiding conversation naturally toward the purpose"
+             ),
+             known_slots=str(state.extracted_info) if state.extracted_info else "Nothing specific collected yet",
+         )
+         utterance = self.llm.completion(
+             messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
+             temperature=0.7,
+             max_tokens=200,
+         )
+         if isinstance(utterance, str):
+             utterance = utterance.strip()
+             # Remove surrounding quotes if present
+             if utterance.startswith('"') and utterance.endswith('"'):
+                 utterance = utterance[1:-1]
+             elif utterance.startswith("'") and utterance.endswith("'"):
+                 utterance = utterance[1:-1]
+         return utterance
+
+     def _get_missing_slots(self, state: AskuraState) -> dict[str, str]:
+         """Get dictionary of missing information with slot names as keys and descriptions as values."""
+         missing = {}
+         information_slots = state.extracted_info
+
+         # Sort slots by priority (higher priority first)
+         for slot in sorted(self.config.information_slots, key=lambda slot: slot.priority, reverse=True):
+             if slot.required and not information_slots.get(slot.name):
+                 missing[f"ask_{slot.name}"] = f"Need to collect {slot.description}"
+
+         return missing
+
+     def _format_recent_messages(self, messages: List[HumanMessage]) -> str:
+         """Format recent messages while preserving important context."""
+         if not messages:
+             return ""
+
+         # Preserve more context while still being efficient
+         formatted = []
+         for i, msg in enumerate(messages):
+             # Keep full short messages, smart truncation for long ones
+             if len(msg.content) <= 300:
+                 content = msg.content
+             else:
+                 # Keep beginning and end for context
+                 content = msg.content[:200] + "..." + msg.content[-50:]
+
+             role_prefix = "User" if i == len(messages) - 1 else f"U{i+1}"  # Mark most recent
+             formatted.append(f"{role_prefix}: {content}")
+
+         return "\n".join(formatted)  # Use newlines for better readability