noesium 0.1.0-py3-none-any.whl → 0.2.1-py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- noesium/agents/askura_agent/__init__.py +22 -0
- noesium/agents/askura_agent/askura_agent.py +480 -0
- noesium/agents/askura_agent/conversation.py +164 -0
- noesium/agents/askura_agent/extractor.py +175 -0
- noesium/agents/askura_agent/memory.py +14 -0
- noesium/agents/askura_agent/models.py +239 -0
- noesium/agents/askura_agent/prompts.py +202 -0
- noesium/agents/askura_agent/reflection.py +234 -0
- noesium/agents/askura_agent/summarizer.py +30 -0
- noesium/agents/askura_agent/utils.py +6 -0
- noesium/agents/deep_research/__init__.py +13 -0
- noesium/agents/deep_research/agent.py +398 -0
- noesium/agents/deep_research/prompts.py +84 -0
- noesium/agents/deep_research/schemas.py +42 -0
- noesium/agents/deep_research/state.py +54 -0
- noesium/agents/search/__init__.py +5 -0
- noesium/agents/search/agent.py +474 -0
- noesium/agents/search/state.py +28 -0
- noesium/core/__init__.py +1 -1
- noesium/core/agent/base.py +10 -2
- noesium/core/goalith/decomposer/llm_decomposer.py +1 -1
- noesium/core/llm/__init__.py +1 -1
- noesium/core/llm/base.py +2 -2
- noesium/core/llm/litellm.py +42 -21
- noesium/core/llm/llamacpp.py +25 -4
- noesium/core/llm/ollama.py +43 -22
- noesium/core/llm/openai.py +25 -5
- noesium/core/llm/openrouter.py +1 -1
- noesium/core/toolify/base.py +9 -2
- noesium/core/toolify/config.py +2 -2
- noesium/core/toolify/registry.py +21 -5
- noesium/core/tracing/opik_tracing.py +7 -7
- noesium/core/vector_store/__init__.py +2 -2
- noesium/core/vector_store/base.py +1 -1
- noesium/core/vector_store/pgvector.py +10 -13
- noesium/core/vector_store/weaviate.py +2 -1
- noesium/toolkits/__init__.py +1 -0
- noesium/toolkits/arxiv_toolkit.py +310 -0
- noesium/toolkits/audio_aliyun_toolkit.py +441 -0
- noesium/toolkits/audio_toolkit.py +370 -0
- noesium/toolkits/bash_toolkit.py +332 -0
- noesium/toolkits/document_toolkit.py +454 -0
- noesium/toolkits/file_edit_toolkit.py +552 -0
- noesium/toolkits/github_toolkit.py +395 -0
- noesium/toolkits/gmail_toolkit.py +575 -0
- noesium/toolkits/image_toolkit.py +425 -0
- noesium/toolkits/memory_toolkit.py +398 -0
- noesium/toolkits/python_executor_toolkit.py +334 -0
- noesium/toolkits/search_toolkit.py +451 -0
- noesium/toolkits/serper_toolkit.py +623 -0
- noesium/toolkits/tabular_data_toolkit.py +537 -0
- noesium/toolkits/user_interaction_toolkit.py +365 -0
- noesium/toolkits/video_toolkit.py +168 -0
- noesium/toolkits/wikipedia_toolkit.py +420 -0
- noesium-0.2.1.dist-info/METADATA +253 -0
- {noesium-0.1.0.dist-info → noesium-0.2.1.dist-info}/RECORD +59 -23
- {noesium-0.1.0.dist-info → noesium-0.2.1.dist-info}/licenses/LICENSE +1 -1
- noesium-0.1.0.dist-info/METADATA +0 -525
- {noesium-0.1.0.dist-info → noesium-0.2.1.dist-info}/WHEEL +0 -0
- {noesium-0.1.0.dist-info → noesium-0.2.1.dist-info}/top_level.txt +0 -0
noesium/agents/askura_agent/extractor.py (added, +175 lines)

@@ -0,0 +1,175 @@
+"""
+Information Extractor for AskuraAgent - Handles multi-topic information extraction.
+"""
+
+from typing import Any, Dict, Optional
+
+try:
+    from langchain_core.tools import BaseTool
+
+    LANGCHAIN_AVAILABLE = True
+except ImportError:
+    BaseTool = None
+    LANGCHAIN_AVAILABLE = False
+
+from noesium.core.llm import BaseLLMClient
+from noesium.core.utils.logging import get_logger
+
+from .models import AskuraConfig, AskuraState, InformationSlot
+
+logger = get_logger(__name__)
+
+
+class InformationExtractor:
+    """Handles extraction of information from user messages."""
+
+    def __init__(
+        self, config: AskuraConfig, extraction_tools: Dict[str, Any], llm_client: Optional[BaseLLMClient] = None
+    ):
+        """Initialize the information extractor."""
+        self.config = config
+        self.extraction_tools = extraction_tools
+        self.llm = llm_client
+
+    def extract_all_information(self, user_message: str, current_state: Optional[AskuraState] = None) -> Dict[str, Any]:
+        """Extract all possible information from a user message using all available tools.
+
+        Args:
+            user_message: The current user message to extract information from
+            current_state: Optional current state containing previously extracted information
+        """
+        extracted_info = {}
+
+        # Get current partial extraction state for context
+        current_extractions = {}
+        if current_state and current_state.extracted_info:
+            current_extractions = current_state.extracted_info.copy()
+
+        for slot in self.config.information_slots:
+            if not slot.extraction_tools:
+                continue
+            try:
+                result = self._extract_slot_information_with_tools(user_message, slot, current_extractions)
+                if result:
+                    extracted_info[slot.name] = result
+            except Exception as e:
+                logger.warning(f"Failed to extract {slot.name}: {e}")
+
+        return self._merge_extracted_info(current_state, extracted_info)
+
+    def _merge_extracted_info(self, state: AskuraState, extracted_info: Dict[str, Any]) -> Dict[str, Any]:
+        """Update state with extracted information, handling conflicts and merging data."""
+        merged = state.extracted_info
+        for slot_name, extracted_value in extracted_info.items():
+            if not merged.get(slot_name):
+                # Simple assignment for new values
+                merged[slot_name] = extracted_value
+                logger.info(f"Extracted slot {slot_name}: {extracted_value}")
+            else:
+                # Merge existing values for certain types
+                merged[slot_name] = self._merge_values(merged[slot_name], extracted_value, slot_name)
+                logger.info(f"Updated slot {slot_name}: {merged[slot_name]}")
+        return merged
+
+    def _extract_slot_information_with_tools(
+        self, user_message: str, slot: InformationSlot, current_extractions: Dict[str, Any]
+    ) -> Optional[Any]:
+        """Extract information for a specific slot with context from current extractions."""
+        valid_tools = [tool_name for tool_name in slot.extraction_tools if tool_name in self.extraction_tools]
+        if not valid_tools:
+            logger.warning(f"No valid tools found for slot {slot.name}, skipping extraction")
+            return None
+
+        for tool_name in valid_tools:
+            try:
+                tool = self.extraction_tools[tool_name]
+
+                # Prepare context for the tool
+                context_prompt = self._build_extraction_context_prompt(slot, current_extractions)
+                tool_context = {
+                    "user_message": user_message,
+                    "slot_name": slot.name,
+                    "slot_description": slot.description,
+                    "current_extractions": current_extractions,
+                    "conversation_context": current_extractions,
+                    "context_prompt": context_prompt,
+                }
+
+                # Handle both callable tools and LangChain tools
+                if isinstance(tool, BaseTool):
+                    result = tool.invoke(tool_context)
+                elif callable(tool):
+                    # Pass context to callable tools
+                    try:
+                        result = tool(user_message, current_extractions)
+                    except TypeError:
+                        # Fallback to original signature if tool doesn't accept context
+                        result = tool(user_message)
+                else:
+                    logger.warning(f"Tool {tool_name} is not callable or a LangChain tool, skipping")
+                    continue
+
+                # Check if the tool returned useful information
+                if result and self._is_valid_extraction(result, slot):
+                    return self._process_extraction_result(result, slot)
+
+            except Exception as e:
+                logger.warning(f"Tool {tool_name} failed: {e}")
+                continue
+
+        return None
+
+    def _is_valid_extraction(self, result: Dict[str, Any], slot: InformationSlot) -> bool:
+        """Check if the extraction result is valid for the slot."""
+        # Basic validation - check if result has any non-empty values
+        if not result:
+            return False
+
+        # Check if any value in the result is not None/empty
+        for value in result.values():
+            if value is not None and value != "" and value != []:
+                return True
+
+        return False
+
+    def _process_extraction_result(self, result: Dict[str, Any], slot: InformationSlot) -> Any:
+        """Process the extraction result based on slot configuration."""
+        # For now, return the result as-is
+        # This can be extended with more sophisticated processing
+        return result
+
+    def _merge_values(self, existing_value: Any, new_value: Any, slot_name: str) -> Any:
+        """Merge existing and new values intelligently."""
+
+        # Handle list merging
+        if isinstance(existing_value, list) and isinstance(new_value, list):
+            # Merge and deduplicate
+            merged = list(set(existing_value + new_value))
+            return merged
+
+        # Handle dict merging
+        elif isinstance(existing_value, dict) and isinstance(new_value, dict):
+            merged = existing_value.copy()
+            merged.update(new_value)
+            return merged
+
+        # For other types, prefer the new value if it's not None/empty
+        elif new_value is not None and new_value != "" and new_value != []:
+            return new_value
+        else:
+            return existing_value
+
+    def _build_extraction_context_prompt(self, slot: InformationSlot, current_extractions: Dict[str, Any]) -> str:
+        """Build a context prompt to help tools understand current extraction state."""
+        if not current_extractions:
+            return f"Extract information for slot '{slot.name}': {slot.description}"
+
+        context_parts = [f"Extract information for slot '{slot.name}': {slot.description}"]
+        context_parts.append("\nCurrently extracted information:")
+
+        for slot_name, value in current_extractions.items():
+            if value and value not in (None, "", [], {}):
+                context_parts.append(f"- {slot_name}: {value}")
+
+        context_parts.append(f"\nFocus on extracting missing or additional information for '{slot.name}'.")
+        return "\n".join(context_parts)
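The extractor walks each configured slot, tries that slot's registered tools in order, and merges whatever a tool returns into the state's `extracted_info`. A minimal usage sketch, assuming the 0.2.1 wheel and `langchain_core` are installed (the `isinstance(tool, BaseTool)` check needs `BaseTool` to be a real class); the slot name and regex tool below are hypothetical illustrations, not part of the package:

```python
import re

from noesium.agents.askura_agent.extractor import InformationExtractor
from noesium.agents.askura_agent.models import AskuraConfig, AskuraState, InformationSlot


def extract_destination(user_message, current_extractions):
    # Toy callable tool matching the two-argument signature that
    # _extract_slot_information_with_tools tries first.
    match = re.search(r"\bto\s+([A-Z][a-z]+)", user_message)
    return {"destination": match.group(1)} if match else {}


config = AskuraConfig(
    conversation_purpose="plan a trip",
    information_slots=[
        InformationSlot(
            name="destination",  # hypothetical slot
            description="Where the user wants to travel",
            extraction_tools=["destination_regex"],
        )
    ],
)
extractor = InformationExtractor(config, extraction_tools={"destination_regex": extract_destination})
# Pass a real state: _merge_extracted_info dereferences state.extracted_info,
# so None would fail despite the Optional annotation.
state = AskuraState(user_id="u1", session_id="s1")
print(extractor.extract_all_information("I want to fly to Lisbon", state))
# -> {'destination': {'destination': 'Lisbon'}}
```

Note that `extract_all_information` finishes by merging into `current_state.extracted_info` in place, so callers are effectively expected to supply a state even though the parameter is typed `Optional`.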
noesium/agents/askura_agent/models.py (added, +239 lines)

@@ -0,0 +1,239 @@
+"""
+Schemas for AskuraAgent - Flexible data structures for dynamic conversations.
+"""
+
+from enum import Enum
+from typing import Any, Dict, List, Optional, Sequence, Type, Union
+
+from pydantic import BaseModel, Field
+
+try:
+    from langchain_core.messages import BaseMessage
+
+    LANGCHAIN_AVAILABLE = True
+except ImportError:
+    BaseMessage = None
+    LANGCHAIN_AVAILABLE = False
+
+from noesium.core.consts import GEMINI_FLASH
+
+from .utils import get_enum_value
+
+
+class ConversationStyle(str, Enum):
+    """User conversation styles."""
+
+    DIRECT = "direct"
+    EXPLORATORY = "exploratory"
+    CASUAL = "casual"
+
+
+class ConversationDepth(str, Enum):
+    """Conversation depth levels."""
+
+    SURFACE = "surface"
+    MODERATE = "moderate"
+    DEEP = "deep"
+
+
+class UserConfidence(str, Enum):
+    """User confidence levels."""
+
+    LOW = "low"
+    MEDIUM = "medium"
+    HIGH = "high"
+
+
+class ConversationFlow(str, Enum):
+    """Conversation flow patterns."""
+
+    NATURAL = "natural"
+    GUIDED = "guided"
+    USER_LED = "user_led"
+
+
+class ConversationSentiment(str, Enum):
+    """Conversation sentiment states."""
+
+    POSITIVE = "positive"
+    NEUTRAL = "neutral"
+    NEGATIVE = "negative"
+    UNCERTAIN = "uncertain"
+
+
+class ConversationMomentum(str, Enum):
+    """Conversation momentum states."""
+
+    POSITIVE = "positive"
+    NEUTRAL = "neutral"
+    NEGATIVE = "negative"
+
+
+class NextActionPlan(BaseModel):
+    """Response for intent classification and next action determination."""
+
+    next_action: str = Field(description="The selected next action from available options")
+    intent_type: str = Field(description="Intent classification: 'smalltalk' or 'task'")
+    is_smalltalk: bool = Field(description="Whether the user's intent is smalltalk")
+    reasoning: str = Field(description="Brief explanation of why this action was chosen")
+    confidence: float = Field(default=0.0, description="Confidence score (0.0-1.0) in the action choice")
+
+
+class KnowledgeGapAnalysis(BaseModel):
+    """Analysis of knowledge gaps and next topics to explore."""
+
+    knowledge_gap_summary: str = Field(description="Overall summary of what's missing compared to conversation purpose")
+    critical_missing_info: List[str] = Field(
+        description="Most important information still needed", default_factory=list
+    )
+    suggested_next_topics: List[str] = Field(description="3-5 specific topics to explore next", default_factory=list)
+    readiness_to_proceed: float = Field(
+        description="Confidence (0.0-1.0) that we can proceed with current information", default=0.0
+    )
+    reasoning: str = Field(description="Analysis reasoning and recommendations")
+
+
+class MessageRoutingDecision(BaseModel):
+    """LLM-based routing decision for new messages."""
+
+    routing_destination: str = Field(description="Where to route: 'start_deep_thinking' or 'response_generator'")
+    reasoning: str = Field(description="Brief explanation of the routing decision")
+    confidence: float = Field(default=0.0, description="Confidence score (0.0-1.0) in the routing decision")
+
+
+class InformationSlot(BaseModel):
+    """Configuration for an information slot to be collected."""
+
+    name: str
+    description: str
+    priority: int = Field(default=1, description="Higher number = higher priority")
+    required: bool = Field(default=True)
+    extraction_tools: List[str] = Field(default_factory=list, description="Names of extraction tools to use")
+    extraction_model: Optional[Type[BaseModel]] = Field(default=None, description="Pydantic model class for extraction")
+    question_templates: Dict[str, Dict[str, Dict[str, str]]] = Field(default_factory=dict)
+    validation_rules: List[str] = Field(default_factory=list)
+    dependencies: List[str] = Field(default_factory=list, description="Other slots this depends on")
+
+    class Config:
+        """Pydantic configuration."""
+
+        use_enum_values = True
+
+
+class ConversationContext(BaseModel):
+    """Analysis of conversation context."""
+
+    # Conversation purpose
+    conversation_purpose: str = Field(default="")
+    conversation_on_track_confidence: float = Field(default=0.0)
+
+    # Conversation vibe
+    information_density: float = Field(default=0.0)
+    conversation_style: ConversationStyle = Field(default=ConversationStyle.DIRECT)
+    conversation_depth: ConversationDepth = Field(default=ConversationDepth.SURFACE)
+    user_confidence: UserConfidence = Field(default=UserConfidence.MEDIUM)
+    conversation_flow: ConversationFlow = Field(default=ConversationFlow.NATURAL)
+    conversation_momentum: ConversationMomentum = Field(default=ConversationMomentum.POSITIVE)
+    last_message_sentiment: ConversationSentiment = Field(default=ConversationSentiment.NEUTRAL)
+
+    class Config:
+        """Pydantic configuration."""
+
+        use_enum_values = True
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to dictionary."""
+        return {
+            "conversation_purpose": self.conversation_purpose,
+            "conversation_on_track_confidence": self.conversation_on_track_confidence,
+            "information_density": self.information_density,
+            "conversation_style": get_enum_value(self.conversation_style),
+            "conversation_depth": get_enum_value(self.conversation_depth),
+            "user_confidence": get_enum_value(self.user_confidence),
+            "conversation_flow": get_enum_value(self.conversation_flow),
+            "conversation_momentum": get_enum_value(self.conversation_momentum),
+            "last_message_sentiment": get_enum_value(self.last_message_sentiment),
+        }
+
+
+def keep_first(left: str, right: str) -> str:
+    return left if left else right
+
+
+class AskuraState(BaseModel):
+    """Core state for AskuraAgent conversations."""
+
+    # Metadata
+    user_id: str = Field(default="")
+    session_id: str = Field(default="")
+    turns: int = Field(default=0)
+    created_at: str = Field(default="")
+    updated_at: str = Field(default="")
+
+    # Conversation state
+    messages: Sequence[BaseMessage] = Field(default_factory=list)
+    conversation_context: ConversationContext = Field(default_factory=ConversationContext)
+
+    # Information slots (dynamic based on configuration)
+    extracted_info: Dict[str, Any] = Field(default_factory=dict)
+    missing_info: Dict[str, str] = Field(
+        default_factory=dict, description="Information slot name -> description of what's missing"
+    )
+    knowledge_gap: str = Field(
+        default="", description="Summary of knowledge gap between conversation purpose and current status"
+    )
+    suggested_next_topics: List[str] = Field(default_factory=list)
+
+    # Memory state
+    memory: Dict[str, Any] = Field(default_factory=dict)
+
+    # Next action analysis results
+    next_action_plan: Optional[NextActionPlan] = Field(default=None)
+
+    # Agent control
+    requires_user_input: bool = Field(default=True)
+    is_complete: bool = Field(default=False)
+    pending_extraction: bool = Field(default=False)
+
+    # Custom fields (for specific agents)
+    custom_data: Dict[str, Any] = Field(default_factory=dict)
+
+    class Config:
+        """Pydantic configuration."""
+
+        use_enum_values = True
+
+
+class AskuraConfig(BaseModel):
+    """Configuration for AskuraAgent."""
+
+    # LLM configuration
+    llm_api_provider: str = "openrouter"
+    model_name: str = GEMINI_FLASH
+    temperature: float = 0.7
+    max_tokens: int = 1000
+
+    # Purposes of the conversation
+    conversation_purpose: Union[List[str], str] = Field(default="")
+    max_conversation_turns: int = 15
+
+    # Information slots configuration
+    information_slots: List[InformationSlot] = Field(default_factory=list)
+
+    # Custom configuration
+    custom_config: Dict[str, Any] = Field(default_factory=dict)
+
+
+class AskuraResponse(BaseModel):
+    """Response from AskuraAgent."""
+
+    message: str
+    session_id: str
+    is_complete: bool = False
+    confidence: float = 0.0
+    next_actions: List[str] = Field(default_factory=list)
+    requires_user_input: bool = True
+    metadata: Dict[str, Any] = Field(default_factory=dict)
+
+    # Custom response data
+    custom_data: Dict[str, Any] = Field(default_factory=dict)
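Because the `Config` classes set `use_enum_values = True`, pydantic stores the enum-typed fields above as their plain string values, which is why `to_dict` routes every enum through `get_enum_value` (defined in the package's `utils.py`, also added in this release). A short sketch of how the context model behaves, assuming the wheel is installed:

```python
from noesium.agents.askura_agent.models import (
    ConversationContext,
    ConversationStyle,
    UserConfidence,
)

ctx = ConversationContext(
    conversation_purpose="plan a trip",
    conversation_on_track_confidence=0.8,
    conversation_style=ConversationStyle.EXPLORATORY,
    user_confidence=UserConfidence.LOW,
)

# With use_enum_values=True the field already holds the plain string;
# get_enum_value() (from .utils, not shown here) presumably accepts
# either an enum member or its string value and returns the string.
print(ctx.to_dict()["conversation_style"])  # "exploratory"
print(ctx.to_dict()["user_confidence"])     # "low"
```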
noesium/agents/askura_agent/prompts.py (added, +202 lines)

@@ -0,0 +1,202 @@
+"""
+Prompts for AskuraAgent - Structured prompts for conversation analysis and management.
+"""
+
+# Structured extraction prompts - optimized for structured_completion
+CONVERSATION_ANALYSIS_SYSTEM_PROMPTS = {
+    "conversation_context": """Analyze conversation style and alignment with purpose.
+
+Assess key factors:
+- Style: direct (goal-oriented), exploratory (curious), casual (relaxed)
+- User confidence: low (hesitant), medium (balanced), high (assertive)
+- Flow: natural (organic), guided (following direction), user_led (user driving)
+- Sentiment: positive (enthusiastic), neutral (balanced), negative (frustrated), uncertain (confused)
+- Momentum: positive (building), neutral (steady), negative (losing interest)
+- On-track confidence (0.0-1.0): How well conversation aligns with purpose
+  * 0.0-0.3: Off-track, not addressing purpose
+  * 0.4-0.6: Partially on-track, some relevance
+  * 0.7-0.8: Mostly on-track, good alignment
+  * 0.9-1.0: Highly focused on purpose""",
+    "knowledge_gap_analysis": """Analyze knowledge gap and suggest next topics to help achieve conversation purpose.
+
+**Instructions:**
+1. Evaluate how well current knowledge aligns with the conversation purpose
+2. Identify key knowledge gaps that prevent achieving the purpose
+3. Suggest 3-5 specific next topics that would help bridge these gaps
+4. Provide a clear summary of the overall knowledge gap
+5. Consider user's conversation style and preferences when suggesting topics
+
+**Analysis should help determine:**
+- Whether we have enough information to proceed
+- What critical information is still needed
+- How to prioritize gathering remaining information
+- Topics that would naturally engage the user based on their style""",
+    "determine_next_action": """Classify MOST RECENT message intent and select optimal next action.
+
+Intent Classification (focus ONLY on last message):
+- "smalltalk": Greetings, pleasantries, casual conversation
+- "task": Goal-oriented, information requests, specific questions, task content
+
+Decision Guidelines:
+- If MOST RECENT message is smalltalk: respond appropriately but guide toward task
+- If MOST RECENT message is task: focus on gathering missing information
+- If conversation off-track (<0.4): prioritize redirecting to purpose
+- If conversation on-track (>0.7): focus on collecting missing info
+- If user confidence low: choose supportive, confidence-boosting actions
+- If momentum negative: provide encouragement or redirect
+- Balance staying on purpose with maintaining engagement
+
+Reasoning must explicitly reference the MOST RECENT user message.""",
+    "message_routing": """Evaluate if the user's message requires deep thinking or can be handled with a quick response to guide conversation.
+
+**Decision Criteria:**
+
+Deep thinking is required IF BOTH conditions are met:
+1. **Contains Purpose-Related Info**: Message contains information directly related to the conversation purpose
+2. **Needs Extraction/Reflection**: Message contains specific details, facts, preferences, or decisions that should be extracted and reflected upon
+
+Quick response is appropriate when:
+- Message is casual conversation, greetings, or small talk
+- Message is off-topic from the conversation purpose
+- Message asks general questions without providing extractable information
+- Message needs guidance to stay on topic
+
+**Instructions:**
+1. Evaluate if the message contains information related to the conversation purpose
+2. Determine if the message contains extractable information that requires reflection
+3. Choose routing destination: 'start_deep_thinking' if both criteria met, otherwise 'response_generator'
+4. Explain your reasoning clearly""",
+}
+
+CONVERSATION_ANALYSIS_USER_PROMPTS = {
+    "conversation_context": """Conversation purpose: {conversation_purpose}
+
+Recent messages: {recent_messages}""",
+    "knowledge_gap_analysis": """**Conversation Purpose:** {conversation_purpose}
+
+**Current Context:**
+{conversation_context}
+
+**What We Know (Extracted Information):**
+{extracted_info}
+
+**What We're Missing (Required Information):**
+{missing_info}
+
+**Retrieved Memory:**
+{memory}
+
+**Recent Conversation:**
+{recent_messages}""",
+    "determine_next_action": """Context: {conversation_context}
+Ready to summarize: {ready_to_summarize}
+Available actions: {available_actions}
+Recent messages: {recent_messages}""",
+    "message_routing": """**Conversation Purpose:** {conversation_purpose}
+
+**Current User Message:** {user_message}
+
+**Conversation Context:**
+{conversation_context}
+
+**Current Extracted Information:**
+{extracted_info}""",
+}
+
+# Backward compatibility - combined prompts for legacy usage
+CONVERSATION_ANALYSIS_PROMPTS = {
+    "conversation_context": CONVERSATION_ANALYSIS_SYSTEM_PROMPTS["conversation_context"]
+    + "\n\n"
+    + CONVERSATION_ANALYSIS_USER_PROMPTS["conversation_context"],
+    "knowledge_gap_analysis": CONVERSATION_ANALYSIS_SYSTEM_PROMPTS["knowledge_gap_analysis"]
+    + "\n\n"
+    + CONVERSATION_ANALYSIS_USER_PROMPTS["knowledge_gap_analysis"],
+    "determine_next_action": CONVERSATION_ANALYSIS_SYSTEM_PROMPTS["determine_next_action"]
+    + "\n\n"
+    + CONVERSATION_ANALYSIS_USER_PROMPTS["determine_next_action"],
+    "message_routing": CONVERSATION_ANALYSIS_SYSTEM_PROMPTS["message_routing"]
+    + "\n\n"
+    + CONVERSATION_ANALYSIS_USER_PROMPTS["message_routing"],
+}
+
+
+def get_conversation_analysis_prompts(analysis_type: str, **kwargs) -> tuple[str, str]:
+    """Get separated system and user prompts for conversation analysis."""
+    system_prompt = CONVERSATION_ANALYSIS_SYSTEM_PROMPTS.get(analysis_type, "")
+    user_prompt_template = CONVERSATION_ANALYSIS_USER_PROMPTS.get(analysis_type, "")
+
+    try:
+        user_prompt = user_prompt_template.format(**kwargs)
+        return system_prompt, user_prompt
+    except KeyError:
+        return system_prompt, user_prompt_template
+
+
+def get_conversation_analysis_prompt(analysis_type: str, **kwargs) -> str:
+    """Backward compatibility - get combined prompt for conversation analysis."""
+    prompt = CONVERSATION_ANALYSIS_PROMPTS.get(analysis_type, "")
+    try:
+        return prompt.format(**kwargs)
+    except KeyError:
+        return prompt
+
+
+# TODO (xmingc): I like the idea of letting the system hold a limited number of improvisations.
+RESPONSE_GENERATION_SYSTEM_PROMPT = """You are a witty and creative travel planning assistant. Generate a short, precise, and inspiring question that incorporates relevant context naturally. Feel free to make slight improvisations - add wordplay, use creative language, make clever observations, or add a touch of humor when appropriate. The question should be conversational, memorable, and always encouraging.
+Keep it under 3 sentences but make it delightful and engaging. Return only the question, no additional text.
+
+**Strategic Response Guidelines:**
+- **Balance natural conversation with purposeful direction** - Be genuinely conversational but strategically guide toward missing information
+- **Ask strategic follow-up questions** - Frame questions around genuine curiosity that happens to align with our information goals
+- **Provide context and options** - When guiding toward a topic, give examples or choices to make it easier for the user to respond
+- **Build on user's interests** - Connect their current topic to the information we need to collect. When user shows interest but may lack knowledge, provide concrete options/suggestions
+- Ask ONE specific question that helps the user think about their plans
+
+**Information Collection Strategies:**
+- **For destination/location info**: Share travel experiences, ask about dream places, mention interesting locations
+- **For dates/timing**: Talk about seasons, upcoming events, or personal scheduling preferences
+- **For interests/preferences**: Share enthusiasm about activities, ask about past experiences, mention options
+- **For logistics (budget, group size)**: Frame around planning considerations or past experiences
+- **For general context**: Use open-ended questions that invite storytelling and detailed sharing
+
+**Special Cases:**
+- **If no information is missing**: Focus on deeper exploration, clarification, or moving toward completion
+- **If user seems hesitant**: Provide encouragement and make sharing feel easier with specific examples or options
+- **If off-topic**: Gently redirect through relevant connections or shared interests
+
+Generate a single, natural response without quotes or formatting - just the raw conversational text that feels natural while strategically moving toward the missing information we need."""

+RESPONSE_GENERATION_USER_PROMPT = """**Conversation Purpose:** {conversation_purpose}
+**Missing Key Information:** {missing_required_slots}
+
+**Current Situation:**
+- User's intent: {intent_type}
+- Context: {next_action_reasoning}
+- What we know: {known_slots}
+
+Generate an appropriate response based on this context."""
+
+# Backward compatibility - combined prompt for legacy usage
+RESPONSE_GENERATION_PROMPT = RESPONSE_GENERATION_SYSTEM_PROMPT + "\n\n" + RESPONSE_GENERATION_USER_PROMPT
+
+
+def get_response_generation_prompts(**kwargs) -> tuple[str, str]:
+    """Get separated system and user prompts for response generation."""
+    try:
+        user_prompt = RESPONSE_GENERATION_USER_PROMPT.format(**kwargs)
+        return RESPONSE_GENERATION_SYSTEM_PROMPT, user_prompt
+    except KeyError:
+        return RESPONSE_GENERATION_SYSTEM_PROMPT, RESPONSE_GENERATION_USER_PROMPT
+
+
+def get_response_generation_prompt(**kwargs) -> str:
+    """Backward compatibility - get combined prompt for response generation."""
+    try:
+        return RESPONSE_GENERATION_PROMPT.format(**kwargs)
+    except KeyError:
+        return RESPONSE_GENERATION_PROMPT
+
+
+def get_next_question_prompt(**kwargs) -> str:
+    """Backward compatibility - redirect to response generation prompt."""
+    return get_response_generation_prompt(**kwargs)
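The module keeps system and user prompts separate so they can be sent as distinct chat messages, while the concatenated `*_PROMPTS` values remain for legacy single-prompt callers; if `format(**kwargs)` is missing a placeholder key, the helpers fall back to returning the raw template. A quick sketch of the split-prompt helper, assuming the wheel is installed (the argument values are illustrative):

```python
from noesium.agents.askura_agent.prompts import get_conversation_analysis_prompts

# "conversation_context" templates use the {conversation_purpose} and
# {recent_messages} placeholders shown in the diff above.
system, user = get_conversation_analysis_prompts(
    "conversation_context",
    conversation_purpose="plan a trip",
    recent_messages="User: somewhere warm in March, maybe Portugal?",
)
print(system.splitlines()[0])  # Analyze conversation style and alignment with purpose.
print(user)
# Conversation purpose: plan a trip
#
# Recent messages: User: somewhere warm in March, maybe Portugal?
```

Note the failure mode implied by the `except KeyError` branch: omitting any placeholder key returns the unformatted template rather than raising, so callers should verify the placeholders were actually filled.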