cartesia-line 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cartesia-line may be problematic. See the registry's advisory page for more details.
- cartesia_line-0.0.1.dist-info/METADATA +25 -0
- cartesia_line-0.0.1.dist-info/RECORD +27 -0
- cartesia_line-0.0.1.dist-info/WHEEL +5 -0
- cartesia_line-0.0.1.dist-info/licenses/LICENSE +201 -0
- cartesia_line-0.0.1.dist-info/top_level.txt +1 -0
- line/__init__.py +29 -0
- line/bridge.py +348 -0
- line/bus.py +401 -0
- line/call_request.py +25 -0
- line/events.py +218 -0
- line/harness.py +257 -0
- line/harness_types.py +109 -0
- line/nodes/__init__.py +7 -0
- line/nodes/base.py +60 -0
- line/nodes/conversation_context.py +66 -0
- line/nodes/reasoning.py +223 -0
- line/routes.py +618 -0
- line/tools/__init__.py +9 -0
- line/tools/system_tools.py +120 -0
- line/tools/tool_types.py +39 -0
- line/user_bridge.py +200 -0
- line/utils/__init__.py +0 -0
- line/utils/aio.py +62 -0
- line/utils/gemini_utils.py +152 -0
- line/utils/openai_utils.py +122 -0
- line/voice_agent_app.py +147 -0
- line/voice_agent_system.py +230 -0
line/nodes/reasoning.py
ADDED
|
@@ -0,0 +1,223 @@
|
|
|
1
|
+
"""
|
|
2
|
+
ReasoningNode
|
|
3
|
+
|
|
4
|
+
A base class for agent reasoning using the template method pattern.
|
|
5
|
+
|
|
6
|
+
- Handles conversation history and tool calls.
|
|
7
|
+
- Defines a standard flow for generating agent responses.
|
|
8
|
+
- Subclasses implement `process_context()` to provide custom reasoning.
|
|
9
|
+
|
|
10
|
+
This class simplifies building agents that need both conversation management and tool integration.
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from abc import abstractmethod
|
|
14
|
+
from typing import TYPE_CHECKING, Any, AsyncGenerator, List, Optional, Union
|
|
15
|
+
|
|
16
|
+
from loguru import logger
|
|
17
|
+
|
|
18
|
+
from line.bus import Message
|
|
19
|
+
from line.events import (
|
|
20
|
+
AgentGenerationComplete,
|
|
21
|
+
AgentResponse,
|
|
22
|
+
EventInstance,
|
|
23
|
+
EventType,
|
|
24
|
+
ToolCall,
|
|
25
|
+
ToolResult,
|
|
26
|
+
UserTranscriptionReceived,
|
|
27
|
+
)
|
|
28
|
+
from line.nodes.base import Node
|
|
29
|
+
from line.nodes.conversation_context import ConversationContext
|
|
30
|
+
|
|
31
|
+
if TYPE_CHECKING:
|
|
32
|
+
pass
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class ReasoningNode(Node):
    """
    Base node implementing the template-method pattern for agent reasoning.

    Conversation-history management, consecutive-event merging, and the overall
    generation flow live here; subclasses supply the actual reasoning by
    overriding process_context().

    Flow of a single generation:
        1. generate() - the template method that drives the steps below
        2. _build_conversation_context() - assembles the context window
        3. process_context() - abstract hook with subclass-specific logic
        4. every yielded event is recorded and forwarded for observability
    """

    def __init__(
        self,
        system_prompt: str,
        max_context_length: int = 100,
        node_id: Optional[str] = None,
    ):
        """
        Set up the reasoning node.

        Args:
            system_prompt: System prompt handed to the language model.
            max_context_length: Upper bound on the number of conversation turns
                retained when building the context window.
            node_id: Unique identifier for the node. Defaults to uuid4().
        """

        super().__init__(node_id=node_id)
        self.system_prompt = system_prompt
        self.max_context_length = max_context_length

        # Full conversation history: user transcriptions, agent responses,
        # and tool activity, stored as a flat list of event instances.
        self.conversation_events: List[Any] = []

        logger.info(f"{self} initialized")

    def on_interrupt_generate(self, message: Message) -> None:
        """Handle interrupt event."""
        super().on_interrupt_generate(message)

    async def generate(
        self, message: Message
    ) -> AsyncGenerator[Union[AgentResponse, ToolCall, ToolResult, EventType], None]:
        """Drive the shared generation flow for every ReasoningNode subclass.

        Subclasses implement :method:`process_context` for their specialized
        reasoning; this method supplies the surrounding plumbing.

        Flow:
            1. Bail out early when no conversation events exist yet
            2. Build the conversation context
               (override :method:`_build_conversation_context` to customize)
            3. Delegate to the subclass-specific process_context()
            4. Record and yield every produced event for observability

        Each yielded item is forwarded to the bus, and different types carry
        different meaning downstream:

        - AgentResponse: text from the LM; spoken to the user when this is
          the speaking node.
        - ToolCall: records that the LM requested a tool invocation.
        - ToolResult: records an executed tool call and its result. It need
          not pair with a previously yielded ToolCall — fast or synchronous
          tools often skip yielding one.
        - EventType: custom result types (e.g., FormProcessingResult).

        Yields:
            AgentResponse: Text responses.
            ToolCall: Tool execution requests.
            ToolResult: Tool execution results.
            EventType: Custom result types (e.g., FormProcessingResult)
        """
        # Nothing to reason about until at least one event has arrived.
        if not self.conversation_events:
            return

        # Assemble the standardized context window for the subclass.
        ctx = self._build_conversation_context()

        logger.info(f"💬 Processing context: {ctx.events}")
        async for chunk in self.process_context(ctx):
            # Persist the event into the history, then forward it.
            self.add_event(chunk)

            yield chunk

        # Signal downstream consumers that this generation pass finished.
        yield AgentGenerationComplete()

    @abstractmethod
    async def process_context(
        self, context: ConversationContext
    ) -> AsyncGenerator[Union[AgentResponse, ToolCall], None]:
        """
        Subclass hook containing the specialized reasoning logic.

        Typical implementations include:
        - Voice agents: stream LLM responses
        - Form fillers: extract structured data
        - RAG agents: query knowledge bases
        - Chat agents: generate conversational replies

        Args:
            context: Standardized conversation context with events, system
                prompt, and metadata.

        Yields:
            AgentResponse: Text content for the user.
            ToolCall: Tool execution requests.
            Custom types: Subclass-specific results (yielded through as-is).
        """
        raise NotImplementedError("Subclasses must implement process_context")
        # Unreachable `yield` keeps this an async generator function.
        yield

    def _build_conversation_context(self) -> ConversationContext:
        """
        Assemble the standardized context handed to process_context().

        Packages the most recent conversation events together with the system
        prompt and bookkeeping metadata, giving every subclass a consistent
        view of the conversation.

        Returns:
            ConversationContext: Standardized context for process_context()
        """
        events = self.conversation_events
        # Trim to the newest turns once the history exceeds the cap; while
        # under the cap the live list itself is passed through untrimmed.
        if len(events) > self.max_context_length:
            events = events[-self.max_context_length :]

        return ConversationContext(
            events=events,
            system_prompt=self.system_prompt,
            metadata={
                "max_context_length": self.max_context_length,
                "total_messages": len(self.conversation_events),
            },
        )

    def add_event(self, event: EventInstance):
        """
        Record an event in `self.conversation_events`.

        Consecutive events of type AgentResponse or UserTranscriptionReceived
        are coalesced into one event whose content is the concatenation, so
        context building never has to stitch their text together afterwards.

        Args:
            event: The event to add to the conversation events.
        """
        # Callers sometimes hand us a bus Message; unwrap it to the event.
        if isinstance(event, Message):
            event = event.event

        if not self.conversation_events:
            self.conversation_events.append(event)
            return

        # Merge identical consecutive event types (AgentResponse /
        # UserTranscriptionReceived) so the context sent to the LM stays flat.
        last = self.conversation_events[-1]
        for merge_type in (AgentResponse, UserTranscriptionReceived):
            if isinstance(event, merge_type) and isinstance(last, merge_type):
                self.conversation_events[-1] = merge_type(
                    content=last.content + event.content
                )
                return

        self.conversation_events.append(event)

    def clear_context(self) -> List[Any]:
        """
        Drop the entire conversation history and hand it back to the caller.

        Long-running conversations need periodic context resets to keep the
        LLM's context window bounded and response quality stable.

        Returns:
            List[Any]: The conversation events that were cleared
        """
        cleared_events = list(self.conversation_events)
        self.conversation_events = []
        logger.debug(f"{self} cleared {len(cleared_events)} conversation events")
        return cleared_events