dv-pipecat-ai 0.0.85.dev698__py3-none-any.whl → 0.0.85.dev814__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of dv-pipecat-ai has been flagged as potentially problematic; consult the package registry's advisory page for details.

Files changed (45):
  1. {dv_pipecat_ai-0.0.85.dev698.dist-info → dv_pipecat_ai-0.0.85.dev814.dist-info}/METADATA +23 -18
  2. {dv_pipecat_ai-0.0.85.dev698.dist-info → dv_pipecat_ai-0.0.85.dev814.dist-info}/RECORD +45 -43
  3. pipecat/adapters/services/aws_nova_sonic_adapter.py +116 -6
  4. pipecat/pipeline/runner.py +6 -2
  5. pipecat/pipeline/task.py +40 -55
  6. pipecat/processors/aggregators/llm_context.py +40 -2
  7. pipecat/processors/frameworks/rtvi.py +1 -0
  8. pipecat/runner/daily.py +59 -20
  9. pipecat/runner/run.py +149 -67
  10. pipecat/runner/types.py +5 -5
  11. pipecat/services/assemblyai/models.py +6 -0
  12. pipecat/services/assemblyai/stt.py +13 -5
  13. pipecat/services/asyncai/tts.py +3 -0
  14. pipecat/services/aws/llm.py +33 -16
  15. pipecat/services/aws/nova_sonic/context.py +69 -0
  16. pipecat/services/aws/nova_sonic/llm.py +199 -89
  17. pipecat/services/aws/stt.py +2 -0
  18. pipecat/services/aws_nova_sonic/context.py +8 -12
  19. pipecat/services/cartesia/stt.py +77 -70
  20. pipecat/services/cartesia/tts.py +3 -1
  21. pipecat/services/deepgram/flux/stt.py +4 -0
  22. pipecat/services/elevenlabs/tts.py +82 -41
  23. pipecat/services/fish/tts.py +3 -0
  24. pipecat/services/google/stt.py +4 -0
  25. pipecat/services/lmnt/tts.py +2 -0
  26. pipecat/services/neuphonic/tts.py +3 -0
  27. pipecat/services/openai/tts.py +37 -6
  28. pipecat/services/piper/tts.py +7 -9
  29. pipecat/services/playht/tts.py +3 -0
  30. pipecat/services/rime/tts.py +9 -8
  31. pipecat/services/riva/stt.py +3 -1
  32. pipecat/services/salesforce/__init__.py +9 -0
  33. pipecat/services/salesforce/llm.py +465 -0
  34. pipecat/services/sarvam/tts.py +87 -10
  35. pipecat/services/speechmatics/stt.py +3 -1
  36. pipecat/services/stt_service.py +23 -10
  37. pipecat/services/tts_service.py +64 -13
  38. pipecat/transports/base_input.py +3 -0
  39. pipecat/transports/base_output.py +71 -77
  40. pipecat/transports/smallwebrtc/connection.py +5 -0
  41. pipecat/transports/smallwebrtc/request_handler.py +42 -0
  42. pipecat/utils/string.py +1 -0
  43. {dv_pipecat_ai-0.0.85.dev698.dist-info → dv_pipecat_ai-0.0.85.dev814.dist-info}/WHEEL +0 -0
  44. {dv_pipecat_ai-0.0.85.dev698.dist-info → dv_pipecat_ai-0.0.85.dev814.dist-info}/licenses/LICENSE +0 -0
  45. {dv_pipecat_ai-0.0.85.dev698.dist-info → dv_pipecat_ai-0.0.85.dev814.dist-info}/top_level.txt +0 -0
@@ -14,6 +14,7 @@ from typing import AsyncGenerator, Dict, Literal, Optional
14
14
 
15
15
  from loguru import logger
16
16
  from openai import AsyncOpenAI, BadRequestError
17
+ from pydantic import BaseModel
17
18
 
18
19
  from pipecat.frames.frames import (
19
20
  ErrorFrame,
@@ -55,6 +56,17 @@ class OpenAITTSService(TTSService):
55
56
 
56
57
  OPENAI_SAMPLE_RATE = 24000 # OpenAI TTS always outputs at 24kHz
57
58
 
59
+ class InputParams(BaseModel):
60
+ """Input parameters for OpenAI TTS configuration.
61
+
62
+ Parameters:
63
+ instructions: Instructions to guide voice synthesis behavior.
64
+ speed: Voice speed control (0.25 to 4.0, default 1.0).
65
+ """
66
+
67
+ instructions: Optional[str] = None
68
+ speed: Optional[float] = None
69
+
58
70
  def __init__(
59
71
  self,
60
72
  *,
@@ -65,6 +77,7 @@ class OpenAITTSService(TTSService):
65
77
  sample_rate: Optional[int] = None,
66
78
  instructions: Optional[str] = None,
67
79
  speed: Optional[float] = None,
80
+ params: Optional[InputParams] = None,
68
81
  **kwargs,
69
82
  ):
70
83
  """Initialize OpenAI TTS service.
@@ -77,7 +90,11 @@ class OpenAITTSService(TTSService):
77
90
  sample_rate: Output audio sample rate in Hz. If None, uses OpenAI's default 24kHz.
78
91
  instructions: Optional instructions to guide voice synthesis behavior.
79
92
  speed: Voice speed control (0.25 to 4.0, default 1.0).
93
+ params: Optional synthesis controls (acting instructions, speed, ...).
80
94
  **kwargs: Additional keyword arguments passed to TTSService.
95
+
96
+ .. deprecated:: 0.0.91
97
+ The `instructions` and `speed` parameters are deprecated, use `InputParams` instead.
81
98
  """
82
99
  if sample_rate and sample_rate != self.OPENAI_SAMPLE_RATE:
83
100
  logger.warning(
@@ -86,12 +103,26 @@ class OpenAITTSService(TTSService):
86
103
  )
87
104
  super().__init__(sample_rate=sample_rate, **kwargs)
88
105
 
89
- self._speed = speed
90
106
  self.set_model_name(model)
91
107
  self.set_voice(voice)
92
- self._instructions = instructions
93
108
  self._client = AsyncOpenAI(api_key=api_key, base_url=base_url)
94
109
 
110
+ if instructions or speed:
111
+ import warnings
112
+
113
+ with warnings.catch_warnings():
114
+ warnings.simplefilter("always")
115
+ warnings.warn(
116
+ "The `instructions` and `speed` parameters are deprecated, use `InputParams` instead.",
117
+ DeprecationWarning,
118
+ stacklevel=2,
119
+ )
120
+
121
+ self._settings = {
122
+ "instructions": params.instructions if params else instructions,
123
+ "speed": params.speed if params else speed,
124
+ }
125
+
95
126
  def can_generate_metrics(self) -> bool:
96
127
  """Check if this service can generate processing metrics.
97
128
 
@@ -144,11 +175,11 @@ class OpenAITTSService(TTSService):
144
175
  "response_format": "pcm",
145
176
  }
146
177
 
147
- if self._instructions:
148
- create_params["instructions"] = self._instructions
178
+ if self._settings["instructions"]:
179
+ create_params["instructions"] = self._settings["instructions"]
149
180
 
150
- if self._speed:
151
- create_params["speed"] = self._speed
181
+ if self._settings["speed"]:
182
+ create_params["speed"] = self._settings["speed"]
152
183
 
153
184
  async with self._client.audio.speech.with_streaming_response.create(
154
185
  **create_params
@@ -14,7 +14,6 @@ from loguru import logger
14
14
  from pipecat.frames.frames import (
15
15
  ErrorFrame,
16
16
  Frame,
17
- TTSAudioRawFrame,
18
17
  TTSStartedFrame,
19
18
  TTSStoppedFrame,
20
19
  )
@@ -99,16 +98,15 @@ class PiperTTSService(TTSService):
99
98
 
100
99
  await self.start_tts_usage_metrics(text)
101
100
 
101
+ yield TTSStartedFrame()
102
+
102
103
  CHUNK_SIZE = self.chunk_size
103
104
 
104
- yield TTSStartedFrame()
105
- async for chunk in response.content.iter_chunked(CHUNK_SIZE):
106
- # remove wav header if present
107
- if chunk.startswith(b"RIFF"):
108
- chunk = chunk[44:]
109
- if len(chunk) > 0:
110
- await self.stop_ttfb_metrics()
111
- yield TTSAudioRawFrame(chunk, self.sample_rate, 1)
105
+ async for frame in self._stream_audio_frames_from_iterator(
106
+ response.content.iter_chunked(CHUNK_SIZE), strip_wav_header=True
107
+ ):
108
+ await self.stop_ttfb_metrics()
109
+ yield frame
112
110
  except Exception as e:
113
111
  logger.error(f"Error in run_tts: {e}")
114
112
  yield ErrorFrame(error=str(e))
@@ -269,6 +269,8 @@ class PlayHTTTSService(InterruptibleTTSService):
269
269
  raise ValueError("WebSocket URL is not a string")
270
270
 
271
271
  self._websocket = await websocket_connect(self._websocket_url)
272
+
273
+ await self._call_event_handler("on_connected")
272
274
  except ValueError as e:
273
275
  logger.error(f"{self} initialization error: {e}")
274
276
  self._websocket = None
@@ -291,6 +293,7 @@ class PlayHTTTSService(InterruptibleTTSService):
291
293
  finally:
292
294
  self._request_id = None
293
295
  self._websocket = None
296
+ await self._call_event_handler("on_disconnected")
294
297
 
295
298
  async def _get_websocket_url(self):
296
299
  """Retrieve WebSocket URL from PlayHT API."""
@@ -255,6 +255,8 @@ class RimeTTSService(AudioContextWordTTSService):
255
255
  url = f"{self._url}?{params}"
256
256
  headers = {"Authorization": f"Bearer {self._api_key}"}
257
257
  self._websocket = await websocket_connect(url, additional_headers=headers)
258
+
259
+ await self._call_event_handler("on_connected")
258
260
  except Exception as e:
259
261
  logger.error(f"{self} initialization error: {e}")
260
262
  self._websocket = None
@@ -272,6 +274,7 @@ class RimeTTSService(AudioContextWordTTSService):
272
274
  finally:
273
275
  self._context_id = None
274
276
  self._websocket = None
277
+ await self._call_event_handler("on_disconnected")
275
278
 
276
279
  def _get_websocket(self):
277
280
  """Get active websocket connection or raise exception."""
@@ -553,15 +556,13 @@ class RimeHttpTTSService(TTSService):
553
556
 
554
557
  CHUNK_SIZE = self.chunk_size
555
558
 
556
- async for chunk in response.content.iter_chunked(CHUNK_SIZE):
557
- if need_to_strip_wav_header and chunk.startswith(b"RIFF"):
558
- chunk = chunk[44:]
559
- need_to_strip_wav_header = False
559
+ async for frame in self._stream_audio_frames_from_iterator(
560
+ response.content.iter_chunked(CHUNK_SIZE),
561
+ strip_wav_header=need_to_strip_wav_header,
562
+ ):
563
+ await self.stop_ttfb_metrics()
564
+ yield frame
560
565
 
561
- if len(chunk) > 0:
562
- await self.stop_ttfb_metrics()
563
- frame = TTSAudioRawFrame(chunk, self.sample_rate, 1)
564
- yield frame
565
566
  except Exception as e:
566
567
  logger.exception(f"Error generating TTS: {e}")
567
568
  yield ErrorFrame(error=f"Rime TTS error: {str(e)}")
@@ -583,7 +583,9 @@ class RivaSegmentedSTTService(SegmentedSTTService):
583
583
  self._config.language_code = self._language
584
584
 
585
585
  @traced_stt
586
- async def _handle_transcription(self, transcript: str, language: Optional[Language] = None):
586
+ async def _handle_transcription(
587
+ self, transcript: str, is_final: bool, language: Optional[Language] = None
588
+ ):
587
589
  """Handle a transcription result with tracing."""
588
590
  pass
589
591
 
@@ -0,0 +1,9 @@
1
+ #
2
+ # Copyright (c) 2024–2025, Daily
3
+ #
4
+ # SPDX-License-Identifier: BSD 2-Clause License
5
+ #
6
+
7
+ from .llm import SalesforceAgentLLMService
8
+
9
+ __all__ = ["SalesforceAgentLLMService"]
@@ -0,0 +1,465 @@
1
+ #
2
+ # Copyright (c) 2024–2025, Daily
3
+ #
4
+ # SPDX-License-Identifier: BSD 2-Clause License
5
+ #
6
+
7
+ """Salesforce Agent API LLM service implementation."""
8
+
9
+ import asyncio
10
+ import json
11
+ import os
12
+ import time
13
+ from typing import Any, Dict, List, Optional, AsyncGenerator
14
+ from dataclasses import dataclass
15
+
16
+ import httpx
17
+ from loguru import logger
18
+
19
+ from pipecat.frames.frames import (
20
+ Frame,
21
+ LLMFullResponseEndFrame,
22
+ LLMFullResponseStartFrame,
23
+ LLMMessagesFrame,
24
+ LLMTextFrame,
25
+ LLMUpdateSettingsFrame,
26
+ )
27
+ from pipecat.processors.aggregators.openai_llm_context import (
28
+ OpenAILLMContext,
29
+ OpenAILLMContextFrame,
30
+ )
31
+ from pipecat.processors.frame_processor import FrameDirection
32
+ from pipecat.services.llm_service import LLMService
33
+ from pipecat.services.openai.llm import (
34
+ OpenAIAssistantContextAggregator,
35
+ OpenAIContextAggregatorPair,
36
+ OpenAIUserContextAggregator,
37
+ )
38
+ from pipecat.processors.aggregators.llm_response import (
39
+ LLMAssistantAggregatorParams,
40
+ LLMUserAggregatorParams,
41
+ )
42
+ from env_config import api_config
43
+
44
+
45
+ @dataclass
46
+ class SalesforceSessionInfo:
47
+ """Information about an active Salesforce Agent session."""
48
+
49
+ session_id: str
50
+ agent_id: str
51
+ created_at: float
52
+ last_used: float
53
+
54
+
55
+ class SalesforceAgentLLMService(LLMService):
56
+ """Salesforce Agent API LLM service implementation.
57
+
58
+ This service integrates with Salesforce Agent API to provide conversational
59
+ AI capabilities using Salesforce's Agentforce platform. It follows the same
60
+ pattern as Vistaar LLM service for proper frame processing.
61
+ """
62
+
63
+ def __init__(
64
+ self,
65
+ *,
66
+ model: str = "salesforce-agent",
67
+ session_timeout_secs: float = 3600.0,
68
+ **kwargs,
69
+ ):
70
+ """Initialize Salesforce Agent LLM service.
71
+
72
+ Reads configuration from environment variables:
73
+ - SALESFORCE_AGENT_ID: The Salesforce agent ID to interact with
74
+ - SALESFORCE_API_KEY: OAuth access token (optional, will use client credentials if not provided)
75
+ - SALESFORCE_ORG_DOMAIN: Salesforce org domain (e.g., https://myorg.my.salesforce.com)
76
+ - SALESFORCE_CLIENT_ID: Connected app client ID for OAuth
77
+ - SALESFORCE_CLIENT_SECRET: Connected app client secret for OAuth
78
+ - SALESFORCE_API_HOST: Salesforce API host (default: https://api.salesforce.com)
79
+
80
+ Args:
81
+ model: The model name (defaults to "salesforce-agent").
82
+ session_timeout_secs: Session timeout in seconds (default: 1 hour).
83
+ **kwargs: Additional arguments passed to parent LLMService.
84
+ """
85
+ # Initialize parent LLM service
86
+ super().__init__(**kwargs)
87
+
88
+ self._agent_id = api_config.SALESFORCE_AGENT_ID
89
+ self._api_key = api_config.SALESFORCE_API_KEY
90
+ self._org_domain = api_config.SALESFORCE_ORG_DOMAIN
91
+ self._client_id = api_config.SALESFORCE_CLIENT_ID
92
+ self._client_secret = api_config.SALESFORCE_CLIENT_SECRET
93
+ self._api_host = api_config.SALESFORCE_API_HOST
94
+
95
+
96
+ # Validate required environment variables
97
+ required_vars = {
98
+ "SALESFORCE_AGENT_ID": self._agent_id,
99
+ "SALESFORCE_ORG_DOMAIN": self._org_domain,
100
+ "SALESFORCE_CLIENT_ID": self._client_id,
101
+ "SALESFORCE_CLIENT_SECRET": self._client_secret,
102
+ }
103
+
104
+ missing_vars = [var for var, value in required_vars.items() if not value]
105
+ if missing_vars:
106
+ raise ValueError(f"Missing required environment variables: {', '.join(missing_vars)}")
107
+
108
+ logger.info(f"Salesforce LLM initialized - Agent ID: {self._agent_id}")
109
+
110
+ self._session_timeout_secs = session_timeout_secs
111
+
112
+ # Session management
113
+ self._sessions: Dict[str, SalesforceSessionInfo] = {}
114
+ self._current_session_id: Optional[str] = None
115
+
116
+ # HTTP client for API calls
117
+ self._http_client = httpx.AsyncClient(timeout=30.0)
118
+
119
+
120
+ async def __aenter__(self):
121
+ """Async context manager entry."""
122
+ return self
123
+
124
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
125
+ """Async context manager exit."""
126
+ await self._cleanup_sessions()
127
+ await self._http_client.aclose()
128
+
129
+ def can_generate_metrics(self) -> bool:
130
+ """Check if this service can generate processing metrics."""
131
+ return True
132
+
133
+ async def _get_access_token(self) -> str:
134
+ """Get OAuth access token using client credentials."""
135
+ if self._api_key and not self._api_key.startswith("Bearer"):
136
+ return self._api_key
137
+
138
+ token_url = f"{self._org_domain}/services/oauth2/token"
139
+ data = {
140
+ "grant_type": "client_credentials",
141
+ "client_id": self._client_id,
142
+ "client_secret": self._client_secret,
143
+ }
144
+
145
+ try:
146
+ response = await self._http_client.post(token_url, data=data)
147
+ response.raise_for_status()
148
+ token_data = response.json()
149
+ return token_data["access_token"]
150
+ except Exception as e:
151
+ logger.error(f"Failed to get access token: {e}")
152
+ raise
153
+
154
+ async def _create_session(self) -> str:
155
+ """Create a new Salesforce Agent session."""
156
+ access_token = await self._get_access_token()
157
+ session_url = f"{self._api_host}/einstein/ai-agent/v1/agents/{self._agent_id}/sessions"
158
+
159
+ headers = {
160
+ "Authorization": f"Bearer {access_token}",
161
+ "Content-Type": "application/json",
162
+ }
163
+
164
+ external_session_key = f"pipecat-{int(time.time())}-{id(self)}"
165
+
166
+ payload = {
167
+ "externalSessionKey": external_session_key,
168
+ "instanceConfig": {"endpoint": self._org_domain},
169
+ "tz": "America/Los_Angeles",
170
+ "variables": [{"name": "$Context.EndUserLanguage", "type": "Text", "value": "en_US"}],
171
+ "featureSupport": "Streaming",
172
+ "streamingCapabilities": {"chunkTypes": ["Text"]},
173
+ "bypassUser": True,
174
+ }
175
+
176
+ try:
177
+ response = await self._http_client.post(session_url, headers=headers, json=payload)
178
+ response.raise_for_status()
179
+ session_data = response.json()
180
+ session_id = session_data["sessionId"]
181
+
182
+ # Store session info
183
+ current_time = time.time()
184
+ self._sessions[session_id] = SalesforceSessionInfo(
185
+ session_id=session_id,
186
+ agent_id=self._agent_id,
187
+ created_at=current_time,
188
+ last_used=current_time,
189
+ )
190
+
191
+ logger.debug(f"Created Salesforce Agent session: {session_id}")
192
+ return session_id
193
+
194
+ except Exception as e:
195
+ logger.error(f"Failed to create Salesforce Agent session: {e}")
196
+ raise
197
+
198
+ async def _get_or_create_session(self) -> str:
199
+ """Get existing session or create a new one."""
200
+ current_time = time.time()
201
+
202
+ # Check if current session is still valid
203
+ if self._current_session_id and self._current_session_id in self._sessions:
204
+ session = self._sessions[self._current_session_id]
205
+ if current_time - session.last_used < self._session_timeout_secs:
206
+ session.last_used = current_time
207
+ return self._current_session_id
208
+ else:
209
+ # Session expired, remove it
210
+ del self._sessions[self._current_session_id]
211
+ self._current_session_id = None
212
+
213
+ # Create new session
214
+ self._current_session_id = await self._create_session()
215
+ return self._current_session_id
216
+
217
+ async def _cleanup_sessions(self):
218
+ """Clean up expired sessions."""
219
+ current_time = time.time()
220
+ expired_sessions = []
221
+
222
+ for session_id, session in self._sessions.items():
223
+ if current_time - session.last_used > self._session_timeout_secs:
224
+ expired_sessions.append(session_id)
225
+
226
+ for session_id in expired_sessions:
227
+ try:
228
+ # End the session via API
229
+ access_token = await self._get_access_token()
230
+ url = f"{self._api_host}/einstein/ai-agent/v1/sessions/{session_id}"
231
+ headers = {
232
+ "Authorization": f"Bearer {access_token}",
233
+ "x-session-end-reason": "UserRequest",
234
+ }
235
+ await self._http_client.delete(url, headers=headers)
236
+ except Exception as e:
237
+ logger.warning(f"Failed to end session {session_id}: {e}")
238
+ finally:
239
+ del self._sessions[session_id]
240
+ if self._current_session_id == session_id:
241
+ self._current_session_id = None
242
+
243
+ def _extract_user_message(self, context: OpenAILLMContext) -> str:
244
+ """Extract the last user message from context.
245
+
246
+ Similar to Vistaar pattern - extract only the most recent user message.
247
+
248
+ Args:
249
+ context: The OpenAI LLM context containing messages.
250
+
251
+ Returns:
252
+ The last user message as a string.
253
+ """
254
+ messages = context.get_messages()
255
+
256
+ # Find the last user message (iterate in reverse for efficiency)
257
+ for message in reversed(messages):
258
+ if message.get("role") == "user":
259
+ content = message.get("content", "")
260
+
261
+ # Handle content that might be a list (for multimodal messages)
262
+ if isinstance(content, list):
263
+ text_parts = [
264
+ item.get("text", "") for item in content if item.get("type") == "text"
265
+ ]
266
+ content = " ".join(text_parts)
267
+
268
+ if isinstance(content, str):
269
+ return content.strip()
270
+
271
+ return ""
272
+
273
+ def _generate_sequence_id(self) -> int:
274
+ """Generate a sequence ID for the message."""
275
+ return int(time.time() * 1000) % 2147483647 # Keep within int32 range
276
+
277
+ async def _stream_salesforce_response(self, session_id: str, user_message: str) -> AsyncGenerator[str, None]:
278
+ """Stream response from Salesforce Agent API."""
279
+ access_token = await self._get_access_token()
280
+ url = f"{self._api_host}/einstein/ai-agent/v1/sessions/{session_id}/messages/stream"
281
+
282
+ headers = {
283
+ "Authorization": f"Bearer {access_token}",
284
+ "Content-Type": "application/json",
285
+ "Accept": "text/event-stream",
286
+ }
287
+
288
+ message_data = {
289
+ "message": {
290
+ "sequenceId": self._generate_sequence_id(),
291
+ "type": "Text",
292
+ "text": user_message
293
+ },
294
+ "variables": [
295
+ {
296
+ "name": "$Context.EndUserLanguage",
297
+ "type": "Text",
298
+ "value": "en_US"
299
+ }
300
+ ]
301
+ }
302
+
303
+ try:
304
+ logger.info(f"🌐 Salesforce API request: {user_message[:50]}...")
305
+ async with self._http_client.stream("POST", url, headers=headers, json=message_data) as response:
306
+ response.raise_for_status()
307
+
308
+ async for line in response.aiter_lines():
309
+ if not line:
310
+ continue
311
+
312
+ # Parse SSE format
313
+ if line.startswith("data: "):
314
+ try:
315
+ data = json.loads(line[6:])
316
+ message = data.get("message", {})
317
+ message_type = message.get("type")
318
+
319
+ if message_type == "TextChunk":
320
+ content = message.get("text", "") or message.get("message", "")
321
+ if content:
322
+ yield content
323
+ elif message_type == "EndOfTurn":
324
+ logger.info("🏁 Salesforce response complete")
325
+ break
326
+ elif message_type == "Inform":
327
+ # Skip INFORM events to avoid duplication
328
+ continue
329
+
330
+ except json.JSONDecodeError as e:
331
+ logger.warning(f"JSON decode error: {e}, line: {line}")
332
+ continue
333
+
334
+ except Exception as e:
335
+ logger.error(f"Failed to stream from Salesforce Agent API: {e}")
336
+ raise
337
+
338
+ async def _process_context(self, context: OpenAILLMContext):
339
+ """Process the LLM context and generate streaming response.
340
+
341
+ Following Vistaar pattern for simple, direct processing.
342
+
343
+ Args:
344
+ context: The OpenAI LLM context containing messages to process.
345
+ """
346
+ logger.info(f"🔄 Salesforce processing context with {len(context.get_messages())} messages")
347
+
348
+ # Extract user message from context first
349
+ user_message = self._extract_user_message(context)
350
+
351
+ if not user_message:
352
+ logger.warning("Salesforce: No user message found in context")
353
+ return
354
+
355
+ try:
356
+ logger.info(f"🎯 Salesforce extracted query: {user_message}")
357
+
358
+ # Start response
359
+ await self.push_frame(LLMFullResponseStartFrame())
360
+ await self.push_frame(LLMFullResponseStartFrame(),FrameDirection.UPSTREAM)
361
+ await self.start_processing_metrics()
362
+ await self.start_ttfb_metrics()
363
+
364
+ # Get or create session
365
+ session_id = await self._get_or_create_session()
366
+
367
+ first_chunk = True
368
+
369
+ # Stream the response
370
+ async for text_chunk in self._stream_salesforce_response(session_id, user_message):
371
+ if first_chunk:
372
+ await self.stop_ttfb_metrics()
373
+ first_chunk = False
374
+
375
+ # Push each text chunk as it arrives
376
+ await self.push_frame(LLMTextFrame(text=text_chunk))
377
+
378
+ except Exception as e:
379
+ logger.error(f"Salesforce context processing error: {type(e).__name__}: {str(e)}")
380
+ import traceback
381
+ logger.error(f"Salesforce traceback: {traceback.format_exc()}")
382
+ raise
383
+ finally:
384
+ await self.stop_processing_metrics()
385
+ await self.push_frame(LLMFullResponseEndFrame())
386
+ await self.push_frame(LLMFullResponseEndFrame(), FrameDirection.UPSTREAM)
387
+
388
+ async def process_frame(self, frame: Frame, direction: FrameDirection):
389
+ """Process frames for LLM completion requests.
390
+
391
+ Following the exact Vistaar pattern - call super() for non-context frames only.
392
+
393
+ Args:
394
+ frame: The frame to process.
395
+ direction: The direction of frame processing.
396
+ """
397
+ context = None
398
+ if isinstance(frame, OpenAILLMContextFrame):
399
+ context = frame.context
400
+ logger.info(f"🔍 Received OpenAILLMContextFrame with {len(context.get_messages())} messages")
401
+ elif isinstance(frame, LLMMessagesFrame):
402
+ context = OpenAILLMContext.from_messages(frame.messages)
403
+ logger.info(f"🔍 Received LLMMessagesFrame with {len(frame.messages)} messages")
404
+ elif isinstance(frame, LLMUpdateSettingsFrame):
405
+ # Call super for settings frames and update settings
406
+ await super().process_frame(frame, direction)
407
+ settings = frame.settings
408
+ logger.debug(f"Updated Salesforce settings: {settings}")
409
+ else:
410
+ # For non-context frames, call super and push them downstream
411
+ await super().process_frame(frame, direction)
412
+ await self.push_frame(frame, direction)
413
+
414
+ if context:
415
+ try:
416
+ await self._process_context(context)
417
+ except httpx.TimeoutException:
418
+ logger.error("Timeout while processing Salesforce request")
419
+ await self._call_event_handler("on_completion_timeout")
420
+ except Exception as e:
421
+ logger.error(f"Error processing Salesforce request: {e}")
422
+ raise
423
+
424
+ def create_context_aggregator(
425
+ self,
426
+ context: OpenAILLMContext,
427
+ *,
428
+ user_params: LLMUserAggregatorParams = LLMUserAggregatorParams(),
429
+ assistant_params: LLMAssistantAggregatorParams = LLMAssistantAggregatorParams(),
430
+ ) -> OpenAIContextAggregatorPair:
431
+ """Create context aggregators for Salesforce LLM.
432
+
433
+ Since Salesforce uses OpenAI-compatible message format, we reuse OpenAI's
434
+ context aggregators directly, similar to how Vistaar works.
435
+
436
+ Args:
437
+ context: The LLM context to create aggregators for.
438
+ user_params: Parameters for user message aggregation.
439
+ assistant_params: Parameters for assistant message aggregation.
440
+
441
+ Returns:
442
+ OpenAIContextAggregatorPair: A pair of OpenAI context aggregators,
443
+ compatible with Salesforce's OpenAI-like message format.
444
+ """
445
+ context.set_llm_adapter(self.get_llm_adapter())
446
+ user = OpenAIUserContextAggregator(context, params=user_params)
447
+ assistant = OpenAIAssistantContextAggregator(context, params=assistant_params)
448
+ return OpenAIContextAggregatorPair(_user=user, _assistant=assistant)
449
+
450
+ def get_llm_adapter(self):
451
+ """Get the LLM adapter for this service."""
452
+ from pipecat.adapters.services.open_ai_adapter import OpenAILLMAdapter
453
+ return OpenAILLMAdapter()
454
+
455
+ async def close(self):
456
+ """Close the HTTP client when the service is destroyed."""
457
+ await self._cleanup_sessions()
458
+ await self._http_client.aclose()
459
+
460
+ def __del__(self):
461
+ """Ensure the client is closed on deletion."""
462
+ try:
463
+ asyncio.create_task(self._http_client.aclose())
464
+ except:
465
+ pass