dv-pipecat-ai 0.0.85.dev818__py3-none-any.whl → 0.0.85.dev858__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of dv-pipecat-ai might be problematic.
- {dv_pipecat_ai-0.0.85.dev818.dist-info → dv_pipecat_ai-0.0.85.dev858.dist-info}/METADATA +2 -1
- {dv_pipecat_ai-0.0.85.dev818.dist-info → dv_pipecat_ai-0.0.85.dev858.dist-info}/RECORD +32 -29
- pipecat/audio/turn/smart_turn/local_smart_turn_v3.py +5 -1
- pipecat/frames/frames.py +34 -0
- pipecat/metrics/connection_metrics.py +45 -0
- pipecat/processors/aggregators/llm_response.py +25 -4
- pipecat/processors/dtmf_aggregator.py +17 -21
- pipecat/processors/frame_processor.py +51 -8
- pipecat/processors/metrics/frame_processor_metrics.py +108 -0
- pipecat/processors/transcript_processor.py +22 -1
- pipecat/serializers/__init__.py +2 -0
- pipecat/serializers/asterisk.py +16 -2
- pipecat/serializers/convox.py +2 -2
- pipecat/serializers/custom.py +2 -2
- pipecat/serializers/vi.py +326 -0
- pipecat/services/cartesia/tts.py +75 -10
- pipecat/services/deepgram/stt.py +317 -17
- pipecat/services/elevenlabs/stt.py +487 -19
- pipecat/services/elevenlabs/tts.py +28 -4
- pipecat/services/google/llm.py +26 -11
- pipecat/services/openai/base_llm.py +79 -14
- pipecat/services/salesforce/llm.py +321 -86
- pipecat/services/sarvam/tts.py +0 -1
- pipecat/services/soniox/stt.py +45 -10
- pipecat/services/vistaar/llm.py +97 -6
- pipecat/transcriptions/language.py +50 -0
- pipecat/transports/base_input.py +15 -11
- pipecat/transports/base_output.py +29 -3
- pipecat/utils/redis.py +58 -0
- {dv_pipecat_ai-0.0.85.dev818.dist-info → dv_pipecat_ai-0.0.85.dev858.dist-info}/WHEEL +0 -0
- {dv_pipecat_ai-0.0.85.dev818.dist-info → dv_pipecat_ai-0.0.85.dev858.dist-info}/licenses/LICENSE +0 -0
- {dv_pipecat_ai-0.0.85.dev818.dist-info → dv_pipecat_ai-0.0.85.dev858.dist-info}/top_level.txt +0 -0
Expanded diff: pipecat/services/salesforce/llm.py (+321 -86). Some removed lines are truncated in the upstream diff view and are reproduced as shown.

```diff
--- a/pipecat/services/salesforce/llm.py
+++ b/pipecat/services/salesforce/llm.py
@@ -8,12 +8,12 @@
 
 import asyncio
 import json
-import os
 import time
-from typing import Any, Dict, List, Optional, AsyncGenerator
 from dataclasses import dataclass
+from typing import AsyncGenerator, Dict, Optional
 
 import httpx
+from env_config import api_config
 from loguru import logger
 
 from pipecat.frames.frames import (
@@ -24,6 +24,10 @@ from pipecat.frames.frames import (
     LLMTextFrame,
     LLMUpdateSettingsFrame,
 )
+from pipecat.processors.aggregators.llm_response import (
+    LLMAssistantAggregatorParams,
+    LLMUserAggregatorParams,
+)
 from pipecat.processors.aggregators.openai_llm_context import (
     OpenAILLMContext,
     OpenAILLMContextFrame,
@@ -35,11 +39,7 @@ from pipecat.services.openai.llm import (
     OpenAIContextAggregatorPair,
     OpenAIUserContextAggregator,
 )
-from pipecat.processors.aggregators.llm_response import (
-    LLMAssistantAggregatorParams,
-    LLMUserAggregatorParams,
-)
-from env_config import api_config
+from pipecat.utils.redis import create_async_redis_client
 
 
 @dataclass
@@ -56,8 +56,8 @@ class SalesforceAgentLLMService(LLMService):
     """Salesforce Agent API LLM service implementation.
 
     This service integrates with Salesforce Agent API to provide conversational
-
-
+
+    AI capabilities using Salesforce's Agentforce platform.
     """
 
     def __init__(
```
```diff
@@ -65,38 +65,47 @@ class SalesforceAgentLLMService(LLMService):
         *,
         model: str = "salesforce-agent",
         session_timeout_secs: float = 3600.0,
+        agent_id: str = api_config.SALESFORCE_AGENT_ID,
+        org_domain: str = api_config.SALESFORCE_ORG_DOMAIN,
+        client_id: str = api_config.SALESFORCE_CLIENT_ID,
+        client_secret: str = api_config.SALESFORCE_CLIENT_SECRET,
+        api_host: str = api_config.SALESFORCE_API_HOST,
+        redis_url: Optional[str] = None,
         **kwargs,
     ):
         """Initialize Salesforce Agent LLM service.
 
         Reads configuration from environment variables:
         - SALESFORCE_AGENT_ID: The Salesforce agent ID to interact with
-        - SALESFORCE_API_KEY: OAuth access token (optional, will use client credentials if not provided)
         - SALESFORCE_ORG_DOMAIN: Salesforce org domain (e.g., https://myorg.my.salesforce.com)
         - SALESFORCE_CLIENT_ID: Connected app client ID for OAuth
         - SALESFORCE_CLIENT_SECRET: Connected app client secret for OAuth
-        - SALESFORCE_API_HOST: Salesforce API host (
+        - SALESFORCE_API_HOST: Salesforce API host base URL (e.g., https://api.salesforce.com)
 
         Args:
             model: The model name (defaults to "salesforce-agent").
             session_timeout_secs: Session timeout in seconds (default: 1 hour).
+            agent_id: Salesforce agent ID. Defaults to SALESFORCE_AGENT_ID.
+            org_domain: Salesforce org domain. Defaults to SALESFORCE_ORG_DOMAIN.
+            client_id: Salesforce connected app client ID. Defaults to SALESFORCE_CLIENT_ID.
+            client_secret: Salesforce connected app client secret. Defaults to SALESFORCE_CLIENT_SECRET.
+            api_host: Salesforce API host base URL. Defaults to SALESFORCE_API_HOST.
+            redis_url: Optional Redis URL override for token caching.
             **kwargs: Additional arguments passed to parent LLMService.
         """
         # Initialize parent LLM service
         super().__init__(**kwargs)
-
-        self.
-        self.
-        self.
-        self.
-        self._client_secret = api_config.SALESFORCE_CLIENT_SECRET
-        self._api_host = api_config.SALESFORCE_API_HOST
-
+        self._agent_id = agent_id
+        self._org_domain = org_domain
+        self._client_id = client_id
+        self._client_secret = client_secret
+        self._api_host = api_host
 
         # Validate required environment variables
         required_vars = {
             "SALESFORCE_AGENT_ID": self._agent_id,
             "SALESFORCE_ORG_DOMAIN": self._org_domain,
+            "SALESFORCE_API_HOST": self._api_host,
             "SALESFORCE_CLIENT_ID": self._client_id,
             "SALESFORCE_CLIENT_SECRET": self._client_secret,
         }
```
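For orientation, here is a minimal construction sketch against the new signature above. All values are placeholders, and any keyword argument left out falls back to the corresponding `api_config` default shown in the diff.

```python
# Hypothetical call site for the new constructor; all credential values below are
# placeholders, and omitted keywords fall back to the api_config defaults above.
from pipecat.services.salesforce.llm import SalesforceAgentLLMService

llm = SalesforceAgentLLMService(
    agent_id="0XxPLACEHOLDER0000",                 # placeholder agent ID
    org_domain="https://myorg.my.salesforce.com",  # connected org domain
    client_id="PLACEHOLDER_CLIENT_ID",
    client_secret="PLACEHOLDER_CLIENT_SECRET",
    api_host="https://api.salesforce.com",
    redis_url="redis://localhost:6379/0",          # optional token-cache override
    session_timeout_secs=1800.0,
)
```

Leaving `redis_url` at its default of `None` makes the service fall back to `api_config.REDIS_URL` when present; when no Redis client can be created, the token-cache helpers below simply return early and every request fetches a fresh token.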
```diff
@@ -109,31 +118,165 @@ class SalesforceAgentLLMService(LLMService):
 
         self._session_timeout_secs = session_timeout_secs
 
+        if redis_url is not None:
+            self._redis_url = redis_url
+        else:
+            self._redis_url = getattr(api_config, "REDIS_URL", None)
+        self._redis_client = None
+        self._redis_client_init_attempted = False
+        self._token_cache_key = f"salesforce_agent_access_token:{self._agent_id}"
+        self._token_cache_leeway_secs = 300
+        self._sequence_counter = 0
+        self._warmup_task: Optional[asyncio.Task] = None
+
         # Session management
         self._sessions: Dict[str, SalesforceSessionInfo] = {}
         self._current_session_id: Optional[str] = None
 
         # HTTP client for API calls
-        self._http_client = httpx.AsyncClient(
-
+        self._http_client = httpx.AsyncClient(
+            timeout=30.0,
+            limits=httpx.Limits(
+                max_keepalive_connections=10,
+                max_connections=100,
+                keepalive_expiry=None,
+            ),
+        )
+
+        self._schedule_session_warmup()
 
     async def __aenter__(self):
         """Async context manager entry."""
+        await self.ensure_session_ready()
         return self
 
     async def __aexit__(self, exc_type, exc_val, exc_tb):
         """Async context manager exit."""
+        if self._warmup_task:
+            try:
+                await asyncio.shield(self._warmup_task)
+            except Exception as exc:  # pragma: no cover - warmup best effort
+                logger.debug(f"Salesforce warmup task failed during exit: {exc}")
+            finally:
+                self._warmup_task = None
+
         await self._cleanup_sessions()
         await self._http_client.aclose()
 
+        if self._redis_client:
+            close_coro = getattr(self._redis_client, "close", None)
+            if callable(close_coro):
+                try:
+                    await close_coro()
+                except Exception as exc:  # pragma: no cover - best effort cleanup
+                    logger.debug(f"Failed to close Redis client cleanly: {exc}")
+            self._redis_client = None
+            self._redis_client_init_attempted = False
+
     def can_generate_metrics(self) -> bool:
         """Check if this service can generate processing metrics."""
         return True
 
-
-        """
-
-
+    def _schedule_session_warmup(self):
+        """Kick off background warm-up if an event loop is running."""
+        try:
+            loop = asyncio.get_running_loop()
+        except RuntimeError:
+            return
+
+        if loop.is_closed():
+            return
+
+        async def _warmup():
+            try:
+                await self.ensure_session_ready()
+            except Exception as exc:  # pragma: no cover - warmup best effort
+                logger.warning(f"Salesforce warmup failed: {exc}")
+                raise
+
+        task = loop.create_task(_warmup())
+
+        def _on_done(warmup_task: asyncio.Task):
+            if warmup_task.cancelled():
+                logger.debug("Salesforce warmup task cancelled")
+            elif warmup_task.exception():
+                logger.warning(f"Salesforce warmup task error: {warmup_task.exception()}")
+            self._warmup_task = None
+
+        task.add_done_callback(_on_done)
+        self._warmup_task = task
+
+    def _get_redis_client(self):
+        """Return a Redis client for token caching if configured."""
+        if self._redis_client is None and not self._redis_client_init_attempted:
+            self._redis_client_init_attempted = True
+            self._redis_client = create_async_redis_client(
+                self._redis_url, decode_responses=True, encoding="utf-8", logger=logger
+            )
+
+        return self._redis_client
+
+    async def _get_cached_access_token(self) -> Optional[str]:
+        """Return cached access token from Redis."""
+        redis_client = self._get_redis_client()
+        if not redis_client:
+            return None
+
+        try:
+            return await redis_client.get(self._token_cache_key)
+        except Exception as exc:  # pragma: no cover - cache failures shouldn't break flow
+            logger.warning(f"Failed to read Salesforce token from Redis: {exc}")
+            return None
+
+    async def _set_cached_access_token(self, token: str, expires_in: Optional[int]):
+        """Persist access token in Redis with TTL matching Salesforce expiry."""
+        redis_client = self._get_redis_client()
+        if not redis_client:
+            return
+
+        ttl_seconds = 3600  # Default fallback
+
+        # Try to get expiration from expires_in parameter first
+        if expires_in is not None:
+            try:
+                ttl_seconds = max(int(expires_in) - self._token_cache_leeway_secs, 30)
+                logger.debug(f"Using expires_in parameter: {expires_in}s, TTL: {ttl_seconds}s")
+            except (TypeError, ValueError):
+                logger.debug("Unable to parse expires_in parameter")
+                expires_in = None
+
+        # If no expires_in available, use default TTL
+        if expires_in is None:
+            logger.debug("No expiration info found, using default TTL")
+
+        try:
+            await redis_client.set(self._token_cache_key, token, ex=ttl_seconds)
+            logger.debug(f"Cached Salesforce token with TTL: {ttl_seconds}s")
+        except Exception as exc:  # pragma: no cover - cache failures shouldn't break flow
+            logger.warning(f"Failed to store Salesforce token in Redis: {exc}")
+
+    async def _clear_cached_access_token(self):
+        """Clear cached access token from Redis."""
+        redis_client = self._get_redis_client()
+        if not redis_client:
+            return
+
+        try:
+            await redis_client.delete(self._token_cache_key)
+            logger.debug("Cleared cached Salesforce access token")
+        except Exception as exc:  # pragma: no cover - cache failures shouldn't break flow
+            logger.warning(f"Failed to clear Salesforce token from Redis: {exc}")
+
+    async def _get_access_token(self, *, force_refresh: bool = False) -> str:
+        """Get OAuth access token using client credentials.
+
+        Args:
+            force_refresh: If True, skip cache and fetch fresh token from Salesforce.
+        """
+        if not force_refresh:
+            cached_token = await self._get_cached_access_token()
+            if cached_token:
+                return cached_token
 
         token_url = f"{self._org_domain}/services/oauth2/token"
         data = {
```
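The caching rule in `_set_cached_access_token` can be read in isolation: cache the token with a TTL of `expires_in` minus a 300-second leeway (never below 30 seconds), or 3600 seconds when Salesforce reports no expiry. Below is a standalone paraphrase of that rule; `redis.asyncio` is assumed as a stand-in for the `create_async_redis_client` helper from the new `pipecat/utils/redis.py`, which is not expanded in this diff.

```python
# Standalone paraphrase of the TTL rule above; redis.asyncio is an assumption here,
# since pipecat.utils.redis.create_async_redis_client is not expanded in this diff.
from typing import Optional

import redis.asyncio as redis

LEEWAY_SECS = 300   # refresh 5 minutes before Salesforce would expire the token
DEFAULT_TTL = 3600  # fallback when the token response carries no expires_in
MIN_TTL = 30


def token_ttl(expires_in: Optional[int]) -> int:
    """Compute the cache TTL the same way _set_cached_access_token does."""
    if expires_in is None:
        return DEFAULT_TTL
    try:
        return max(int(expires_in) - LEEWAY_SECS, MIN_TTL)
    except (TypeError, ValueError):
        return DEFAULT_TTL


async def cache_token(client: redis.Redis, key: str, token: str, expires_in: Optional[int]) -> None:
    # SET with EX so Redis drops the entry shortly before the token goes stale.
    await client.set(key, token, ex=token_ttl(expires_in))
```

With the 300-second leeway, a token reported as valid for 1800 seconds is cached for 1500 seconds, so a fresh token is fetched before the cached one can expire mid-conversation.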
```diff
@@ -146,21 +289,62 @@ class SalesforceAgentLLMService(LLMService):
             response = await self._http_client.post(token_url, data=data)
             response.raise_for_status()
             token_data = response.json()
-
+            access_token = token_data["access_token"]
+            await self._set_cached_access_token(access_token, token_data.get("expires_in"))
+            logger.debug("Retrieved fresh Salesforce access token")
+            return access_token
         except Exception as e:
             logger.error(f"Failed to get access token: {e}")
             raise
 
+    async def _make_authenticated_request(self, method: str, url: str, **kwargs):
+        """Make an authenticated HTTP request with automatic token refresh on auth errors.
+
+        Args:
+            method: HTTP method (GET, POST, DELETE, etc.)
+            url: Request URL
+            **kwargs: Additional arguments passed to httpx request
+
+        Returns:
+            httpx.Response: The HTTP response
+
+        Raises:
+            Exception: If request fails after token refresh attempt
+        """
+        # First attempt with current token
+        access_token = await self._get_access_token()
+        headers = kwargs.get("headers", {})
+        headers["Authorization"] = f"Bearer {access_token}"
+        kwargs["headers"] = headers
+
+        try:
+            response = await self._http_client.request(method, url, **kwargs)
+            response.raise_for_status()
+            return response
+        except httpx.HTTPStatusError as e:
+            # If authentication error, clear cache and retry with fresh token
+            if e.response.status_code in (401, 403):
+                logger.warning(
+                    f"Salesforce authentication error ({e.response.status_code}), refreshing token"
+                )
+                await self._clear_cached_access_token()
+
+                # Retry with fresh token
+                fresh_token = await self._get_access_token(force_refresh=True)
+                headers["Authorization"] = f"Bearer {fresh_token}"
+                kwargs["headers"] = headers
+
+                response = await self._http_client.request(method, url, **kwargs)
+                response.raise_for_status()
+                return response
+            else:
+                # Re-raise non-auth errors
+                raise
+
     async def _create_session(self) -> str:
         """Create a new Salesforce Agent session."""
-        access_token = await self._get_access_token()
         session_url = f"{self._api_host}/einstein/ai-agent/v1/agents/{self._agent_id}/sessions"
 
-        headers = {
-            "Authorization": f"Bearer {access_token}",
-            "Content-Type": "application/json",
-        }
-
         external_session_key = f"pipecat-{int(time.time())}-{id(self)}"
 
         payload = {
```
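The `_make_authenticated_request` helper added above follows a common refresh-and-retry shape. Here is a condensed, self-contained sketch of that pattern; `get_token` and `clear_token` are hypothetical stand-ins for `_get_access_token` and `_clear_cached_access_token`.

```python
# Generic sketch of the refresh-and-retry pattern used by _make_authenticated_request.
from typing import Awaitable, Callable

import httpx


async def authenticated_request(
    client: httpx.AsyncClient,
    method: str,
    url: str,
    get_token: Callable[[bool], Awaitable[str]],   # get_token(force_refresh)
    clear_token: Callable[[], Awaitable[None]],
    **kwargs,
) -> httpx.Response:
    """Issue a bearer-authenticated request, refreshing the token once on 401/403."""
    headers = kwargs.pop("headers", {})
    headers["Authorization"] = f"Bearer {await get_token(False)}"
    try:
        response = await client.request(method, url, headers=headers, **kwargs)
        response.raise_for_status()
        return response
    except httpx.HTTPStatusError as exc:
        if exc.response.status_code not in (401, 403):
            raise  # non-auth errors propagate unchanged
        await clear_token()  # drop the cached token before forcing a refresh
        headers["Authorization"] = f"Bearer {await get_token(True)}"
        response = await client.request(method, url, headers=headers, **kwargs)
        response.raise_for_status()
        return response
```

Only 401 and 403 responses trigger a refresh; other HTTP errors propagate unchanged, which keeps genuine API failures visible to the caller.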
```diff
@@ -174,8 +358,9 @@ class SalesforceAgentLLMService(LLMService):
         }
 
         try:
-            response = await self.
-
+            response = await self._make_authenticated_request(
+                "POST", session_url, headers={"Content-Type": "application/json"}, json=payload
+            )
             session_data = response.json()
             session_id = session_data["sessionId"]
 
@@ -207,13 +392,17 @@ class SalesforceAgentLLMService(LLMService):
             return self._current_session_id
         else:
             # Session expired, remove it
-
+            self._sessions.pop(self._current_session_id, None)
             self._current_session_id = None
 
         # Create new session
         self._current_session_id = await self._create_session()
         return self._current_session_id
 
+    async def ensure_session_ready(self) -> str:
+        """Ensure a Salesforce session is ready for use."""
+        return await self._get_or_create_session()
+
     async def _cleanup_sessions(self):
         """Clean up expired sessions."""
         current_time = time.time()
```
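With `ensure_session_ready()` now public and `__aenter__` warming the session, a call site could look like the hedged sketch below; the surrounding pipeline wiring is omitted and `llm` is assumed to be a configured `SalesforceAgentLLMService`.

```python
# Hypothetical call site: __aenter__ warms the session, ensure_session_ready() is
# idempotent and reuses a live session, and __aexit__ ends sessions and closes
# the HTTP and Redis clients.
async def warm_and_report(llm) -> None:
    async with llm:
        session_id = await llm.ensure_session_ready()
        print(f"Salesforce session ready: {session_id}")
```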
```diff
@@ -226,85 +415,82 @@ class SalesforceAgentLLMService(LLMService):
         for session_id in expired_sessions:
             try:
                 # End the session via API
-                access_token = await self._get_access_token()
                 url = f"{self._api_host}/einstein/ai-agent/v1/sessions/{session_id}"
-
-                    "
-
-                }
-                await self._http_client.delete(url, headers=headers)
+                await self._make_authenticated_request(
+                    "DELETE", url, headers={"x-session-end-reason": "UserRequest"}
+                )
             except Exception as e:
                 logger.warning(f"Failed to end session {session_id}: {e}")
             finally:
-
+                self._sessions.pop(session_id, None)
                 if self._current_session_id == session_id:
                     self._current_session_id = None
 
     def _extract_user_message(self, context: OpenAILLMContext) -> str:
         """Extract the last user message from context.
-
+
         Similar to Vistaar pattern - extract only the most recent user message.
-
+
         Args:
             context: The OpenAI LLM context containing messages.
-
+
         Returns:
             The last user message as a string.
         """
         messages = context.get_messages()
-
+
         # Find the last user message (iterate in reverse for efficiency)
         for message in reversed(messages):
             if message.get("role") == "user":
                 content = message.get("content", "")
-
+
                 # Handle content that might be a list (for multimodal messages)
                 if isinstance(content, list):
                     text_parts = [
                         item.get("text", "") for item in content if item.get("type") == "text"
                     ]
                     content = " ".join(text_parts)
-
+
                 if isinstance(content, str):
                     return content.strip()
-
+
         return ""
 
     def _generate_sequence_id(self) -> int:
         """Generate a sequence ID for the message."""
-
+        self._sequence_counter += 1
+        return self._sequence_counter
 
-    async def _stream_salesforce_response(
+    async def _stream_salesforce_response(
+        self, session_id: str, user_message: str
+    ) -> AsyncGenerator[str, None]:
         """Stream response from Salesforce Agent API."""
-        access_token = await self._get_access_token()
         url = f"{self._api_host}/einstein/ai-agent/v1/sessions/{session_id}/messages/stream"
-
-        headers = {
-            "Authorization": f"Bearer {access_token}",
-            "Content-Type": "application/json",
-            "Accept": "text/event-stream",
-        }
 
         message_data = {
             "message": {
                 "sequenceId": self._generate_sequence_id(),
                 "type": "Text",
-                "text": user_message
+                "text": user_message,
             },
-            "variables": [
-
-
-
-
-
-
+            "variables": [{"name": "$Context.EndUserLanguage", "type": "Text", "value": "en_US"}],
+        }
+
+        # First attempt with current token
+        access_token = await self._get_access_token()
+        headers = {
+            "Authorization": f"Bearer {access_token}",
+            "Content-Type": "application/json",
+            "Accept": "text/event-stream",
         }
 
         try:
             logger.info(f"🌐 Salesforce API request: {user_message[:50]}...")
-            async with self._http_client.stream(
+            async with self._http_client.stream(
+                "POST", url, headers=headers, json=message_data
+            ) as response:
                 response.raise_for_status()
-
+
                 async for line in response.aiter_lines():
                     if not line:
                         continue
@@ -331,53 +517,101 @@ class SalesforceAgentLLMService(LLMService):
                             logger.warning(f"JSON decode error: {e}, line: {line}")
                             continue
 
+        except httpx.HTTPStatusError as e:
+            # If authentication error, retry with fresh token
+            if e.response.status_code in (401, 403):
+                logger.warning(
+                    f"Salesforce streaming authentication error ({e.response.status_code}), refreshing token"
+                )
+                await self._clear_cached_access_token()
+
+                # Retry with fresh token
+                fresh_token = await self._get_access_token(force_refresh=True)
+                headers["Authorization"] = f"Bearer {fresh_token}"
+
+                logger.info(
+                    f"🔄 Retrying Salesforce stream with fresh token: {user_message[:50]}..."
+                )
+                async with self._http_client.stream(
+                    "POST", url, headers=headers, json=message_data
+                ) as response:
+                    response.raise_for_status()
+
+                    async for line in response.aiter_lines():
+                        if not line:
+                            continue
+
+                        # Parse SSE format
+                        if line.startswith("data: "):
+                            try:
+                                data = json.loads(line[6:])
+                                message = data.get("message", {})
+                                message_type = message.get("type")
+
+                                if message_type == "TextChunk":
+                                    content = message.get("text", "") or message.get("message", "")
+                                    if content:
+                                        yield content
+                                elif message_type == "EndOfTurn":
+                                    logger.info("🏁 Salesforce response complete")
+                                    break
+                                elif message_type == "Inform":
+                                    # Skip INFORM events to avoid duplication
+                                    continue
+
+                            except json.JSONDecodeError as e:
+                                logger.warning(f"JSON decode error: {e}, line: {line}")
+                                continue
+            else:
+                # Re-raise non-auth errors
+                logger.error(f"Failed to stream from Salesforce Agent API: {e}")
+                raise
         except Exception as e:
             logger.error(f"Failed to stream from Salesforce Agent API: {e}")
             raise
 
     async def _process_context(self, context: OpenAILLMContext):
         """Process the LLM context and generate streaming response.
-
-        Following Vistaar pattern for simple, direct processing.
 
         Args:
             context: The OpenAI LLM context containing messages to process.
         """
         logger.info(f"🔄 Salesforce processing context with {len(context.get_messages())} messages")
-
+
         # Extract user message from context first
         user_message = self._extract_user_message(context)
-
+
         if not user_message:
             logger.warning("Salesforce: No user message found in context")
             return
-
+
         try:
             logger.info(f"🎯 Salesforce extracted query: {user_message}")
-
-            # Start response
+
+            # Start response
             await self.push_frame(LLMFullResponseStartFrame())
-            await self.push_frame(LLMFullResponseStartFrame(),FrameDirection.UPSTREAM)
+            await self.push_frame(LLMFullResponseStartFrame(), FrameDirection.UPSTREAM)
             await self.start_processing_metrics()
             await self.start_ttfb_metrics()
-
+
             # Get or create session
             session_id = await self._get_or_create_session()
-
+
             first_chunk = True
-
+
             # Stream the response
             async for text_chunk in self._stream_salesforce_response(session_id, user_message):
                 if first_chunk:
                     await self.stop_ttfb_metrics()
                     first_chunk = False
-
+
                 # Push each text chunk as it arrives
                 await self.push_frame(LLMTextFrame(text=text_chunk))
-
+
         except Exception as e:
             logger.error(f"Salesforce context processing error: {type(e).__name__}: {str(e)}")
             import traceback
+
             logger.error(f"Salesforce traceback: {traceback.format_exc()}")
             raise
         finally:
```
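The streaming handler above parses Server-Sent Events line by line. The following self-contained sketch condenses just that event handling; `parse_agent_events` is a hypothetical helper, not part of the module, mirroring the TextChunk, EndOfTurn, and Inform branches shown in the diff.

```python
# Sketch of the SSE handling in _stream_salesforce_response: only "data: " lines are
# parsed, TextChunk events yield text, EndOfTurn stops the stream, and Inform events
# are skipped to avoid duplicating the streamed chunks.
import json
from typing import AsyncIterator


async def parse_agent_events(lines: AsyncIterator[str]) -> AsyncIterator[str]:
    """Yield text chunks from an Agent API SSE stream (mirrors the branches above)."""
    async for line in lines:
        if not line.startswith("data: "):
            continue
        try:
            data = json.loads(line[len("data: "):])
        except json.JSONDecodeError:
            continue  # tolerate malformed or partial lines
        message = data.get("message", {})
        kind = message.get("type")
        if kind == "TextChunk":
            text = message.get("text", "") or message.get("message", "")
            if text:
                yield text
        elif kind == "EndOfTurn":
            break
        # "Inform" and any other event types are skipped
```

Feeding it `response.aiter_lines()` from the streaming request would yield the same text chunks the service pushes downstream as `LLMTextFrame`s.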
```diff
@@ -387,8 +621,6 @@ class SalesforceAgentLLMService(LLMService):
 
     async def process_frame(self, frame: Frame, direction: FrameDirection):
         """Process frames for LLM completion requests.
-
-        Following the exact Vistaar pattern - call super() for non-context frames only.
 
         Args:
             frame: The frame to process.
@@ -397,7 +629,9 @@ class SalesforceAgentLLMService(LLMService):
         context = None
         if isinstance(frame, OpenAILLMContextFrame):
             context = frame.context
-            logger.info(
+            logger.info(
+                f"🔍 Received OpenAILLMContextFrame with {len(context.get_messages())} messages"
+            )
         elif isinstance(frame, LLMMessagesFrame):
             context = OpenAILLMContext.from_messages(frame.messages)
             logger.info(f"🔍 Received LLMMessagesFrame with {len(frame.messages)} messages")
@@ -431,7 +665,7 @@ class SalesforceAgentLLMService(LLMService):
         """Create context aggregators for Salesforce LLM.
 
         Since Salesforce uses OpenAI-compatible message format, we reuse OpenAI's
-        context aggregators directly
+        context aggregators directly
 
         Args:
             context: The LLM context to create aggregators for.
@@ -450,6 +684,7 @@ class SalesforceAgentLLMService(LLMService):
     def get_llm_adapter(self):
         """Get the LLM adapter for this service."""
         from pipecat.adapters.services.open_ai_adapter import OpenAILLMAdapter
+
         return OpenAILLMAdapter()
 
     async def close(self):
@@ -462,4 +697,4 @@ class SalesforceAgentLLMService(LLMService):
         try:
             asyncio.create_task(self._http_client.aclose())
         except:
-            pass
+            pass
```