dv-pipecat-ai 0.0.85.dev698__py3-none-any.whl → 0.0.85.dev699__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of dv-pipecat-ai might be problematic.
--- dv_pipecat_ai-0.0.85.dev698.dist-info/METADATA
+++ dv_pipecat_ai-0.0.85.dev699.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dv-pipecat-ai
-Version: 0.0.85.dev698
+Version: 0.0.85.dev699
 Summary: An open source framework for voice (and multimodal) assistants
 License-Expression: BSD-2-Clause
 Project-URL: Source, https://github.com/pipecat-ai/pipecat
--- dv_pipecat_ai-0.0.85.dev698.dist-info/RECORD
+++ dv_pipecat_ai-0.0.85.dev699.dist-info/RECORD
@@ -1,4 +1,4 @@
-dv_pipecat_ai-0.0.85.dev698.dist-info/licenses/LICENSE,sha256=DWY2QGf2eMCFhuu2ChairtT6CB7BEFffNVhXWc4Od08,1301
+dv_pipecat_ai-0.0.85.dev699.dist-info/licenses/LICENSE,sha256=DWY2QGf2eMCFhuu2ChairtT6CB7BEFffNVhXWc4Od08,1301
 pipecat/__init__.py,sha256=j0Xm6adxHhd7D06dIyyPV_GlBYLlBnTAERVvD_jAARQ,861
 pipecat/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pipecat/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -317,6 +317,8 @@ pipecat/services/rime/tts.py,sha256=ZCuwPSPxR-pC59WH8T53z5PYXNBzT9GFX_79F3s0gaE,
 pipecat/services/riva/__init__.py,sha256=rObSsj504O_TMXhPBg_ymqKslZBhovlR-A0aaRZ0O6A,276
 pipecat/services/riva/stt.py,sha256=dtg8toijmexWB3uipw0EQ7ov3DFgHj40kFFv1Zadmmc,25116
 pipecat/services/riva/tts.py,sha256=idbqx3I2NlWCXtrIFsjEaYapxA3BLIA14ai3aMBh-2w,8158
+pipecat/services/salesforce/__init__.py,sha256=OFvYbcvCadYhcKdBAVLj3ZUXVXQ1HyVyhgxIFf6_Thg,173
+pipecat/services/salesforce/llm.py,sha256=i4bHB7IE61ahDCZJxbO4jqOI3AdA13m1ufhEBuPWqTk,18065
 pipecat/services/sambanova/__init__.py,sha256=oTXExLic-qTcsfsiWmssf3Elclf3IIWoN41_2IpoF18,128
 pipecat/services/sambanova/llm.py,sha256=5XVfPLEk__W8ykFqLdV95ZUhlGGkAaJwmbciLdZYtTc,8976
 pipecat/services/sambanova/stt.py,sha256=ZZgEZ7WQjLFHbCko-3LNTtVajjtfUvbtVLtFcaNadVQ,2536
@@ -412,7 +414,7 @@ pipecat/utils/tracing/service_decorators.py,sha256=fwzxFpi8DJl6BJbK74G0UEB4ccMJg
 pipecat/utils/tracing/setup.py,sha256=7TEgPNpq6M8lww8OQvf0P9FzYc5A30xICGklVA-fua0,2892
 pipecat/utils/tracing/turn_context_provider.py,sha256=ikon3plFOx0XbMrH6DdeHttNpb-U0gzMZIm3bWLc9eI,2485
 pipecat/utils/tracing/turn_trace_observer.py,sha256=dma16SBJpYSOE58YDWy89QzHyQFc_9gQZszKeWixuwc,9725
-dv_pipecat_ai-0.0.85.dev698.dist-info/METADATA,sha256=l9LjPnWF8C-VB0NHPuXS-VY36hK2kYPGNmaH8R-c6p4,31861
-dv_pipecat_ai-0.0.85.dev698.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-dv_pipecat_ai-0.0.85.dev698.dist-info/top_level.txt,sha256=kQzG20CxGf-nSsHmtXHx3hY2-8zHA3jYg8jk0TajqXc,8
-dv_pipecat_ai-0.0.85.dev698.dist-info/RECORD,,
+dv_pipecat_ai-0.0.85.dev699.dist-info/METADATA,sha256=VOWNiDVVwydwqaxJJ8XSaX9NIppL69_AQK6GuR8A0uI,31861
+dv_pipecat_ai-0.0.85.dev699.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dv_pipecat_ai-0.0.85.dev699.dist-info/top_level.txt,sha256=kQzG20CxGf-nSsHmtXHx3hY2-8zHA3jYg8jk0TajqXc,8
+dv_pipecat_ai-0.0.85.dev699.dist-info/RECORD,,
--- /dev/null
+++ pipecat/services/salesforce/__init__.py
@@ -0,0 +1,9 @@
+#
+# Copyright (c) 2024–2025, Daily
+#
+# SPDX-License-Identifier: BSD 2-Clause License
+#
+
+from .llm import SalesforceAgentLLMService
+
+__all__ = ["SalesforceAgentLLMService"]
--- /dev/null
+++ pipecat/services/salesforce/llm.py
@@ -0,0 +1,465 @@
+#
+# Copyright (c) 2024–2025, Daily
+#
+# SPDX-License-Identifier: BSD 2-Clause License
+#
+
+"""Salesforce Agent API LLM service implementation."""
+
+import asyncio
+import json
+import os
+import time
+from typing import Any, Dict, List, Optional, AsyncGenerator
+from dataclasses import dataclass
+
+import httpx
+from loguru import logger
+
+from pipecat.frames.frames import (
+    Frame,
+    LLMFullResponseEndFrame,
+    LLMFullResponseStartFrame,
+    LLMMessagesFrame,
+    LLMTextFrame,
+    LLMUpdateSettingsFrame,
+)
+from pipecat.processors.aggregators.openai_llm_context import (
+    OpenAILLMContext,
+    OpenAILLMContextFrame,
+)
+from pipecat.processors.frame_processor import FrameDirection
+from pipecat.services.llm_service import LLMService
+from pipecat.services.openai.llm import (
+    OpenAIAssistantContextAggregator,
+    OpenAIContextAggregatorPair,
+    OpenAIUserContextAggregator,
+)
+from pipecat.processors.aggregators.llm_response import (
+    LLMAssistantAggregatorParams,
+    LLMUserAggregatorParams,
+)
+from env_config import api_config
+
+
+@dataclass
+class SalesforceSessionInfo:
+    """Information about an active Salesforce Agent session."""
+
+    session_id: str
+    agent_id: str
+    created_at: float
+    last_used: float
+
+
+class SalesforceAgentLLMService(LLMService):
+    """Salesforce Agent API LLM service implementation.
+
+    This service integrates with Salesforce Agent API to provide conversational
+    AI capabilities using Salesforce's Agentforce platform. It follows the same
+    pattern as Vistaar LLM service for proper frame processing.
+    """
+
+    def __init__(
+        self,
+        *,
+        model: str = "salesforce-agent",
+        session_timeout_secs: float = 3600.0,
+        **kwargs,
+    ):
+        """Initialize Salesforce Agent LLM service.
+
+        Reads configuration from environment variables:
+        - SALESFORCE_AGENT_ID: The Salesforce agent ID to interact with
+        - SALESFORCE_API_KEY: OAuth access token (optional, will use client credentials if not provided)
+        - SALESFORCE_ORG_DOMAIN: Salesforce org domain (e.g., https://myorg.my.salesforce.com)
+        - SALESFORCE_CLIENT_ID: Connected app client ID for OAuth
+        - SALESFORCE_CLIENT_SECRET: Connected app client secret for OAuth
+        - SALESFORCE_API_HOST: Salesforce API host (default: https://api.salesforce.com)
+
+        Args:
+            model: The model name (defaults to "salesforce-agent").
+            session_timeout_secs: Session timeout in seconds (default: 1 hour).
+            **kwargs: Additional arguments passed to parent LLMService.
+        """
+        # Initialize parent LLM service
+        super().__init__(**kwargs)
+
+        self._agent_id = api_config.SALESFORCE_AGENT_ID
+        self._api_key = api_config.SALESFORCE_API_KEY
+        self._org_domain = api_config.SALESFORCE_ORG_DOMAIN
+        self._client_id = api_config.SALESFORCE_CLIENT_ID
+        self._client_secret = api_config.SALESFORCE_CLIENT_SECRET
+        self._api_host = api_config.SALESFORCE_API_HOST
+
+
+        # Validate required environment variables
+        required_vars = {
+            "SALESFORCE_AGENT_ID": self._agent_id,
+            "SALESFORCE_ORG_DOMAIN": self._org_domain,
+            "SALESFORCE_CLIENT_ID": self._client_id,
+            "SALESFORCE_CLIENT_SECRET": self._client_secret,
+        }
+
+        missing_vars = [var for var, value in required_vars.items() if not value]
+        if missing_vars:
+            raise ValueError(f"Missing required environment variables: {', '.join(missing_vars)}")
+
+        logger.info(f"Salesforce LLM initialized - Agent ID: {self._agent_id}")
+
+        self._session_timeout_secs = session_timeout_secs
+
+        # Session management
+        self._sessions: Dict[str, SalesforceSessionInfo] = {}
+        self._current_session_id: Optional[str] = None
+
+        # HTTP client for API calls
+        self._http_client = httpx.AsyncClient(timeout=30.0)
+
+
+    async def __aenter__(self):
+        """Async context manager entry."""
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        """Async context manager exit."""
+        await self._cleanup_sessions()
+        await self._http_client.aclose()
+
+    def can_generate_metrics(self) -> bool:
+        """Check if this service can generate processing metrics."""
+        return True
+
+    async def _get_access_token(self) -> str:
+        """Get OAuth access token using client credentials."""
+        if self._api_key and not self._api_key.startswith("Bearer"):
+            return self._api_key
+
+        token_url = f"{self._org_domain}/services/oauth2/token"
+        data = {
+            "grant_type": "client_credentials",
+            "client_id": self._client_id,
+            "client_secret": self._client_secret,
+        }
+
+        try:
+            response = await self._http_client.post(token_url, data=data)
+            response.raise_for_status()
+            token_data = response.json()
+            return token_data["access_token"]
+        except Exception as e:
+            logger.error(f"Failed to get access token: {e}")
+            raise
+
+    async def _create_session(self) -> str:
+        """Create a new Salesforce Agent session."""
+        access_token = await self._get_access_token()
+        session_url = f"{self._api_host}/einstein/ai-agent/v1/agents/{self._agent_id}/sessions"
+
+        headers = {
+            "Authorization": f"Bearer {access_token}",
+            "Content-Type": "application/json",
+        }
+
+        external_session_key = f"pipecat-{int(time.time())}-{id(self)}"
+
+        payload = {
+            "externalSessionKey": external_session_key,
+            "instanceConfig": {"endpoint": self._org_domain},
+            "tz": "America/Los_Angeles",
+            "variables": [{"name": "$Context.EndUserLanguage", "type": "Text", "value": "en_US"}],
+            "featureSupport": "Streaming",
+            "streamingCapabilities": {"chunkTypes": ["Text"]},
+            "bypassUser": True,
+        }
+
+        try:
+            response = await self._http_client.post(session_url, headers=headers, json=payload)
+            response.raise_for_status()
+            session_data = response.json()
+            session_id = session_data["sessionId"]
+
+            # Store session info
+            current_time = time.time()
+            self._sessions[session_id] = SalesforceSessionInfo(
+                session_id=session_id,
+                agent_id=self._agent_id,
+                created_at=current_time,
+                last_used=current_time,
+            )
+
+            logger.debug(f"Created Salesforce Agent session: {session_id}")
+            return session_id
+
+        except Exception as e:
+            logger.error(f"Failed to create Salesforce Agent session: {e}")
+            raise
+
+    async def _get_or_create_session(self) -> str:
+        """Get existing session or create a new one."""
+        current_time = time.time()
+
+        # Check if current session is still valid
+        if self._current_session_id and self._current_session_id in self._sessions:
+            session = self._sessions[self._current_session_id]
+            if current_time - session.last_used < self._session_timeout_secs:
+                session.last_used = current_time
+                return self._current_session_id
+            else:
+                # Session expired, remove it
+                del self._sessions[self._current_session_id]
+                self._current_session_id = None
+
+        # Create new session
+        self._current_session_id = await self._create_session()
+        return self._current_session_id
+
+    async def _cleanup_sessions(self):
+        """Clean up expired sessions."""
+        current_time = time.time()
+        expired_sessions = []
+
+        for session_id, session in self._sessions.items():
+            if current_time - session.last_used > self._session_timeout_secs:
+                expired_sessions.append(session_id)
+
+        for session_id in expired_sessions:
+            try:
+                # End the session via API
+                access_token = await self._get_access_token()
+                url = f"{self._api_host}/einstein/ai-agent/v1/sessions/{session_id}"
+                headers = {
+                    "Authorization": f"Bearer {access_token}",
+                    "x-session-end-reason": "UserRequest",
+                }
+                await self._http_client.delete(url, headers=headers)
+            except Exception as e:
+                logger.warning(f"Failed to end session {session_id}: {e}")
+            finally:
+                del self._sessions[session_id]
+                if self._current_session_id == session_id:
+                    self._current_session_id = None
+
+    def _extract_user_message(self, context: OpenAILLMContext) -> str:
+        """Extract the last user message from context.
+
+        Similar to Vistaar pattern - extract only the most recent user message.
+
+        Args:
+            context: The OpenAI LLM context containing messages.
+
+        Returns:
+            The last user message as a string.
+        """
+        messages = context.get_messages()
+
+        # Find the last user message (iterate in reverse for efficiency)
+        for message in reversed(messages):
+            if message.get("role") == "user":
+                content = message.get("content", "")
+
+                # Handle content that might be a list (for multimodal messages)
+                if isinstance(content, list):
+                    text_parts = [
+                        item.get("text", "") for item in content if item.get("type") == "text"
+                    ]
+                    content = " ".join(text_parts)
+
+                if isinstance(content, str):
+                    return content.strip()
+
+        return ""
+
+    def _generate_sequence_id(self) -> int:
+        """Generate a sequence ID for the message."""
+        return int(time.time() * 1000) % 2147483647  # Keep within int32 range
+
+    async def _stream_salesforce_response(self, session_id: str, user_message: str) -> AsyncGenerator[str, None]:
+        """Stream response from Salesforce Agent API."""
+        access_token = await self._get_access_token()
+        url = f"{self._api_host}/einstein/ai-agent/v1/sessions/{session_id}/messages/stream"
+
+        headers = {
+            "Authorization": f"Bearer {access_token}",
+            "Content-Type": "application/json",
+            "Accept": "text/event-stream",
+        }
+
+        message_data = {
+            "message": {
+                "sequenceId": self._generate_sequence_id(),
+                "type": "Text",
+                "text": user_message
+            },
+            "variables": [
+                {
+                    "name": "$Context.EndUserLanguage",
+                    "type": "Text",
+                    "value": "en_US"
+                }
+            ]
+        }
+
+        try:
+            logger.info(f"🌐 Salesforce API request: {user_message[:50]}...")
+            async with self._http_client.stream("POST", url, headers=headers, json=message_data) as response:
+                response.raise_for_status()
+
+                async for line in response.aiter_lines():
+                    if not line:
+                        continue
+
+                    # Parse SSE format
+                    if line.startswith("data: "):
+                        try:
+                            data = json.loads(line[6:])
+                            message = data.get("message", {})
+                            message_type = message.get("type")
+
+                            if message_type == "TextChunk":
+                                content = message.get("text", "") or message.get("message", "")
+                                if content:
+                                    yield content
+                            elif message_type == "EndOfTurn":
+                                logger.info("🏁 Salesforce response complete")
+                                break
+                            elif message_type == "Inform":
+                                # Skip INFORM events to avoid duplication
+                                continue
+
+                        except json.JSONDecodeError as e:
+                            logger.warning(f"JSON decode error: {e}, line: {line}")
+                            continue
+
+        except Exception as e:
+            logger.error(f"Failed to stream from Salesforce Agent API: {e}")
+            raise
+
+    async def _process_context(self, context: OpenAILLMContext):
+        """Process the LLM context and generate streaming response.
+
+        Following Vistaar pattern for simple, direct processing.
+
+        Args:
+            context: The OpenAI LLM context containing messages to process.
+        """
+        logger.info(f"🔄 Salesforce processing context with {len(context.get_messages())} messages")
+
+        # Extract user message from context first
+        user_message = self._extract_user_message(context)
+
+        if not user_message:
+            logger.warning("Salesforce: No user message found in context")
+            return
+
+        try:
+            logger.info(f"🎯 Salesforce extracted query: {user_message}")
+
+            # Start response
+            await self.push_frame(LLMFullResponseStartFrame())
+            await self.push_frame(LLMFullResponseStartFrame(), FrameDirection.UPSTREAM)
+            await self.start_processing_metrics()
+            await self.start_ttfb_metrics()
+
+            # Get or create session
+            session_id = await self._get_or_create_session()
+
+            first_chunk = True
+
+            # Stream the response
+            async for text_chunk in self._stream_salesforce_response(session_id, user_message):
+                if first_chunk:
+                    await self.stop_ttfb_metrics()
+                    first_chunk = False
+
+                # Push each text chunk as it arrives
+                await self.push_frame(LLMTextFrame(text=text_chunk))
+
+        except Exception as e:
+            logger.error(f"Salesforce context processing error: {type(e).__name__}: {str(e)}")
+            import traceback
+            logger.error(f"Salesforce traceback: {traceback.format_exc()}")
+            raise
+        finally:
+            await self.stop_processing_metrics()
+            await self.push_frame(LLMFullResponseEndFrame())
+            await self.push_frame(LLMFullResponseEndFrame(), FrameDirection.UPSTREAM)
+
+    async def process_frame(self, frame: Frame, direction: FrameDirection):
+        """Process frames for LLM completion requests.
+
+        Following the exact Vistaar pattern - call super() for non-context frames only.
+
+        Args:
+            frame: The frame to process.
+            direction: The direction of frame processing.
+        """
+        context = None
+        if isinstance(frame, OpenAILLMContextFrame):
+            context = frame.context
+            logger.info(f"🔍 Received OpenAILLMContextFrame with {len(context.get_messages())} messages")
+        elif isinstance(frame, LLMMessagesFrame):
+            context = OpenAILLMContext.from_messages(frame.messages)
+            logger.info(f"🔍 Received LLMMessagesFrame with {len(frame.messages)} messages")
+        elif isinstance(frame, LLMUpdateSettingsFrame):
+            # Call super for settings frames and update settings
+            await super().process_frame(frame, direction)
+            settings = frame.settings
+            logger.debug(f"Updated Salesforce settings: {settings}")
+        else:
+            # For non-context frames, call super and push them downstream
+            await super().process_frame(frame, direction)
+            await self.push_frame(frame, direction)
+
+        if context:
+            try:
+                await self._process_context(context)
+            except httpx.TimeoutException:
+                logger.error("Timeout while processing Salesforce request")
+                await self._call_event_handler("on_completion_timeout")
+            except Exception as e:
+                logger.error(f"Error processing Salesforce request: {e}")
+                raise
+
+    def create_context_aggregator(
+        self,
+        context: OpenAILLMContext,
+        *,
+        user_params: LLMUserAggregatorParams = LLMUserAggregatorParams(),
+        assistant_params: LLMAssistantAggregatorParams = LLMAssistantAggregatorParams(),
+    ) -> OpenAIContextAggregatorPair:
+        """Create context aggregators for Salesforce LLM.
+
+        Since Salesforce uses OpenAI-compatible message format, we reuse OpenAI's
+        context aggregators directly, similar to how Vistaar works.
+
+        Args:
+            context: The LLM context to create aggregators for.
+            user_params: Parameters for user message aggregation.
+            assistant_params: Parameters for assistant message aggregation.
+
+        Returns:
+            OpenAIContextAggregatorPair: A pair of OpenAI context aggregators,
+            compatible with Salesforce's OpenAI-like message format.
+        """
+        context.set_llm_adapter(self.get_llm_adapter())
+        user = OpenAIUserContextAggregator(context, params=user_params)
+        assistant = OpenAIAssistantContextAggregator(context, params=assistant_params)
+        return OpenAIContextAggregatorPair(_user=user, _assistant=assistant)
+
+    def get_llm_adapter(self):
+        """Get the LLM adapter for this service."""
+        from pipecat.adapters.services.open_ai_adapter import OpenAILLMAdapter
+        return OpenAILLMAdapter()
+
+    async def close(self):
+        """Close the HTTP client when the service is destroyed."""
+        await self._cleanup_sessions()
+        await self._http_client.aclose()
+
+    def __del__(self):
+        """Ensure the client is closed on deletion."""
+        try:
+            asyncio.create_task(self._http_client.aclose())
+        except Exception:
+            pass
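
For orientation, the following is a minimal, text-only sketch of how the new SalesforceAgentLLMService might be wired into a pipecat pipeline. It is illustrative rather than part of the release: it assumes the standard Pipeline, PipelineTask, and PipelineRunner APIs from pipecat, assumes that env_config.api_config already provides the SALESFORCE_* values the constructor validates, and omits the transport, STT, and TTS processors a real voice agent would include.

import asyncio

from pipecat.frames.frames import EndFrame
from pipecat.pipeline.pipeline import Pipeline
from pipecat.pipeline.runner import PipelineRunner
from pipecat.pipeline.task import PipelineTask
from pipecat.processors.aggregators.openai_llm_context import (
    OpenAILLMContext,
    OpenAILLMContextFrame,
)
from pipecat.services.salesforce import SalesforceAgentLLMService


async def main():
    # Credentials are not passed here; the service reads the SALESFORCE_* settings
    # from env_config.api_config and raises ValueError if any are missing.
    llm = SalesforceAgentLLMService()

    context = OpenAILLMContext(
        messages=[{"role": "user", "content": "What is the status of my order?"}]
    )
    aggregators = llm.create_context_aggregator(context)

    pipeline = Pipeline(
        [
            aggregators.user(),       # collects user messages into the shared context
            llm,                      # streams LLMTextFrame chunks from the Salesforce agent
            aggregators.assistant(),  # folds the streamed response back into the context
        ]
    )

    task = PipelineTask(pipeline)
    # Queue one turn with the prepared context, then shut the pipeline down.
    await task.queue_frames([OpenAILLMContextFrame(context), EndFrame()])
    await PipelineRunner().run(task)

    # Ends any open Salesforce session and closes the underlying HTTP client.
    await llm.close()


if __name__ == "__main__":
    asyncio.run(main())

Because the service only forwards the most recent user message to the Salesforce session (see _extract_user_message), conversational state lives on the Salesforce side for the lifetime of a session rather than in the local context.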