promptlayer 1.0.59__py3-none-any.whl → 1.0.61__py3-none-any.whl
This diff shows the changes between publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
Potentially problematic release: this version of promptlayer might be problematic.
- promptlayer/__init__.py +1 -1
- promptlayer/promptlayer.py +7 -2
- promptlayer/promptlayer_mixins.py +14 -12
- promptlayer/streaming/__init__.py +54 -0
- promptlayer/streaming/blueprint_builder.py +139 -0
- promptlayer/streaming/response_handlers.py +550 -0
- promptlayer/streaming/stream_processor.py +100 -0
- promptlayer/utils.py +1 -581
- {promptlayer-1.0.59.dist-info → promptlayer-1.0.61.dist-info}/METADATA +1 -1
- promptlayer-1.0.61.dist-info/RECORD +22 -0
- promptlayer-1.0.59.dist-info/RECORD +0 -18
- {promptlayer-1.0.59.dist-info → promptlayer-1.0.61.dist-info}/LICENSE +0 -0
- {promptlayer-1.0.59.dist-info → promptlayer-1.0.61.dist-info}/WHEEL +0 -0
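The headline change in this release is a refactor of the streaming code: roughly 580 lines of provider-specific stream-handling helpers are removed from promptlayer/utils.py, while a new promptlayer/streaming package (blueprint_builder.py, response_handlers.py, stream_processor.py) is added. Judging from the removed stream_response/astream_response helpers shown in the utils.py hunks below, a streamed run yields dictionaries carrying the latest raw provider chunk plus, on the final item, the PromptLayer request id and prompt blueprint. The sketch below is illustrative only; the entry point, keyword arguments, and template name are assumptions and are not taken from this diff.

# Minimal consumer sketch (assumed API surface; see the removed stream_response
# helper below for the shape of each yielded item).
from promptlayer import PromptLayer

pl = PromptLayer(api_key="pl_...")  # placeholder key

stream = pl.run(
    prompt_name="my-template",                # hypothetical template name
    input_variables={"topic": "streaming"},
    stream=True,
)

last_item = None
for item in stream:
    print(item["raw_response"])               # latest raw provider chunk
    last_item = item

# The final yielded item also carries the PromptLayer metadata.
if last_item is not None:
    print(last_item["request_id"], last_item["prompt_blueprint"])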
promptlayer/utils.py
CHANGED
@@ -9,7 +9,7 @@ import sys
 import types
 from copy import deepcopy
 from enum import Enum
-from typing import Any,
+from typing import Any, Dict, List, Optional, Union
 from uuid import uuid4

 import httpx
@@ -1231,364 +1231,6 @@ async def aget_all_prompt_templates(
         raise Exception(f"PromptLayer had the following error while getting all your prompt templates: {str(e)}") from e


-def openai_stream_chat(results: list):
-    from openai.types.chat import (
-        ChatCompletion,
-        ChatCompletionChunk,
-        ChatCompletionMessage,
-        ChatCompletionMessageToolCall,
-    )
-    from openai.types.chat.chat_completion import Choice
-    from openai.types.chat.chat_completion_message_tool_call import Function
-
-    chat_completion_chunks: List[ChatCompletionChunk] = results
-    response: ChatCompletion = ChatCompletion(
-        id="",
-        object="chat.completion",
-        choices=[
-            Choice(
-                finish_reason="stop",
-                index=0,
-                message=ChatCompletionMessage(role="assistant"),
-            )
-        ],
-        created=0,
-        model="",
-    )
-    last_result = chat_completion_chunks[-1]
-    response.id = last_result.id
-    response.created = last_result.created
-    response.model = last_result.model
-    response.system_fingerprint = last_result.system_fingerprint
-    response.usage = last_result.usage
-    content = ""
-    tool_calls: Union[List[ChatCompletionMessageToolCall], None] = None
-    for result in chat_completion_chunks:
-        choices = result.choices
-        if len(choices) == 0:
-            continue
-        if choices[0].delta.content:
-            content = f"{content}{result.choices[0].delta.content}"
-
-        delta = choices[0].delta
-        if delta.tool_calls:
-            tool_calls = tool_calls or []
-            last_tool_call = None
-            if len(tool_calls) > 0:
-                last_tool_call = tool_calls[-1]
-            tool_call = delta.tool_calls[0]
-            if not tool_call.function:
-                continue
-            if not last_tool_call or tool_call.id:
-                tool_calls.append(
-                    ChatCompletionMessageToolCall(
-                        id=tool_call.id or "",
-                        function=Function(
-                            name=tool_call.function.name or "",
-                            arguments=tool_call.function.arguments or "",
-                        ),
-                        type=tool_call.type or "function",
-                    )
-                )
-                continue
-            last_tool_call.function.name = f"{last_tool_call.function.name}{tool_call.function.name or ''}"
-            last_tool_call.function.arguments = (
-                f"{last_tool_call.function.arguments}{tool_call.function.arguments or ''}"
-            )
-
-    response.choices[0].message.content = content
-    response.choices[0].message.tool_calls = tool_calls
-    return response
-
-
-async def aopenai_stream_chat(generator: AsyncIterable[Any]) -> Any:
-    from openai.types.chat import (
-        ChatCompletion,
-        ChatCompletionChunk,
-        ChatCompletionMessage,
-        ChatCompletionMessageToolCall,
-    )
-    from openai.types.chat.chat_completion import Choice
-    from openai.types.chat.chat_completion_message_tool_call import Function
-
-    chat_completion_chunks: List[ChatCompletionChunk] = []
-    response: ChatCompletion = ChatCompletion(
-        id="",
-        object="chat.completion",
-        choices=[
-            Choice(
-                finish_reason="stop",
-                index=0,
-                message=ChatCompletionMessage(role="assistant"),
-            )
-        ],
-        created=0,
-        model="",
-    )
-    content = ""
-    tool_calls: Union[List[ChatCompletionMessageToolCall], None] = None
-
-    async for result in generator:
-        chat_completion_chunks.append(result)
-        choices = result.choices
-        if len(choices) == 0:
-            continue
-        if choices[0].delta.content:
-            content = f"{content}{choices[0].delta.content}"
-
-        delta = choices[0].delta
-        if delta.tool_calls:
-            tool_calls = tool_calls or []
-            last_tool_call = None
-            if len(tool_calls) > 0:
-                last_tool_call = tool_calls[-1]
-            tool_call = delta.tool_calls[0]
-            if not tool_call.function:
-                continue
-            if not last_tool_call or tool_call.id:
-                tool_calls.append(
-                    ChatCompletionMessageToolCall(
-                        id=tool_call.id or "",
-                        function=Function(
-                            name=tool_call.function.name or "",
-                            arguments=tool_call.function.arguments or "",
-                        ),
-                        type=tool_call.type or "function",
-                    )
-                )
-                continue
-            last_tool_call.function.name = f"{last_tool_call.function.name}{tool_call.function.name or ''}"
-            last_tool_call.function.arguments = (
-                f"{last_tool_call.function.arguments}{tool_call.function.arguments or ''}"
-            )
-
-    # After collecting all chunks, set the response attributes
-    if chat_completion_chunks:
-        last_result = chat_completion_chunks[-1]
-        response.id = last_result.id
-        response.created = last_result.created
-        response.model = last_result.model
-        response.system_fingerprint = getattr(last_result, "system_fingerprint", None)
-        response.usage = last_result.usage
-
-    response.choices[0].message.content = content
-    response.choices[0].message.tool_calls = tool_calls
-    return response
-
-
-def openai_stream_completion(results: list):
-    from openai.types.completion import Completion, CompletionChoice
-
-    completions: List[Completion] = results
-    last_chunk = completions[-1]
-    response = Completion(
-        id=last_chunk.id,
-        created=last_chunk.created,
-        model=last_chunk.model,
-        object="text_completion",
-        choices=[CompletionChoice(finish_reason="stop", index=0, text="")],
-    )
-    text = ""
-    for completion in completions:
-        usage = completion.usage
-        system_fingerprint = completion.system_fingerprint
-        if len(completion.choices) > 0 and completion.choices[0].text:
-            text = f"{text}{completion.choices[0].text}"
-        if usage:
-            response.usage = usage
-        if system_fingerprint:
-            response.system_fingerprint = system_fingerprint
-    response.choices[0].text = text
-    return response
-
-
-async def aopenai_stream_completion(generator: AsyncIterable[Any]) -> Any:
-    from openai.types.completion import Completion, CompletionChoice
-
-    completions: List[Completion] = []
-    text = ""
-    response = Completion(
-        id="",
-        created=0,
-        model="",
-        object="text_completion",
-        choices=[CompletionChoice(finish_reason="stop", index=0, text="")],
-    )
-
-    async for completion in generator:
-        completions.append(completion)
-        usage = completion.usage
-        system_fingerprint = getattr(completion, "system_fingerprint", None)
-        if len(completion.choices) > 0 and completion.choices[0].text:
-            text = f"{text}{completion.choices[0].text}"
-        if usage:
-            response.usage = usage
-        if system_fingerprint:
-            response.system_fingerprint = system_fingerprint
-
-    # After collecting all completions, set the response attributes
-    if completions:
-        last_chunk = completions[-1]
-        response.id = last_chunk.id
-        response.created = last_chunk.created
-        response.model = last_chunk.model
-
-    response.choices[0].text = text
-    return response
-
-
-def anthropic_stream_message(results: list):
-    from anthropic.types import Message, MessageStreamEvent, Usage
-
-    message_stream_events: List[MessageStreamEvent] = results
-    response: Message = Message(
-        id="",
-        model="",
-        content=[],
-        role="assistant",
-        type="message",
-        stop_reason="stop_sequence",
-        stop_sequence=None,
-        usage=Usage(input_tokens=0, output_tokens=0),
-    )
-
-    for event in message_stream_events:
-        if event.type == "message_start":
-            response = event.message
-            break
-
-    content_blocks, usage, stop_reason = build_anthropic_content_blocks(message_stream_events)
-    response.content = content_blocks
-    if usage:
-        response.usage.output_tokens = usage.output_tokens
-    if stop_reason:
-        response.stop_reason = stop_reason
-
-    return response
-
-
-async def aanthropic_stream_message(generator: AsyncIterable[Any]) -> Any:
-    from anthropic.types import Message, MessageStreamEvent, Usage
-
-    message_stream_events: List[MessageStreamEvent] = []
-    response: Message = Message(
-        id="",
-        model="",
-        content=[],
-        role="assistant",
-        type="message",
-        stop_reason="stop_sequence",
-        stop_sequence=None,
-        usage=Usage(input_tokens=0, output_tokens=0),
-    )
-
-    async for event in generator:
-        if event.type == "message_start":
-            response = event.message
-        message_stream_events.append(event)
-
-    content_blocks, usage, stop_reason = build_anthropic_content_blocks(message_stream_events)
-    response.content = content_blocks
-    if usage:
-        response.usage.output_tokens = usage.output_tokens
-    if stop_reason:
-        response.stop_reason = stop_reason
-
-    return response
-
-
-def anthropic_stream_completion(results: list):
-    from anthropic.types import Completion
-
-    completions: List[Completion] = results
-    last_chunk = completions[-1]
-    response = Completion(
-        id=last_chunk.id,
-        completion="",
-        model=last_chunk.model,
-        stop_reason="stop",
-        type="completion",
-    )
-
-    text = ""
-    for completion in completions:
-        text = f"{text}{completion.completion}"
-    response.completion = text
-    return response
-
-
-async def aanthropic_stream_completion(generator: AsyncIterable[Any]) -> Any:
-    from anthropic.types import Completion
-
-    completions: List[Completion] = []
-    text = ""
-    response = Completion(
-        id="",
-        completion="",
-        model="",
-        stop_reason="stop",
-        type="completion",
-    )
-
-    async for completion in generator:
-        completions.append(completion)
-        text = f"{text}{completion.completion}"
-
-    # After collecting all completions, set the response attributes
-    if completions:
-        last_chunk = completions[-1]
-        response.id = last_chunk.id
-        response.model = last_chunk.model
-
-    response.completion = text
-    return response
-
-
-def stream_response(*, generator: Generator, after_stream: Callable, map_results: Callable):
-    data = {
-        "request_id": None,
-        "raw_response": None,
-        "prompt_blueprint": None,
-    }
-    results = []
-    for result in generator:
-        results.append(result)
-        data["raw_response"] = result
-        yield data
-    request_response = map_results(results)
-    response = after_stream(request_response=request_response.model_dump())
-    data["request_id"] = response.get("request_id")
-    data["prompt_blueprint"] = response.get("prompt_blueprint")
-    yield data
-
-
-async def astream_response(
-    generator: AsyncIterable[Any],
-    after_stream: Callable[..., Any],
-    map_results: Callable[[Any], Any],
-) -> AsyncGenerator[Dict[str, Any], None]:
-    data = {
-        "request_id": None,
-        "raw_response": None,
-        "prompt_blueprint": None,
-    }
-    results = []
-    async for result in generator:
-        results.append(result)
-        data["raw_response"] = result
-        yield data
-
-    async def async_generator_from_list(lst):
-        for item in lst:
-            yield item
-
-    request_response = await map_results(async_generator_from_list(results))
-    after_stream_response = await after_stream(request_response=request_response.model_dump())
-    data["request_id"] = after_stream_response.get("request_id")
-    data["prompt_blueprint"] = after_stream_response.get("prompt_blueprint")
-    yield data
-
-
 def openai_chat_request(client, **kwargs):
     return client.chat.completions.create(**kwargs)

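The removed OpenAI helpers above rebuild a single, non-streamed ChatCompletion (or Completion) object from the collected chunks. The subtle part is merging tool-call deltas: a delta that carries an id (or arrives when no tool call exists yet) starts a new entry, while id-less deltas append their name and argument fragments to the most recent entry. A self-contained sketch of that merge rule, using plain dicts instead of the openai.types classes:

# Illustrative re-implementation of the tool-call merge rule from the removed aggregators.
def merge_tool_call_deltas(deltas):
    tool_calls = []
    for delta in deltas:
        if delta.get("id") or not tool_calls:
            # A delta with an id (or the very first delta) starts a new tool call.
            tool_calls.append({
                "id": delta.get("id") or "",
                "name": delta.get("name") or "",
                "arguments": delta.get("arguments") or "",
            })
        else:
            # Otherwise the fragment extends the most recent tool call.
            tool_calls[-1]["name"] += delta.get("name") or ""
            tool_calls[-1]["arguments"] += delta.get("arguments") or ""
    return tool_calls

deltas = [
    {"id": "call_1", "name": "get_weather", "arguments": ""},
    {"arguments": '{"city": '},
    {"arguments": '"Paris"}'},
]
print(merge_tool_call_deltas(deltas))
# [{'id': 'call_1', 'name': 'get_weather', 'arguments': '{"city": "Paris"}'}]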
@@ -1776,133 +1418,6 @@ async def amistral_request(
     return await client.chat.complete_async(**function_kwargs)


-def mistral_stream_chat(results: list):
-    from openai.types.chat import ChatCompletion, ChatCompletionMessage, ChatCompletionMessageToolCall
-    from openai.types.chat.chat_completion import Choice
-    from openai.types.chat.chat_completion_message_tool_call import Function
-
-    last_result = results[-1]
-    response = ChatCompletion(
-        id=last_result.data.id,
-        object="chat.completion",
-        choices=[
-            Choice(
-                finish_reason=last_result.data.choices[0].finish_reason or "stop",
-                index=0,
-                message=ChatCompletionMessage(role="assistant"),
-            )
-        ],
-        created=last_result.data.created,
-        model=last_result.data.model,
-    )
-
-    content = ""
-    tool_calls = None
-
-    for result in results:
-        choices = result.data.choices
-        if len(choices) == 0:
-            continue
-
-        delta = choices[0].delta
-        if delta.content is not None:
-            content = f"{content}{delta.content}"
-
-        if delta.tool_calls:
-            tool_calls = tool_calls or []
-            for tool_call in delta.tool_calls:
-                if len(tool_calls) == 0 or tool_call.id:
-                    tool_calls.append(
-                        ChatCompletionMessageToolCall(
-                            id=tool_call.id or "",
-                            function=Function(
-                                name=tool_call.function.name,
-                                arguments=tool_call.function.arguments,
-                            ),
-                            type="function",
-                        )
-                    )
-                else:
-                    last_tool_call = tool_calls[-1]
-                    if tool_call.function.name:
-                        last_tool_call.function.name = f"{last_tool_call.function.name}{tool_call.function.name}"
-                    if tool_call.function.arguments:
-                        last_tool_call.function.arguments = (
-                            f"{last_tool_call.function.arguments}{tool_call.function.arguments}"
-                        )
-
-    response.choices[0].message.content = content
-    response.choices[0].message.tool_calls = tool_calls
-    response.usage = last_result.data.usage
-    return response
-
-
-async def amistral_stream_chat(generator: AsyncIterable[Any]) -> Any:
-    from openai.types.chat import ChatCompletion, ChatCompletionMessage, ChatCompletionMessageToolCall
-    from openai.types.chat.chat_completion import Choice
-    from openai.types.chat.chat_completion_message_tool_call import Function
-
-    completion_chunks = []
-    response = ChatCompletion(
-        id="",
-        object="chat.completion",
-        choices=[
-            Choice(
-                finish_reason="stop",
-                index=0,
-                message=ChatCompletionMessage(role="assistant"),
-            )
-        ],
-        created=0,
-        model="",
-    )
-    content = ""
-    tool_calls = None
-
-    async for result in generator:
-        completion_chunks.append(result)
-        choices = result.data.choices
-        if len(choices) == 0:
-            continue
-        delta = choices[0].delta
-        if delta.content is not None:
-            content = f"{content}{delta.content}"
-
-        if delta.tool_calls:
-            tool_calls = tool_calls or []
-            for tool_call in delta.tool_calls:
-                if len(tool_calls) == 0 or tool_call.id:
-                    tool_calls.append(
-                        ChatCompletionMessageToolCall(
-                            id=tool_call.id or "",
-                            function=Function(
-                                name=tool_call.function.name,
-                                arguments=tool_call.function.arguments,
-                            ),
-                            type="function",
-                        )
-                    )
-                else:
-                    last_tool_call = tool_calls[-1]
-                    if tool_call.function.name:
-                        last_tool_call.function.name = f"{last_tool_call.function.name}{tool_call.function.name}"
-                    if tool_call.function.arguments:
-                        last_tool_call.function.arguments = (
-                            f"{last_tool_call.function.arguments}{tool_call.function.arguments}"
-                        )
-
-    if completion_chunks:
-        last_result = completion_chunks[-1]
-        response.id = last_result.data.id
-        response.created = last_result.data.created
-        response.model = last_result.data.model
-        response.usage = last_result.data.usage
-
-    response.choices[0].message.content = content
-    response.choices[0].message.tool_calls = tool_calls
-    return response
-
-
 def google_chat_request(client, **kwargs):
     from google.genai.chats import Content

@@ -1927,65 +1442,6 @@ def google_completions_request(client, **kwargs):
     return client.models.generate_content(model=model, contents=contents, config=config)


-def _build_google_response_from_parts(thought_content: str, regular_content: str, function_calls: list, last_result):
-    """Helper function to build Google response with thought, regular, and function call parts."""
-    from google.genai.chats import Part
-
-    response = last_result.model_copy()
-    final_parts = []
-
-    if thought_content:
-        thought_part = Part(text=thought_content, thought=True)
-        final_parts.append(thought_part)
-
-    if regular_content:
-        text_part = Part(text=regular_content, thought=None)
-        final_parts.append(text_part)
-
-    for function_call in function_calls:
-        function_part = Part(function_call=function_call, thought=None)
-        final_parts.append(function_part)
-
-    if final_parts:
-        response.candidates[0].content.parts = final_parts
-
-    return response
-
-
-def map_google_stream_response(results: list):
-    from google.genai.chats import GenerateContentResponse
-
-    response = GenerateContentResponse()
-    if not results:
-        return response
-    results: List[GenerateContentResponse] = results
-
-    thought_content = ""
-    regular_content = ""
-    function_calls = []
-
-    for result in results:
-        if result.candidates and result.candidates[0].content.parts:
-            for part in result.candidates[0].content.parts:
-                if hasattr(part, "text") and part.text:
-                    if hasattr(part, "thought") and part.thought:
-                        thought_content = f"{thought_content}{part.text}"
-                    else:
-                        regular_content = f"{regular_content}{part.text}"
-                elif hasattr(part, "function_call") and part.function_call:
-                    function_calls.append(part.function_call)
-
-    return _build_google_response_from_parts(thought_content, regular_content, function_calls, results[-1])
-
-
-def google_stream_chat(results: list):
-    return map_google_stream_response(results)
-
-
-def google_stream_completion(results: list):
-    return map_google_stream_response(results)
-
-
 MAP_TYPE_TO_GOOGLE_FUNCTION = {
     "chat": google_chat_request,
     "completion": google_completions_request,
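The removed Google helpers above fold a stream of GenerateContentResponse chunks into one response by sorting each content part into thought text, regular text, or a function call, then rebuilding the final part list. A small illustration of that classification, using SimpleNamespace stand-ins rather than the google.genai types:

# Sketch of the part classification performed by map_google_stream_response above.
from types import SimpleNamespace

def classify_parts(parts):
    thought, regular, function_calls = "", "", []
    for part in parts:
        if getattr(part, "text", None):
            if getattr(part, "thought", None):
                thought += part.text          # reasoning/"thought" text
            else:
                regular += part.text          # user-visible text
        elif getattr(part, "function_call", None):
            function_calls.append(part.function_call)
    return thought, regular, function_calls

parts = [
    SimpleNamespace(text="planning...", thought=True, function_call=None),
    SimpleNamespace(text="Hello", thought=None, function_call=None),
    SimpleNamespace(text=None, thought=None, function_call={"name": "lookup"}),
]
print(classify_parts(parts))
# ('planning...', 'Hello', [{'name': 'lookup'}])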
@@ -2038,42 +1494,6 @@ async def agoogle_request(prompt_blueprint: GetPromptTemplateResponse, client_kw
     return await request_to_make(client, **function_kwargs)


-async def amap_google_stream_response(generator: AsyncIterable[Any]):
-    from google.genai.chats import GenerateContentResponse
-
-    response = GenerateContentResponse()
-
-    thought_content = ""
-    regular_content = ""
-    function_calls = []
-    last_result = None
-
-    async for result in generator:
-        last_result = result
-        if result.candidates and result.candidates[0].content.parts:
-            for part in result.candidates[0].content.parts:
-                if hasattr(part, "text") and part.text:
-                    if hasattr(part, "thought") and part.thought:
-                        thought_content = f"{thought_content}{part.text}"
-                    else:
-                        regular_content = f"{regular_content}{part.text}"
-                elif hasattr(part, "function_call") and part.function_call:
-                    function_calls.append(part.function_call)
-
-    if not last_result:
-        return response
-
-    return _build_google_response_from_parts(thought_content, regular_content, function_calls, last_result)
-
-
-async def agoogle_stream_chat(generator: AsyncIterable[Any]):
-    return await amap_google_stream_response(generator)
-
-
-async def agoogle_stream_completion(generator: AsyncIterable[Any]):
-    return await amap_google_stream_response(generator)
-
-
 def vertexai_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
     if "gemini" in prompt_blueprint["metadata"]["model"]["name"]:
         return google_request(
promptlayer-1.0.61.dist-info/RECORD
ADDED
@@ -0,0 +1,22 @@
+promptlayer/__init__.py,sha256=nPvZs4XEeUdjEe3DmFZ5C-KBN4LifXxdYbK2mIextTY,140
+promptlayer/groups/__init__.py,sha256=xhOAolLUBkr76ZHvJr29OwjCIk1V9qKQXjZCuyTJUIY,429
+promptlayer/groups/groups.py,sha256=YPROicy-TzpkrpA8vOpZS2lwvJ6VRtlbQ1S2oT1N0vM,338
+promptlayer/promptlayer.py,sha256=qaxxSvimmXgN45q-IvWsAtyzIJ-w397F97ofsH_7w00,22516
+promptlayer/promptlayer_base.py,sha256=jOgXzNZlV1LKOOsXSSAOgn8o4hXn_EV0oY9Nf3Bsu_s,6872
+promptlayer/promptlayer_mixins.py,sha256=x-HOqd7SaKnVHoGeySYKzwlPVnqpFH5oCohJQIEjLQY,12172
+promptlayer/span_exporter.py,sha256=Pc1-zWAcjVCSykh-4rYPqiEZvzkG9xaYLVoHFY_TWaQ,2410
+promptlayer/streaming/__init__.py,sha256=yNO77fyOi_scNPbE-eIEDGwSOyp8WYyPZ7ZrHaoipmM,1523
+promptlayer/streaming/blueprint_builder.py,sha256=NLmqwspHoAsecrY7varbF4EQaUg5yBKfBxS4y7UycuU,5925
+promptlayer/streaming/response_handlers.py,sha256=vNvpP-RLVl2uHkKLc8Ci9bmNldCezRey40tgtBEd4bo,19005
+promptlayer/streaming/stream_processor.py,sha256=wgY2B1PEJA3xWotDtJaeGS7rjhadAh7SVZ_q5QkEPbg,3752
+promptlayer/templates.py,sha256=7ObDPMzHXjttDdJdCXA_pDL9XAnmcujIWucmgZJcOC8,1179
+promptlayer/track/__init__.py,sha256=tyweLTAY7UpYpBHWwY-T3pOPDIlGjcgccYXqU_r0694,1710
+promptlayer/track/track.py,sha256=A-awcYwsSwxktrlCMchy8NITIquwxU1UXbgLZMwqrA0,3164
+promptlayer/types/__init__.py,sha256=xJcvQuOk91ZBBePb40-1FDNDKYrZoH5lPE2q6_UhprM,111
+promptlayer/types/prompt_template.py,sha256=blkVBhh4u5pMhgX_Dsn78sN7Rv2Vy_zhd1-NERLXTpM,5075
+promptlayer/types/request_log.py,sha256=xU6bcxQar6GaBOJlgZTavXUV3FjE8sF_nSjPu4Ya_00,174
+promptlayer/utils.py,sha256=hUljiXqyatTvyMmKwHZQr53BsqJ7AaSwP91qXZ5Gb_g,55859
+promptlayer-1.0.61.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+promptlayer-1.0.61.dist-info/METADATA,sha256=xOL_XD0NZ9kjyr57x7YBotqkflTMLm-FR7_dM1PLNWk,4819
+promptlayer-1.0.61.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+promptlayer-1.0.61.dist-info/RECORD,,
promptlayer-1.0.59.dist-info/RECORD
DELETED
@@ -1,18 +0,0 @@
-promptlayer/__init__.py,sha256=Gj2sG2OFxyf6LP1lCysUZUGPVSegsJn2FhSc5HBc1ww,140
-promptlayer/groups/__init__.py,sha256=xhOAolLUBkr76ZHvJr29OwjCIk1V9qKQXjZCuyTJUIY,429
-promptlayer/groups/groups.py,sha256=YPROicy-TzpkrpA8vOpZS2lwvJ6VRtlbQ1S2oT1N0vM,338
-promptlayer/promptlayer.py,sha256=4MtP_byvYdVi4yZp_VCyVpPGx3iAaijwDZHPoYDfqZc,22212
-promptlayer/promptlayer_base.py,sha256=jOgXzNZlV1LKOOsXSSAOgn8o4hXn_EV0oY9Nf3Bsu_s,6872
-promptlayer/promptlayer_mixins.py,sha256=1DfNk9woiB-EWtbydFRPp55f3Y88gt6be0m1OdVcDxs,12134
-promptlayer/span_exporter.py,sha256=Pc1-zWAcjVCSykh-4rYPqiEZvzkG9xaYLVoHFY_TWaQ,2410
-promptlayer/templates.py,sha256=7ObDPMzHXjttDdJdCXA_pDL9XAnmcujIWucmgZJcOC8,1179
-promptlayer/track/__init__.py,sha256=tyweLTAY7UpYpBHWwY-T3pOPDIlGjcgccYXqU_r0694,1710
-promptlayer/track/track.py,sha256=A-awcYwsSwxktrlCMchy8NITIquwxU1UXbgLZMwqrA0,3164
-promptlayer/types/__init__.py,sha256=xJcvQuOk91ZBBePb40-1FDNDKYrZoH5lPE2q6_UhprM,111
-promptlayer/types/prompt_template.py,sha256=blkVBhh4u5pMhgX_Dsn78sN7Rv2Vy_zhd1-NERLXTpM,5075
-promptlayer/types/request_log.py,sha256=xU6bcxQar6GaBOJlgZTavXUV3FjE8sF_nSjPu4Ya_00,174
-promptlayer/utils.py,sha256=lJoEMhXteIdkztX67rRoel8EcIMEcxH7JwQXXmg_yLo,75790
-promptlayer-1.0.59.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-promptlayer-1.0.59.dist-info/METADATA,sha256=xoBHjtE7yxwwx4gsuRCCqrPIgg6PxQ7vM_1u9iOa978,4819
-promptlayer-1.0.59.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-promptlayer-1.0.59.dist-info/RECORD,,
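Each RECORD line above follows the standard wheel format path,sha256=<digest>,<size>, where the digest is the urlsafe base64 encoding of the file's SHA-256 hash with the trailing "=" padding stripped. A short sketch that computes such an entry for an arbitrary file (the path shown is illustrative):

# Sketch: compute a wheel RECORD entry for a file on disk.
import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"{path},sha256={digest},{len(data)}"

print(record_entry("promptlayer/utils.py"))  # e.g. promptlayer/utils.py,sha256=...,55859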
{promptlayer-1.0.59.dist-info → promptlayer-1.0.61.dist-info}/LICENSE
File without changes

{promptlayer-1.0.59.dist-info → promptlayer-1.0.61.dist-info}/WHEEL
File without changes