promptlayer 0.5.3__py3-none-any.whl → 0.5.4__py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Potentially problematic release.
This version of promptlayer might be problematic.
- promptlayer/utils.py +51 -5
- {promptlayer-0.5.3.dist-info → promptlayer-0.5.4.dist-info}/METADATA +1 -1
- {promptlayer-0.5.3.dist-info → promptlayer-0.5.4.dist-info}/RECORD +5 -5
- {promptlayer-0.5.3.dist-info → promptlayer-0.5.4.dist-info}/LICENSE +0 -0
- {promptlayer-0.5.3.dist-info → promptlayer-0.5.4.dist-info}/WHEEL +0 -0
promptlayer/utils.py
CHANGED
```diff
@@ -51,7 +51,8 @@ def promptlayer_api_handler(
     if (
         isinstance(response, types.GeneratorType)
         or isinstance(response, types.AsyncGeneratorType)
-        or type(response).__name__
+        or type(response).__name__
+        in ["Stream", "AsyncStream", "AsyncMessageStreamManager"]
     ):
         return GeneratorProxy(
             response,
```
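The new name in the list, `AsyncMessageStreamManager`, is the wrapper Anthropic's SDK returns from `client.messages.stream(...)`. Matching on `type(response).__name__` rather than on the classes themselves presumably spares promptlayer from importing each provider SDK just for an isinstance check. A minimal standalone sketch of the extended check (`STREAM_TYPE_NAMES` and `looks_like_stream` are illustrative names, not promptlayer's API):

```python
import types

# Illustrative names, not promptlayer's API.
STREAM_TYPE_NAMES = ["Stream", "AsyncStream", "AsyncMessageStreamManager"]

def looks_like_stream(response) -> bool:
    # Matching on the type name avoids importing the openai/anthropic
    # stream classes just to isinstance-check against them.
    return (
        isinstance(response, types.GeneratorType)
        or isinstance(response, types.AsyncGeneratorType)
        or type(response).__name__ in STREAM_TYPE_NAMES
    )
```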
```diff
@@ -355,6 +356,17 @@ class GeneratorProxy:
     def __aiter__(self):
         return self
 
+    async def __aenter__(self):
+        api_request_arguments = self.api_request_arugments
+        if hasattr(self.generator, "_AsyncMessageStreamManager__api_request"):
+            return GeneratorProxy(
+                await self.generator._AsyncMessageStreamManager__api_request,
+                api_request_arguments,
+            )
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        pass
+
     async def __anext__(self):
         result = await self.generator.__anext__()
         return self._abstracted_next(result)
```
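The odd attribute spelling `_AsyncMessageStreamManager__api_request` is Python name mangling: an attribute written as `__api_request` inside the SDK's `AsyncMessageStreamManager` class body is stored under a `_ClassName`-prefixed name, so code outside that class has to use the mangled form. A self-contained illustration (the `Manager` class here is made up):

```python
# Name mangling: double-underscore attributes get the defining class's
# name prefixed, so external code must use the mangled spelling.
class Manager:
    def __init__(self):
        self.__api_request = "pending"  # stored as _Manager__api_request

m = Manager()
assert hasattr(m, "_Manager__api_request")  # mangled name is the real one
assert not hasattr(m, "__api_request")      # unmangled lookup fails
```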
```diff
@@ -363,14 +375,29 @@ class GeneratorProxy:
         result = next(self.generator)
         return self._abstracted_next(result)
 
+    def __getattr__(self, name):
+        if name == "text_stream":  # anthropic async stream
+            return GeneratorProxy(
+                self.generator.text_stream, self.api_request_arugments
+            )
+        return getattr(self.generator, name)
+
     def _abstracted_next(self, result):
         self.results.append(result)
         provider_type = self.api_request_arugments["provider_type"]
-        end_anthropic =
+        end_anthropic = False
+        if provider_type == "anthropic":
+            if hasattr(result, "stop_reason"):
+                end_anthropic = result.stop_reason
+            elif hasattr(result, "message"):
+                end_anthropic = result.message.stop_reason
+            elif hasattr(result, "type") and result.type == "message_stop":
+                end_anthropic = True
         end_openai = provider_type == "openai" and (
             result.choices[0].finish_reason == "stop"
             or result.choices[0].finish_reason == "length"
         )
+
         if end_anthropic or end_openai:
             request_id = promptlayer_api_request(
                 self.api_request_arugments["function_name"],
```
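The `__getattr__` hook only runs when normal attribute lookup fails, which is what makes this transparent proxying work: attributes defined on `GeneratorProxy` itself resolve normally, `text_stream` is intercepted and re-wrapped so iteration stays tracked, and everything else falls through to the wrapped stream. A sketch of the same delegation pattern with made-up classes:

```python
class Inner:
    text_stream = "inner stream"
    other = 42

class Wrapper:
    def __init__(self, inner):
        self.inner = inner

    def __getattr__(self, name):
        # Runs only when normal lookup fails, so `self.inner` (present in
        # the instance dict) never recurses back into __getattr__.
        if name == "text_stream":
            return f"wrapped({self.inner.text_stream})"
        return getattr(self.inner, name)

w = Wrapper(Inner())
assert w.other == 42                               # falls through
assert w.text_stream == "wrapped(inner stream)"    # intercepted
```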
```diff
@@ -395,9 +422,28 @@ class GeneratorProxy:
         if provider_type == "anthropic":
             response = ""
             for result in self.results:
-
-
-
+                if hasattr(result, "completion"):
+                    response = f"{response}{result.completion}"
+                elif hasattr(result, "message") and isinstance(result.message, str):
+                    response = f"{response}{result.message}"
+                elif hasattr(result, "content_block") and hasattr(
+                    result.content_block, "text"
+                ):
+                    response = f"{response}{result.content_block.text}"
+                elif hasattr(result, "delta") and hasattr(result.delta, "text"):
+                    response = f"{response}{result.delta.text}"
+            if (
+                hasattr(self.results[-1], "type")
+                and self.results[-1].type == "message_stop"
+            ):  # this is a message stream and not the correct event
+                final_result = deepcopy(self.results[0].message)
+                final_result.usage = None
+                content_block = deepcopy(self.results[1].content_block)
+                content_block.text = response
+                final_result.content = [content_block]
+            else:
+                final_result = deepcopy(self.results[-1])
+                final_result.completion = response
             return final_result
         if hasattr(self.results[0].choices[0], "text"):  # this is regular completion
             response = ""
```
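The rewritten loop concatenates whichever text field a given Anthropic stream event carries (`completion`, `message`, `content_block.text`, or `delta.text`). A standalone sketch of that accumulation strategy, using `SimpleNamespace` stand-ins for the SDK's event objects (the event shapes below are illustrative):

```python
from types import SimpleNamespace as NS

# Stand-ins for streamed events: each may carry its text under a
# different attribute, and some (message_stop) carry none at all.
events = [
    NS(content_block=NS(text="Hel")),
    NS(delta=NS(text="lo")),
    NS(type="message_stop"),
]

response = ""
for event in events:
    if hasattr(event, "completion"):
        response += event.completion
    elif hasattr(event, "content_block") and hasattr(event.content_block, "text"):
        response += event.content_block.text
    elif hasattr(event, "delta") and hasattr(event.delta, "text"):
        response += event.delta.text

assert response == "Hello"
```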
{promptlayer-0.5.3.dist-info → promptlayer-0.5.4.dist-info}/RECORD
CHANGED
```diff
@@ -13,8 +13,8 @@ promptlayer/track/__init__.py,sha256=tkIlHRZMX5GCKBlu4vYpQrxauPno2JDmP0M9RQxUBkQ
 promptlayer/track/track.py,sha256=gNM3aAKAAtOc8TOjogpZorlwg4zM2hoWgRnBQfexmmo,1525
 promptlayer/types/__init__.py,sha256=ulWSyCrk5hZ_PI-nKGpd6GPcRaK8lqP4wFl0LPNUYWk,61
 promptlayer/types/prompt_template.py,sha256=pMFlCG8YAIajc3-GjW0Y9_9-m7_ysbuGDC0WZzFHZbI,3752
-promptlayer/utils.py,sha256=
-promptlayer-0.5.
-promptlayer-0.5.
-promptlayer-0.5.
-promptlayer-0.5.
+promptlayer/utils.py,sha256=Gy-ODWrM_0sb2alkMoIBIelM8izYEF7r8Uw7bcEjaa0,21160
+promptlayer-0.5.4.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+promptlayer-0.5.4.dist-info/METADATA,sha256=_zu-Wy8R2nDb68bOMEX2UhScDIw3yUok_9XsbwRtTY4,4475
+promptlayer-0.5.4.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+promptlayer-0.5.4.dist-info/RECORD,,
```
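For reference, each RECORD entry has the form `path,sha256=<digest>,size`, where the digest is an unpadded URL-safe base64 SHA-256 of the file (the wheel RECORD format). A sketch of recomputing one entry's hash (the path assumes an unpacked wheel):

```python
import base64
import hashlib

def record_hash(path: str) -> str:
    # Wheel RECORD hashes are URL-safe base64 without '=' padding.
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

# record_hash("promptlayer/utils.py") on the unpacked 0.5.4 wheel should
# match the RECORD entry above.
```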
{promptlayer-0.5.3.dist-info → promptlayer-0.5.4.dist-info}/LICENSE
File without changes
{promptlayer-0.5.3.dist-info → promptlayer-0.5.4.dist-info}/WHEEL
File without changes