promptlayer 1.0.64__py3-none-any.whl → 1.0.65__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of promptlayer has been flagged as a potentially problematic release.

promptlayer/__init__.py CHANGED
@@ -1,4 +1,4 @@
 from .promptlayer import AsyncPromptLayer, PromptLayer
 
-__version__ = "1.0.64"
+__version__ = "1.0.65"
 __all__ = ["PromptLayer", "AsyncPromptLayer", "__version__"]
promptlayer/promptlayer.py CHANGED
@@ -171,6 +171,11 @@ class PromptLayer(PromptLayerMixin):
             metadata=llm_data["prompt_blueprint"]["metadata"],
         )
 
+        if isinstance(response, dict):
+            request_response = response
+        else:
+            request_response = response.model_dump(mode="json")
+
         request_log = self._track_request_log(
             llm_data,
             tags,
@@ -178,7 +183,7 @@ class PromptLayer(PromptLayerMixin):
             group_id,
             pl_run_span_id,
             metadata=metadata,
-            request_response=response.model_dump(mode="json"),
+            request_response=request_response,
         )
 
         return {
@@ -592,6 +597,11 @@ class AsyncPromptLayer(PromptLayerMixin):
             function_kwargs=llm_data["function_kwargs"],
         )
 
+        if isinstance(response, dict):
+            request_response = response
+        else:
+            request_response = response.model_dump(mode="json")
+
         if stream:
             track_request_callable = await self._create_track_request_callable(
                 request_params=llm_data,
@@ -601,7 +611,7 @@ class AsyncPromptLayer(PromptLayerMixin):
                 pl_run_span_id=pl_run_span_id,
             )
             return astream_response(
-                response,
+                request_response,
                 track_request_callable,
                 llm_data["stream_function"],
                 llm_data["prompt_blueprint"]["metadata"],
@@ -614,11 +624,11 @@ class AsyncPromptLayer(PromptLayerMixin):
             group_id,
             pl_run_span_id,
             metadata=metadata,
-            request_response=response.model_dump(mode="json"),
+            request_response=request_response,
         )
 
         return {
             "request_id": request_log.get("request_id", None),
-            "raw_response": response,
+            "raw_response": request_response,
             "prompt_blueprint": request_log.get("prompt_blueprint", None),
         }
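The isinstance branch added above is needed because boto3's bedrock-runtime client returns plain dicts, while the other provider SDKs return model objects that expose model_dump(). A minimal sketch of the normalization, using a hypothetical helper name rather than the library's actual code:

```python
def _normalize_request_response(response):
    """Return a JSON-serializable dict regardless of provider SDK (sketch only)."""
    if isinstance(response, dict):
        # Amazon Bedrock (boto3) converse()/converse_stream() already return dicts.
        return response
    # OpenAI/Anthropic/Mistral-style SDK objects are pydantic models.
    return response.model_dump(mode="json")
```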
promptlayer/promptlayer_mixins.py CHANGED
@@ -13,6 +13,7 @@ from promptlayer.span_exporter import PromptLayerSpanExporter
 from promptlayer.streaming import (
     aanthropic_stream_completion,
     aanthropic_stream_message,
+    abedrock_stream_message,
     agoogle_stream_chat,
     agoogle_stream_completion,
     amistral_stream_chat,
@@ -20,6 +21,7 @@ from promptlayer.streaming import (
     anthropic_stream_message,
     aopenai_stream_chat,
     aopenai_stream_completion,
+    bedrock_stream_message,
     google_stream_chat,
     google_stream_completion,
     mistral_stream_chat,
@@ -27,10 +29,12 @@ from promptlayer.streaming import (
     openai_stream_completion,
 )
 from promptlayer.utils import (
+    aamazon_bedrock_request,
     aanthropic_bedrock_request,
     aanthropic_request,
     aazure_openai_request,
     agoogle_request,
+    amazon_bedrock_request,
     amistral_request,
     anthropic_bedrock_request,
     anthropic_request,
@@ -94,6 +98,16 @@ MAP_PROVIDER_TO_FUNCTION_NAME = {
             "stream_function": google_stream_completion,
         },
     },
+    "amazon.bedrock": {
+        "chat": {
+            "function_name": "boto3.bedrock-runtime.converse",
+            "stream_function": bedrock_stream_message,
+        },
+        "completion": {
+            "function_name": "boto3.bedrock-runtime.converse",
+            "stream_function": bedrock_stream_message,
+        },
+    },
     "anthropic.bedrock": {
         "chat": {
             "function_name": "anthropic.messages.create",
@@ -114,6 +128,7 @@ MAP_PROVIDER_TO_FUNCTION = {
     "openai": openai_request,
     "openai.azure": azure_openai_request,
     "vertexai": vertexai_request,
+    "amazon.bedrock": amazon_bedrock_request,
     "anthropic.bedrock": anthropic_bedrock_request,
 }
 
@@ -168,6 +183,16 @@ AMAP_PROVIDER_TO_FUNCTION_NAME = {
             "stream_function": agoogle_stream_completion,
         },
     },
+    "amazon.bedrock": {
+        "chat": {
+            "function_name": "boto3.bedrock-runtime.converse",
+            "stream_function": abedrock_stream_message,
+        },
+        "completion": {
+            "function_name": "boto3.bedrock-runtime.converse",
+            "stream_function": abedrock_stream_message,
+        },
+    },
     "anthropic.bedrock": {
         "chat": {
             "function_name": "anthropic.messages.create",
@@ -188,6 +213,7 @@ AMAP_PROVIDER_TO_FUNCTION = {
     "openai": aopenai_request,
     "openai.azure": aazure_openai_request,
     "vertexai": avertexai_request,
+    "amazon.bedrock": aamazon_bedrock_request,
     "anthropic.bedrock": aanthropic_bedrock_request,
 }
 
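For orientation, a rough sketch of how these provider maps are presumably consumed when dispatching a request; this is an illustrative lookup only, the actual wiring lives in the mixin methods, which this diff does not show:

```python
provider, prompt_type = "amazon.bedrock", "chat"

request_fn = MAP_PROVIDER_TO_FUNCTION[provider]                # amazon_bedrock_request
entry = MAP_PROVIDER_TO_FUNCTION_NAME[provider][prompt_type]
function_name = entry["function_name"]                         # "boto3.bedrock-runtime.converse"
stream_fn = entry["stream_function"]                           # bedrock_stream_message
```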
promptlayer/streaming/__init__.py CHANGED
@@ -13,6 +13,7 @@ from .blueprint_builder import (
 from .response_handlers import (
     aanthropic_stream_completion,
     aanthropic_stream_message,
+    abedrock_stream_message,
     agoogle_stream_chat,
     agoogle_stream_completion,
     amistral_stream_chat,
@@ -20,6 +21,7 @@ from .response_handlers import (
     anthropic_stream_message,
     aopenai_stream_chat,
     aopenai_stream_completion,
+    bedrock_stream_message,
     google_stream_chat,
     google_stream_completion,
     mistral_stream_chat,
@@ -45,6 +47,8 @@ __all__ = [
     "aopenai_stream_completion",
     "anthropic_stream_completion",
     "aanthropic_stream_completion",
+    "bedrock_stream_message",
+    "abedrock_stream_message",
     "google_stream_chat",
     "google_stream_completion",
     "agoogle_stream_chat",
promptlayer/streaming/blueprint_builder.py CHANGED
@@ -137,3 +137,38 @@ def build_prompt_blueprint_from_google_event(event, metadata):
 
     assistant_message = _build_assistant_message(assistant_content, tool_calls or None, template_format="f-string")
     return _build_prompt_blueprint(assistant_message, metadata)
+
+
+def build_prompt_blueprint_from_bedrock_event(result, metadata):
+    """
+    Build a prompt blueprint from an Amazon Bedrock streaming event.
+    """
+    assistant_content = []
+    tool_calls = []
+
+    if "contentBlockDelta" in result:
+        delta = result["contentBlockDelta"].get("delta", {})
+
+        if "reasoningContent" in delta:
+            reasoning_text = delta["reasoningContent"].get("text", "")
+            signature = delta["reasoningContent"].get("signature")
+            assistant_content.append(_create_content_item("thinking", thinking=reasoning_text, signature=signature))
+
+        elif "text" in delta:
+            assistant_content.append(_create_content_item("text", text=delta["text"]))
+
+        elif "toolUse" in delta:
+            tool_use = delta["toolUse"]
+            assistant_content.append(
+                _create_tool_call(tool_use.get("toolUseId", ""), tool_use.get("name", ""), tool_use.get("input", ""))
+            )
+
+    elif "contentBlockStart" in result:
+        start_block = result["contentBlockStart"].get("start", {})
+
+        if "toolUse" in start_block:
+            tool_use = start_block["toolUse"]
+            tool_calls.append(_create_tool_call(tool_use.get("toolUseId", ""), tool_use.get("name", ""), ""))
+
+    assistant_message = _build_assistant_message(assistant_content, tool_calls or None)
+    return _build_prompt_blueprint(assistant_message, metadata)
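As context for the new builder, a few hypothetical converse_stream events showing only the keys it reads (shapes assumed from the code above):

```python
text_event = {"contentBlockDelta": {"delta": {"text": "Hello"}}}
thinking_event = {"contentBlockDelta": {"delta": {"reasoningContent": {"text": "step 1...", "signature": "abc"}}}}
tool_start_event = {"contentBlockStart": {"start": {"toolUse": {"toolUseId": "tool-1", "name": "get_weather"}}}}

# build_prompt_blueprint_from_bedrock_event(text_event, metadata)       -> "text" content item
# build_prompt_blueprint_from_bedrock_event(thinking_event, metadata)   -> "thinking" content item
# build_prompt_blueprint_from_bedrock_event(tool_start_event, metadata) -> assistant message with a tool call
```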
promptlayer/streaming/response_handlers.py CHANGED
@@ -5,6 +5,7 @@ This module contains handlers that process streaming responses from various
 LLM providers and return both the final response and prompt blueprint.
 """
 
+import json
 from typing import Any, AsyncIterable, List
 
 
@@ -548,3 +549,147 @@ async def amistral_stream_chat(generator: AsyncIterable[Any]) -> Any:
     response.choices[0].message.content = content
     response.choices[0].message.tool_calls = tool_calls
     return response
+
+
+def bedrock_stream_message(results: list):
+    """Process Amazon Bedrock streaming message results and return response + blueprint"""
+
+    response = {"ResponseMetadata": {}, "output": {"message": {}}, "stopReason": "end_turn", "metrics": {}, "usage": {}}
+
+    content_blocks = []
+    current_tool_call = None
+    current_tool_input = ""
+    current_text = ""
+    current_signature = ""
+    current_thinking = ""
+
+    for event in results:
+        if "contentBlockStart" in event:
+            content_block = event["contentBlockStart"]
+            if "start" in content_block and "toolUse" in content_block["start"]:
+                tool_use = content_block["start"]["toolUse"]
+                current_tool_call = {"toolUse": {"toolUseId": tool_use["toolUseId"], "name": tool_use["name"]}}
+                current_tool_input = ""
+
+        elif "contentBlockDelta" in event:
+            delta = event["contentBlockDelta"]["delta"]
+            if "text" in delta:
+                current_text += delta["text"]
+            elif "reasoningContent" in delta:
+                reasoning_content = delta["reasoningContent"]
+                if "text" in reasoning_content:
+                    current_thinking += reasoning_content["text"]
+                elif "signature" in reasoning_content:
+                    current_signature += reasoning_content["signature"]
+            elif "toolUse" in delta:
+                if "input" in delta["toolUse"]:
+                    input_chunk = delta["toolUse"]["input"]
+                    current_tool_input += input_chunk
+                    if not input_chunk.strip():
+                        continue
+
+        elif "contentBlockStop" in event:
+            if current_tool_call and current_tool_input:
+                try:
+                    current_tool_call["toolUse"]["input"] = json.loads(current_tool_input)
+                except json.JSONDecodeError:
+                    current_tool_call["toolUse"]["input"] = {}
+                content_blocks.append(current_tool_call)
+                current_tool_call = None
+                current_tool_input = ""
+            elif current_text:
+                content_blocks.append({"text": current_text})
+                current_text = ""
+            elif current_thinking and current_signature:
+                content_blocks.append(
+                    {
+                        "reasoningContent": {
+                            "reasoningText": {"text": current_thinking, "signature": current_signature},
+                        }
+                    }
+                )
+                current_thinking = ""
+                current_signature = ""
+
+        elif "messageStop" in event:
+            response["stopReason"] = event["messageStop"]["stopReason"]
+
+        elif "metadata" in event:
+            metadata = event["metadata"]
+            response["usage"] = metadata.get("usage", {})
+            response["metrics"] = metadata.get("metrics", {})
+
+    response["output"]["message"] = {"role": "assistant", "content": content_blocks}
+    return response
+
+
+async def abedrock_stream_message(generator: AsyncIterable[Any]) -> Any:
+    """Async version of bedrock_stream_message"""
+
+    response = {"ResponseMetadata": {}, "output": {"message": {}}, "stopReason": "end_turn", "metrics": {}, "usage": {}}
+
+    content_blocks = []
+    current_tool_call = None
+    current_tool_input = ""
+    current_text = ""
+    current_signature = ""
+    current_thinking = ""
+
+    async for event in generator:
+        if "contentBlockStart" in event:
+            content_block = event["contentBlockStart"]
+            if "start" in content_block and "toolUse" in content_block["start"]:
+                tool_use = content_block["start"]["toolUse"]
+                current_tool_call = {"toolUse": {"toolUseId": tool_use["toolUseId"], "name": tool_use["name"]}}
+                current_tool_input = ""
+
+        elif "contentBlockDelta" in event:
+            delta = event["contentBlockDelta"]["delta"]
+            if "text" in delta:
+                current_text += delta["text"]
+            elif "reasoningContent" in delta:
+                reasoning_content = delta["reasoningContent"]
+                if "text" in reasoning_content:
+                    current_thinking += reasoning_content["text"]
+                elif "signature" in reasoning_content:
+                    current_signature += reasoning_content["signature"]
+            elif "toolUse" in delta:
+                if "input" in delta["toolUse"]:
+                    input_chunk = delta["toolUse"]["input"]
+                    current_tool_input += input_chunk
+                    if not input_chunk.strip():
+                        continue
+
+        elif "contentBlockStop" in event:
+            if current_tool_call and current_tool_input:
+                try:
+                    current_tool_call["toolUse"]["input"] = json.loads(current_tool_input)
+                except json.JSONDecodeError:
+                    current_tool_call["toolUse"]["input"] = {}
+                content_blocks.append(current_tool_call)
+                current_tool_call = None
+                current_tool_input = ""
+            elif current_text:
+                content_blocks.append({"text": current_text})
+                current_text = ""
+            elif current_thinking and current_signature:
+                content_blocks.append(
+                    {
+                        "reasoningContent": {
+                            "reasoningText": {"text": current_thinking, "signature": current_signature},
+                        }
+                    }
+                )
+                current_thinking = ""
+                current_signature = ""
+
+        elif "messageStop" in event:
+            response["stopReason"] = event["messageStop"]["stopReason"]
+
+        elif "metadata" in event:
+            metadata = event["metadata"]
+            response["usage"] = metadata.get("usage", {})
+            response["metrics"] = metadata.get("metrics", {})
+
+    response["output"]["message"] = {"role": "assistant", "content": content_blocks}
+    return response
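A minimal example of how the synchronous handler folds a hypothetical event sequence back into a converse-shaped response:

```python
events = [
    {"contentBlockDelta": {"delta": {"text": "Hello, "}}},
    {"contentBlockDelta": {"delta": {"text": "world!"}}},
    {"contentBlockStop": {}},
    {"messageStop": {"stopReason": "end_turn"}},
    {"metadata": {"usage": {"inputTokens": 12, "outputTokens": 4}, "metrics": {"latencyMs": 250}}},
]

result = bedrock_stream_message(events)
# result["output"]["message"] == {"role": "assistant", "content": [{"text": "Hello, world!"}]}
# result["stopReason"] == "end_turn"
# result["usage"] == {"inputTokens": 12, "outputTokens": 4}
```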
promptlayer/streaming/stream_processor.py CHANGED
@@ -2,6 +2,7 @@ from typing import Any, AsyncGenerator, AsyncIterable, Callable, Dict, Generator
 
 from .blueprint_builder import (
     build_prompt_blueprint_from_anthropic_event,
+    build_prompt_blueprint_from_bedrock_event,
     build_prompt_blueprint_from_google_event,
     build_prompt_blueprint_from_openai_chunk,
 )
@@ -18,12 +19,15 @@ def _build_stream_blueprint(result: Any, metadata: Dict) -> Any:
     elif provider == "google" or (provider == "vertexai" and model_name.startswith("gemini")):
         return build_prompt_blueprint_from_google_event(result, metadata)
 
-    elif provider in ["anthropic", "anthropic.bedrock"] or (provider == "vertexai" and model_name.startswith("claude")):
+    elif provider in ("anthropic", "anthropic.bedrock") or (provider == "vertexai" and model_name.startswith("claude")):
         return build_prompt_blueprint_from_anthropic_event(result, metadata)
 
     elif provider == "mistral":
         return build_prompt_blueprint_from_openai_chunk(result.data, metadata)
 
+    elif provider == "amazon.bedrock":
+        return build_prompt_blueprint_from_bedrock_event(result, metadata)
+
     return None
 
 
@@ -37,6 +41,11 @@ def _build_stream_data(result: Any, stream_blueprint: Any, request_id: Any = Non
 
 def stream_response(*, generator: Generator, after_stream: Callable, map_results: Callable, metadata: Dict):
     results = []
+    provider = metadata.get("model", {}).get("provider", "")
+    if provider == "amazon.bedrock":
+        response_metadata = generator.get("ResponseMetadata", {})
+        generator = generator.get("stream", generator)
+
     for result in generator:
         results.append(result)
 
@@ -45,7 +54,12 @@ def stream_response(*, generator: Generator, after_stream: Callable, map_results
         yield data
 
     request_response = map_results(results)
-    response = after_stream(request_response=request_response.model_dump(mode="json"))
+    if provider == "amazon.bedrock":
+        request_response["ResponseMetadata"] = response_metadata
+    else:
+        request_response = request_response.model_dump(mode="json")
+
+    response = after_stream(request_response=request_response)
     data["request_id"] = response.get("request_id")
     data["prompt_blueprint"] = response.get("prompt_blueprint")
     yield data
@@ -58,6 +72,10 @@ async def astream_response(
     metadata: Dict[str, Any] = None,
 ) -> AsyncGenerator[Dict[str, Any], None]:
     results = []
+    provider = metadata.get("model", {}).get("provider", "")
+    if provider == "amazon.bedrock":
+        response_metadata = generator.get("ResponseMetadata", {})
+        generator = generator.get("stream", generator)
 
     async for result in generator:
         results.append(result)
@@ -71,7 +89,13 @@
         yield item
 
     request_response = await map_results(async_generator_from_list(results))
-    after_stream_response = await after_stream(request_response=request_response.model_dump(mode="json"))
+
+    if provider == "amazon.bedrock":
+        request_response["ResponseMetadata"] = response_metadata
+    else:
+        request_response = request_response.model_dump(mode="json")
+
+    after_stream_response = await after_stream(request_response=request_response)
     data["request_id"] = after_stream_response.get("request_id")
     data["prompt_blueprint"] = after_stream_response.get("prompt_blueprint")
     yield data
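The amazon.bedrock branches above exist because boto3's converse_stream() does not hand back a bare iterator; it returns a dict wrapping the event stream plus AWS response metadata (shape assumed below), so the processor unwraps "stream" and re-attaches "ResponseMetadata" to the final response:

```python
# Assumed shape of the object passed in as `generator` for amazon.bedrock:
bedrock_response = {
    "ResponseMetadata": {"RequestId": "abc-123", "HTTPStatusCode": 200},   # hypothetical values
    "stream": iter([{"messageStop": {"stopReason": "end_turn"}}]),         # stands in for the boto3 EventStream
}

response_metadata = bedrock_response.get("ResponseMetadata", {})
event_stream = bedrock_response.get("stream", bedrock_response)
events = list(event_stream)   # stream_response iterates these and yields per-event chunks
```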
promptlayer/utils.py CHANGED
@@ -1544,6 +1544,51 @@ async def avertexai_request(prompt_blueprint: GetPromptTemplateResponse, client_
     )
 
 
+def amazon_bedrock_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
+    import boto3
+
+    bedrock_client = boto3.client(
+        "bedrock-runtime",
+        aws_access_key_id=function_kwargs.pop("aws_access_key", None),
+        aws_secret_access_key=function_kwargs.pop("aws_secret_key", None),
+        region_name=function_kwargs.pop("aws_region", "us-east-1"),
+    )
+
+    stream = function_kwargs.pop("stream", False)
+
+    if stream:
+        return bedrock_client.converse_stream(**function_kwargs)
+    else:
+        return bedrock_client.converse(**function_kwargs)
+
+
+async def aamazon_bedrock_request(
+    prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict
+):
+    import aioboto3
+
+    aws_access_key = function_kwargs.pop("aws_access_key", None)
+    aws_secret_key = function_kwargs.pop("aws_secret_key", None)
+    aws_region = function_kwargs.pop("aws_region", "us-east-1")
+
+    session_kwargs = {}
+    if aws_access_key:
+        session_kwargs["aws_access_key_id"] = aws_access_key
+    if aws_secret_key:
+        session_kwargs["aws_secret_access_key"] = aws_secret_key
+    if aws_region:
+        session_kwargs["region_name"] = aws_region
+
+    stream = function_kwargs.pop("stream", False)
+    session = aioboto3.Session()
+
+    async with session.client("bedrock-runtime", **session_kwargs) as client:
+        if stream:
+            return await client.converse_stream(**function_kwargs)
+        else:
+            return await client.converse(**function_kwargs)
+
+
 def anthropic_bedrock_request(prompt_blueprint: GetPromptTemplateResponse, client_kwargs: dict, function_kwargs: dict):
     from anthropic import AnthropicBedrock
 
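An illustrative call of the new synchronous helper (hypothetical kwargs): the AWS credential keys and the stream flag are popped by the wrapper, and everything else is forwarded to boto3's converse() / converse_stream() as Converse API arguments.

```python
function_kwargs = {
    "aws_region": "us-east-1",
    "stream": False,
    "modelId": "anthropic.claude-3-haiku-20240307-v1:0",   # example model id
    "messages": [{"role": "user", "content": [{"text": "Hello!"}]}],
    "inferenceConfig": {"maxTokens": 256},
}

# prompt_blueprint and client_kwargs are unused by this helper, so placeholders suffice here.
response = amazon_bedrock_request(prompt_blueprint=None, client_kwargs={}, function_kwargs=function_kwargs)
# With stream=True, the same call would return the converse_stream() dict
# ({"ResponseMetadata": ..., "stream": EventStream}) instead of the final response.
```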
promptlayer-1.0.65.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: promptlayer
-Version: 1.0.64
+Version: 1.0.65
 Summary: PromptLayer is a platform for prompt engineering and tracks your LLM requests.
 License: Apache-2.0
 Author: Magniv
promptlayer-1.0.65.dist-info/RECORD ADDED
@@ -0,0 +1,22 @@
+promptlayer/__init__.py,sha256=P1Cf4whOUCaydPccPLRuj9UmqbylJsnGjuDyiRa4l38,140
+promptlayer/groups/__init__.py,sha256=xhOAolLUBkr76ZHvJr29OwjCIk1V9qKQXjZCuyTJUIY,429
+promptlayer/groups/groups.py,sha256=YPROicy-TzpkrpA8vOpZS2lwvJ6VRtlbQ1S2oT1N0vM,338
+promptlayer/promptlayer.py,sha256=GY4dEID1bsazlcft1ECuNx0tIFqX5dLxMMgsec8bXt8,22816
+promptlayer/promptlayer_base.py,sha256=jOgXzNZlV1LKOOsXSSAOgn8o4hXn_EV0oY9Nf3Bsu_s,6872
+promptlayer/promptlayer_mixins.py,sha256=xgDwpr8D4Hl_n0OidlzeEWZtHfrhRQDIDx6a_z5Iy48,13930
+promptlayer/span_exporter.py,sha256=Pc1-zWAcjVCSykh-4rYPqiEZvzkG9xaYLVoHFY_TWaQ,2410
+promptlayer/streaming/__init__.py,sha256=s0VFWaaDrQD3oFbJLytKlmiPsDDPlgTSqNjRbFj8kBI,1641
+promptlayer/streaming/blueprint_builder.py,sha256=kYo8hby2eooCcPT2rXygu0Cj2iIp-_TqTZ1IGbF8moE,7337
+promptlayer/streaming/response_handlers.py,sha256=1LYnBOjcbw1Wgvz4s5kLOYVY2qQmDpmnAo2GK6glocE,25110
+promptlayer/streaming/stream_processor.py,sha256=AJfzINN6feuf5dhCpdKfk3MZ1n7KeXnopMZ5c97LjBg,3752
+promptlayer/templates.py,sha256=7ObDPMzHXjttDdJdCXA_pDL9XAnmcujIWucmgZJcOC8,1179
+promptlayer/track/__init__.py,sha256=tyweLTAY7UpYpBHWwY-T3pOPDIlGjcgccYXqU_r0694,1710
+promptlayer/track/track.py,sha256=A-awcYwsSwxktrlCMchy8NITIquwxU1UXbgLZMwqrA0,3164
+promptlayer/types/__init__.py,sha256=xJcvQuOk91ZBBePb40-1FDNDKYrZoH5lPE2q6_UhprM,111
+promptlayer/types/prompt_template.py,sha256=blkVBhh4u5pMhgX_Dsn78sN7Rv2Vy_zhd1-NERLXTpM,5075
+promptlayer/types/request_log.py,sha256=xU6bcxQar6GaBOJlgZTavXUV3FjE8sF_nSjPu4Ya_00,174
+promptlayer/utils.py,sha256=FzW_mTr0nyPDHoYG9XBCUE6Ch04ga4qxmEfwUNAZfpo,59585
+promptlayer-1.0.65.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+promptlayer-1.0.65.dist-info/METADATA,sha256=BlyHiHS5AYa-1tHL-R1aIgSfiUJcaXdaohE4OIccFqE,4819
+promptlayer-1.0.65.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+promptlayer-1.0.65.dist-info/RECORD,,
promptlayer-1.0.64.dist-info/RECORD REMOVED
@@ -1,22 +0,0 @@
-promptlayer/__init__.py,sha256=espGWsb2ruTQuV58LwrySNcRa1K-qfY8VACy73H1OHI,140
-promptlayer/groups/__init__.py,sha256=xhOAolLUBkr76ZHvJr29OwjCIk1V9qKQXjZCuyTJUIY,429
-promptlayer/groups/groups.py,sha256=YPROicy-TzpkrpA8vOpZS2lwvJ6VRtlbQ1S2oT1N0vM,338
-promptlayer/promptlayer.py,sha256=MVEcQUQoGopyfB5LIo4LACeg_AN-86J0MPTLS8NJFhc,22516
-promptlayer/promptlayer_base.py,sha256=jOgXzNZlV1LKOOsXSSAOgn8o4hXn_EV0oY9Nf3Bsu_s,6872
-promptlayer/promptlayer_mixins.py,sha256=nHRV-KFI6dHiEX-lr554Q7f1DeTDlzmJsIjbyQRWUAM,13059
-promptlayer/span_exporter.py,sha256=Pc1-zWAcjVCSykh-4rYPqiEZvzkG9xaYLVoHFY_TWaQ,2410
-promptlayer/streaming/__init__.py,sha256=yNO77fyOi_scNPbE-eIEDGwSOyp8WYyPZ7ZrHaoipmM,1523
-promptlayer/streaming/blueprint_builder.py,sha256=NLmqwspHoAsecrY7varbF4EQaUg5yBKfBxS4y7UycuU,5925
-promptlayer/streaming/response_handlers.py,sha256=vNvpP-RLVl2uHkKLc8Ci9bmNldCezRey40tgtBEd4bo,19005
-promptlayer/streaming/stream_processor.py,sha256=vB9pB25bd0vG-Pl2UYqB8Ae6b9iKSH-WCB9S-SNOSAU,2836
-promptlayer/templates.py,sha256=7ObDPMzHXjttDdJdCXA_pDL9XAnmcujIWucmgZJcOC8,1179
-promptlayer/track/__init__.py,sha256=tyweLTAY7UpYpBHWwY-T3pOPDIlGjcgccYXqU_r0694,1710
-promptlayer/track/track.py,sha256=A-awcYwsSwxktrlCMchy8NITIquwxU1UXbgLZMwqrA0,3164
-promptlayer/types/__init__.py,sha256=xJcvQuOk91ZBBePb40-1FDNDKYrZoH5lPE2q6_UhprM,111
-promptlayer/types/prompt_template.py,sha256=blkVBhh4u5pMhgX_Dsn78sN7Rv2Vy_zhd1-NERLXTpM,5075
-promptlayer/types/request_log.py,sha256=xU6bcxQar6GaBOJlgZTavXUV3FjE8sF_nSjPu4Ya_00,174
-promptlayer/utils.py,sha256=DNhX6ydXXIBDKI9c0d_Jke2l-ZjpdvIpWqXM7ebjt20,58029
-promptlayer-1.0.64.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-promptlayer-1.0.64.dist-info/METADATA,sha256=dVMp07jg6DLiXm7U0kegC7K3IgDDcFbExo-qCJmXrdc,4819
-promptlayer-1.0.64.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-promptlayer-1.0.64.dist-info/RECORD,,