langtrace-python-sdk 3.8.5__py3-none-any.whl → 3.8.7__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
--- a/langtrace_python_sdk/instrumentation/openai/instrumentation.py
+++ b/langtrace_python_sdk/instrumentation/openai/instrumentation.py
@@ -24,6 +24,8 @@ from langtrace_python_sdk.instrumentation.openai.patch import (
  async_embeddings_create,
  async_images_generate,
  chat_completions_create,
+ openai_responses_create,
+ async_openai_responses_create,
  embeddings_create,
  images_edit,
  images_generate,
@@ -32,7 +34,7 @@ from langtrace_python_sdk.instrumentation.openai.patch import (
  logging.basicConfig(level=logging.FATAL)


- class OpenAIInstrumentation(BaseInstrumentor): # type: ignore
+ class OpenAIInstrumentation(BaseInstrumentor):  # type: ignore


  def instrumentation_dependencies(self) -> Collection[str]:
  return ["openai >= 0.27.0", "trace-attributes >= 4.0.5"]
@@ -54,6 +56,18 @@ class OpenAIInstrumentation(BaseInstrumentor): # type: ignore
  async_chat_completions_create(version, tracer),
  )

+ wrap_function_wrapper(
+ "openai.resources.responses",
+ "AsyncResponses.create",
+ async_openai_responses_create(version, tracer),
+ )
+
+ wrap_function_wrapper(
+ "openai.resources.responses",
+ "Responses.create",
+ openai_responses_create(version, tracer),
+ )
+
  wrap_function_wrapper(
  "openai.resources.images",
  "Images.generate",
--- a/langtrace_python_sdk/instrumentation/openai/patch.py
+++ b/langtrace_python_sdk/instrumentation/openai/patch.py
@@ -7,27 +7,121 @@ from opentelemetry.trace import Span, SpanKind, Tracer
  from opentelemetry.trace.propagation import set_span_in_context
  from opentelemetry.trace.status import Status, StatusCode

- from langtrace_python_sdk.constants.instrumentation.common import \
- SERVICE_PROVIDERS
+ from langtrace_python_sdk.constants.instrumentation.common import SERVICE_PROVIDERS
  from langtrace_python_sdk.constants.instrumentation.openai import APIS
  from langtrace_python_sdk.instrumentation.openai.types import (
- ChatCompletionsCreateKwargs, ContentItem, EmbeddingsCreateKwargs,
- ImagesEditKwargs, ImagesGenerateKwargs, ResultType)
+ ChatCompletionsCreateKwargs,
+ ContentItem,
+ EmbeddingsCreateKwargs,
+ ImagesEditKwargs,
+ ImagesGenerateKwargs,
+ ResultType,
+ )
  from langtrace_python_sdk.types import NOT_GIVEN
  from langtrace_python_sdk.utils import set_span_attribute
- from langtrace_python_sdk.utils.llm import (StreamWrapper,
- calculate_prompt_tokens,
- get_base_url, get_extra_attributes,
- get_langtrace_attributes,
- get_llm_request_attributes,
- get_llm_url, get_span_name,
- get_tool_calls, is_streaming,
- set_event_completion,
- set_span_attributes,
- set_usage_attributes)
+ from langtrace_python_sdk.utils.llm import (
+ StreamWrapper,
+ calculate_prompt_tokens,
+ get_base_url,
+ get_extra_attributes,
+ get_langtrace_attributes,
+ get_llm_request_attributes,
+ get_llm_url,
+ get_span_name,
+ get_tool_calls,
+ is_streaming,
+ set_event_completion,
+ set_span_attributes,
+ set_usage_attributes,
+ )
  from langtrace_python_sdk.utils.silently_fail import silently_fail


+ def async_openai_responses_create(version: str, tracer: Tracer) -> Callable:
+ """Wrap the `create` method of the `openai.AsyncResponse.create` class to trace it."""
+
+ async def traced_method(
+ wrapped: Callable, instance: Any, args: List[Any], kwargs: Dict[str, Any]
+ ):
+ input_value = kwargs.get("input")
+ prompt = (
+ input_value[0]
+ if isinstance(input_value, list)
+ else [{"role": "user", "content": input_value}]
+ )
+ service_provider = SERVICE_PROVIDERS["OPENAI"]
+ span_attributes = {
+ "instructions": kwargs.get("instructions"),
+ **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
+ **get_llm_request_attributes(
+ kwargs,
+ operation_name="openai.responses.create",
+ prompts=prompt,
+ ),
+ }
+ with tracer.start_as_current_span(
+ name="openai.responses.create",
+ kind=SpanKind.CLIENT,
+ context=set_span_in_context(trace.get_current_span()),
+ ) as span:
+ try:
+ set_span_attributes(span, span_attributes)
+
+ response = await wrapped(*args, **kwargs)
+ _set_openai_agentic_response_attributes(span, response)
+
+ return response
+ except Exception as err:
+ span.record_exception(err)
+ raise
+
+ return traced_method
+
+
+ def openai_responses_create(version: str, tracer: Tracer) -> Callable:
+ """Wrap the `create` method of the `openai.responses.create` class to trace it."""
+
+ def traced_method(
+ wrapped: Callable, instance: Any, args: List[Any], kwargs: Dict[str, Any]
+ ):
+ input_value = kwargs.get("input")
+ prompt = (
+ input_value[0]
+ if isinstance(input_value, list)
+ else [{"role": "user", "content": input_value}]
+ )
+ service_provider = SERVICE_PROVIDERS["OPENAI"]
+ span_attributes = {
+ "instructions": kwargs.get("instructions"),
+ **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
+ **get_llm_request_attributes(
+ kwargs,
+ operation_name="openai.responses.create",
+ prompts=prompt,
+ ),
+ }
+ with tracer.start_as_current_span(
+ name="openai.responses.create",
+ kind=SpanKind.CLIENT,
+ context=set_span_in_context(trace.get_current_span()),
+ end_on_exit=False,
+ ) as span:
+ try:
+ set_span_attributes(span, span_attributes)
+
+ response = wrapped(*args, **kwargs)
+ if is_streaming(kwargs) and span.is_recording():
+ return StreamWrapper(response, span)
+ else:
+ _set_openai_agentic_response_attributes(span, response)
+ return response
+ except Exception as err:
+ span.record_exception(err)
+ raise
+
+ return traced_method
+
+
  def filter_valid_attributes(attributes):
  """Filter attributes where value is not None, not an empty string, and not openai.NOT_GIVEN."""
  return {
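With these wrappers registered, an ordinary Responses API call should be traced without any change to application code. A hedged usage sketch (assumes valid LANGTRACE_API_KEY and OPENAI_API_KEY environment variables; the model name is only an example):

    from langtrace_python_sdk import langtrace
    from openai import OpenAI

    langtrace.init()  # patches Responses.create among other targets

    client = OpenAI()
    response = client.responses.create(
        model="gpt-4o-mini",             # example model, not prescribed by the SDK
        instructions="Answer briefly.",  # recorded via the "instructions" attribute
        input="What is OpenTelemetry?",  # a non-list input becomes one user prompt
    )
    # An "openai.responses.create" span is emitted around this call.
    print(response.output_text)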
@@ -634,6 +728,21 @@ def extract_content(choice: Any) -> Union[str, List[Dict[str, Any]], Dict[str, A
  return ""


+ def _set_openai_agentic_response_attributes(span: Span, response) -> None:
+ set_span_attribute(span, SpanAttributes.LLM_RESPONSE_ID, response.id)
+ set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, response.model)
+ set_event_completion(span, [{"role": "assistant", "content": response.output_text}])
+ set_usage_attributes(
+ span,
+ {
+ "input_tokens": response.usage.input_tokens,
+ "output_tokens": response.usage.output_tokens,
+ "total_tokens": response.usage.total_tokens,
+ "cached_tokens": response.usage.input_tokens_details["cached_tokens"],
+ },
+ )
+
+
  @silently_fail
  def _set_input_attributes(
  span: Span, kwargs: ChatCompletionsCreateKwargs, attributes: LLMSpanAttributes
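`_set_openai_agentic_response_attributes` only reads a handful of fields from the Responses API result. A sketch of the minimal shape it expects, using `SimpleNamespace` stand-ins rather than the real openai SDK types; note that the helper indexes `input_tokens_details` like a dict, whereas the SDK's usage object appears to expose it as an attribute-style object, so the dict below is an assumption:

    from types import SimpleNamespace

    fake_response = SimpleNamespace(
        id="resp_123",         # -> SpanAttributes.LLM_RESPONSE_ID
        model="gpt-4o-mini",   # -> SpanAttributes.LLM_RESPONSE_MODEL
        output_text="Hello!",  # -> the assistant completion event
        usage=SimpleNamespace(
            input_tokens=12,
            output_tokens=3,
            total_tokens=15,
            input_tokens_details={"cached_tokens": 0},  # dict access assumed above
        ),
    )
    # _set_openai_agentic_response_attributes(span, fake_response) would record
    # exactly these values on the span.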
@@ -707,5 +816,9 @@ def _set_response_attributes(span: Span, result: ResultType) -> None:
  set_span_attribute(
  span,
  "gen_ai.usage.cached_tokens",
- result.usage.prompt_tokens_details.cached_tokens if result.usage.prompt_tokens_details else 0,
+ (
+ result.usage.prompt_tokens_details.cached_tokens
+ if result.usage.prompt_tokens_details
+ else 0
+ ),
  )
--- a/langtrace_python_sdk/instrumentation/openai_agents/patch.py
+++ b/langtrace_python_sdk/instrumentation/openai_agents/patch.py
@@ -1,9 +1,6 @@
  import json
  from typing import Any, Callable, List

- from agents.exceptions import (InputGuardrailTripwireTriggered,
- OutputGuardrailTripwireTriggered)
- from agents.run import Runner
  from importlib_metadata import version as v
  from langtrace.trace_attributes import FrameworkSpanAttributes, SpanAttributes
  from opentelemetry import baggage, trace
@@ -18,6 +15,29 @@ from langtrace_python_sdk.utils.llm import (set_event_completion,
  set_usage_attributes)


+ # Define dummy classes to use when imports fail
+ class DummyRunner:
+ pass
+
+
+ class DummyException(Exception):
+ pass
+
+
+ # Try importing from openai-agents package
+ try:
+ from agents.exceptions import (InputGuardrailTripwireTriggered,
+ OutputGuardrailTripwireTriggered)
+ from agents.run import Runner
+ OPENAI_AGENTS_AVAILABLE = True
+ except ImportError:
+ # Define dummy classes if imports fail
+ InputGuardrailTripwireTriggered = DummyException
+ OutputGuardrailTripwireTriggered = DummyException
+ Runner = DummyRunner
+ OPENAI_AGENTS_AVAILABLE = False
+
+
  def extract_agent_details(agent_or_handoff):
  """Extract relevant details from an agent/handoff and its handoffs."""
  try:
@@ -70,6 +90,10 @@ def extract_handoff_details(handoff):

  def get_handoffs(version: str, tracer: Tracer) -> Callable:
  """Wrap the `prompt` method of the `TLM` class to trace it."""
+ if not OPENAI_AGENTS_AVAILABLE:
+ def noop_traced_method(wrapped: Callable, instance: Any, args: List[Any], kwargs: Any) -> Any:
+ return wrapped(*args, **kwargs)
+ return noop_traced_method

  def traced_method(
  wrapped: Callable,
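The early-return above installs a pass-through wrapper when `openai-agents` is absent, which under wrapt's `(wrapped, instance, args, kwargs)` convention is behaviorally invisible. A small self-contained illustration (the toy module is hypothetical, standing in for the patched agents internals):

    import types
    from wrapt import wrap_function_wrapper

    # Toy module with one function, standing in for the real patch target.
    mod = types.ModuleType("toy")
    mod.run = lambda x: x * 2

    def noop(wrapped, instance, args, kwargs):
        return wrapped(*args, **kwargs)  # pure pass-through, no span created

    wrap_function_wrapper(mod, "run", noop)
    assert mod.run(21) == 42  # observable behavior is unchanged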
@@ -117,7 +141,8 @@ def get_handoffs(version: str, tracer: Tracer) -> Callable:
  attributes = FrameworkSpanAttributes(**span_attributes)

  with tracer.start_as_current_span(
- name=f"openai_agents.available_handoffs", kind=SpanKind.CLIENT
+ name="openai_agents.available_handoffs",
+ kind=SpanKind.CLIENT
  ) as span:
  try:
  set_span_attributes(span, attributes)
@@ -157,12 +182,11 @@ def get_handoffs(version: str, tracer: Tracer) -> Callable:
  pass # Silently fail if error recording fails
  raise # Re-raise the original error since it's from the wrapped function

- except Exception as outer_err:
- # If anything fails in our instrumentation wrapper, catch it and return control to the wrapped function
+ except Exception:
  try:
  return wrapped(*args, **kwargs)
  except Exception as wrapped_err:
- raise wrapped_err # Only raise errors from the wrapped function
+ raise wrapped_err

  return traced_method

@@ -328,6 +352,10 @@ def extract_run_config(config):

  def get_new_response(version: str, tracer: Tracer) -> Callable:
  """Wrap the _get_new_response method to trace inputs and outputs."""
+ if not OPENAI_AGENTS_AVAILABLE:
+ async def noop_traced_method(wrapped: Callable, instance: Any, args: List[Any], kwargs: Any) -> Any:
+ return await wrapped(*args, **kwargs)
+ return noop_traced_method

  async def traced_method(
  wrapped: Callable,
@@ -524,7 +552,7 @@ def get_new_response(version: str, tracer: Tracer) -> Callable:

  raise

- except Exception as outer_err:
+ except Exception: # Remove outer_err since it's unused
  try:
  return await wrapped(*args, **kwargs)
  except Exception as wrapped_err:
--- a/langtrace_python_sdk/utils/llm.py
+++ b/langtrace_python_sdk/utils/llm.py
@@ -96,22 +96,22 @@ def calculate_price_from_usage(model, usage):

  def convert_mistral_messages_to_serializable(mistral_messages):
  serializable_messages = []
-
+
  try:
  for message in mistral_messages:
  serializable_message = {"role": message.role}
-
+
  # Handle content
  if hasattr(message, "content"):
  serializable_message["content"] = message.content
-
+
  # Handle tool_calls
  if hasattr(message, "tool_calls") and message.tool_calls is not None:
  serializable_tool_calls = []
-
+
  for tool_call in message.tool_calls:
  serializable_tool_call = {}
-
+
  # Handle id, type, and index
  if hasattr(tool_call, "id"):
  serializable_tool_call["id"] = tool_call.id
@@ -119,111 +119,117 @@ def convert_mistral_messages_to_serializable(mistral_messages):
  serializable_tool_call["type"] = tool_call.type
  if hasattr(tool_call, "index"):
  serializable_tool_call["index"] = tool_call.index
-
+
  # Handle function
  if hasattr(tool_call, "function"):
  function_call = tool_call.function
  serializable_function = {}
-
+
  if hasattr(function_call, "name"):
  serializable_function["name"] = function_call.name
  if hasattr(function_call, "arguments"):
  serializable_function["arguments"] = function_call.arguments
-
+
  serializable_tool_call["function"] = serializable_function
-
+
  serializable_tool_calls.append(serializable_tool_call)
-
+
  serializable_message["tool_calls"] = serializable_tool_calls
-
+
  # Handle tool_call_id for tool messages
  if hasattr(message, "tool_call_id"):
  serializable_message["tool_call_id"] = message.tool_call_id
-
+
  serializable_messages.append(serializable_message)
  except Exception as e:
  pass
-
+
  return serializable_messages


  def convert_gemini_messages_to_serializable(formatted_messages, system_message=None):
  """
  Converts Gemini-formatted messages back to a JSON serializable format.
-
+
  Args:
  formatted_messages: The formatted messages from Gemini.
  system_message (str, optional): System message content.
-
+
  Returns:
  List[dict]: JSON serializable list of message dictionaries.
  """
  serializable_messages = []
-
+
  try:
  # Add system message if present
  if system_message:
- serializable_messages.append({
- "role": "system",
- "content": system_message
- })
-
+ serializable_messages.append({"role": "system", "content": system_message})
+
  for message_item in formatted_messages:
  # Handle the case where the item is a dict with 'role' and 'content' keys
- if isinstance(message_item, dict) and 'role' in message_item and 'content' in message_item:
- role = message_item['role']
- content_value = message_item['content']
-
+ if (
+ isinstance(message_item, dict)
+ and "role" in message_item
+ and "content" in message_item
+ ):
+ role = message_item["role"]
+ content_value = message_item["content"]
+
  # Initialize our serializable message
  serializable_message = {"role": role}
-
+
  # If content is a list of Content objects
  if isinstance(content_value, list) and len(content_value) > 0:
  for content_obj in content_value:
  # Process each Content object
- if hasattr(content_obj, 'parts') and hasattr(content_obj, 'role'):
+ if hasattr(content_obj, "parts") and hasattr(
+ content_obj, "role"
+ ):
  parts = content_obj.parts
-
+
  # Extract text from parts
  text_parts = []
  for part in parts:
- if hasattr(part, 'text') and part.text:
+ if hasattr(part, "text") and part.text:
  text_parts.append(part.text)
-
+
  if text_parts:
  serializable_message["content"] = " ".join(text_parts)
-
+
  # Here you can add additional processing for other part types
  # like function_call, function_response, inline_data, etc.
  # Similar to the previous implementation
-
+
  # If content is a string or already a primitive type
- elif isinstance(content_value, (str, int, float, bool)) or content_value is None:
+ elif (
+ isinstance(content_value, (str, int, float, bool))
+ or content_value is None
+ ):
  serializable_message["content"] = content_value
-
+
  # Add the processed message to our list
  serializable_messages.append(serializable_message)
-
+
  # Handle the case where the item is a Content object directly
- elif hasattr(message_item, 'role') and hasattr(message_item, 'parts'):
+ elif hasattr(message_item, "role") and hasattr(message_item, "parts"):
  # This is the case from the previous implementation
  # Process a Content object directly
  serializable_message = {"role": message_item.role}
-
+
  parts = message_item.parts
  text_parts = []
-
+
  for part in parts:
- if hasattr(part, 'text') and part.text:
+ if hasattr(part, "text") and part.text:
  text_parts.append(part.text)
-
+
  if text_parts:
  serializable_message["content"] = " ".join(text_parts)
-
+
  serializable_messages.append(serializable_message)
  except Exception as e:
  pass
-
+
  return serializable_messages

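Both converters flatten SDK message objects into plain dicts so that `json.dumps` succeeds further down in `get_llm_request_attributes`. A hedged round-trip sketch for the Mistral converter, with `SimpleNamespace` standing in for the Mistral SDK's message and tool-call classes:

    from types import SimpleNamespace

    messages = [
        SimpleNamespace(role="user", content="What is 2 + 2?"),
        SimpleNamespace(
            role="assistant",
            content="",
            tool_calls=[
                SimpleNamespace(
                    id="call_1",
                    type="function",
                    function=SimpleNamespace(name="add", arguments='{"a": 2, "b": 2}'),
                )
            ],
        ),
    ]

    print(convert_mistral_messages_to_serializable(messages))
    # [{'role': 'user', 'content': 'What is 2 + 2?'},
    #  {'role': 'assistant', 'content': '', 'tool_calls': [{'id': 'call_1',
    #   'type': 'function', 'function': {'name': 'add',
    #   'arguments': '{"a": 2, "b": 2}'}}]}]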
@@ -253,7 +259,7 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None, operation_name=
  or kwargs.get("top_k", None)
  or kwargs.get("top_n", None)
  )
-
+
  try:
  prompts = json.dumps(prompts) if prompts else None
  except Exception as e:
@@ -261,9 +267,13 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None, operation_name=
  # check model
  if kwargs.get("model") is not None:
  if kwargs.get("model").startswith("gemini"):
- prompts = json.dumps(convert_gemini_messages_to_serializable(prompts))
+ prompts = json.dumps(
+ convert_gemini_messages_to_serializable(prompts)
+ )
  elif kwargs.get("model").startswith("mistral"):
- prompts = json.dumps(convert_mistral_messages_to_serializable(prompts))
+ prompts = json.dumps(
+ convert_mistral_messages_to_serializable(prompts)
+ )
  else:
  prompts = "[]"
  else:
@@ -427,6 +437,7 @@ class StreamWrapper:
  "".join(self.result_content), response_model
  )
  if self._span_started:
+ print("SPAAN", self.span)
  set_span_attribute(
  self.span,
  SpanAttributes.LLM_RESPONSE_MODEL,
@@ -570,6 +581,9 @@ class StreamWrapper:
  and not hasattr(chunk.delta, "message")
  ):
  content = [chunk.delta.text] if hasattr(chunk.delta, "text") else []
+ # OpenAI Responses API
+ if hasattr(chunk, "type") and chunk.type == "response.completed":
+ content = [chunk.response.output_text]

  if isinstance(chunk, dict):
  if "message" in chunk:
@@ -579,7 +593,11 @@ class StreamWrapper:
  self.result_content.append(content[0])

  def set_usage_attributes(self, chunk):
-
+ # Responses API OpenAI
+ if hasattr(chunk, "type") and chunk.type == "response.completed":
+ usage = chunk.response.usage
+ self.completion_tokens = usage.output_tokens
+ self.prompt_tokens = usage.input_tokens
  # Anthropic & OpenAI
  if hasattr(chunk, "type") and chunk.type == "message_start":
  if hasattr(chunk.message, "usage") and chunk.message.usage is not None:
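For streamed Responses API calls, the terminal `response.completed` event carries both the final text and the usage totals, so `StreamWrapper` can populate its counters from that single chunk. A hedged sketch of the event shape the two new branches assume (stand-in objects, not the openai SDK's actual event classes):

    from types import SimpleNamespace

    # Stand-in for the terminal streaming event matched on above.
    chunk = SimpleNamespace(
        type="response.completed",
        response=SimpleNamespace(
            output_text="final assembled answer",
            usage=SimpleNamespace(input_tokens=42, output_tokens=7),
        ),
    )

    if hasattr(chunk, "type") and chunk.type == "response.completed":
        content = [chunk.response.output_text]                 # build_streaming_response
        prompt_tokens = chunk.response.usage.input_tokens      # set_usage_attributes
        completion_tokens = chunk.response.usage.output_tokens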
@@ -630,6 +648,7 @@ class StreamWrapper:
  and chunk.data.choices is not None
  ):
  chunk = chunk.data
+
  self.set_response_model(chunk=chunk)
  self.build_streaming_response(chunk=chunk)
  self.set_usage_attributes(chunk=chunk)
--- a/langtrace_python_sdk/version.py
+++ b/langtrace_python_sdk/version.py
@@ -1 +1 @@
- __version__ = "3.8.5"
+ __version__ = "3.8.7"
--- a/langtrace_python_sdk-3.8.5.dist-info/METADATA
+++ b/langtrace_python_sdk-3.8.7.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: langtrace-python-sdk
- Version: 3.8.5
+ Version: 3.8.7
  Summary: Python SDK for LangTrace
  Project-URL: Homepage, https://github.com/Scale3-Labs/langtrace-python-sdk
  Author-email: Scale3 Labs <engineering@scale3labs.com>
--- a/langtrace_python_sdk-3.8.5.dist-info/RECORD
+++ b/langtrace_python_sdk-3.8.7.dist-info/RECORD
@@ -116,7 +116,7 @@ examples/weaviate_example/__init__.py,sha256=8JMDBsRSEV10HfTd-YC7xb4txBjD3la56sn
  examples/weaviate_example/query_text.py,sha256=wPHQTc_58kPoKTZMygVjTj-2ZcdrIuaausJfMxNQnQc,127162
  langtrace_python_sdk/__init__.py,sha256=VZM6i71NR7pBQK6XvJWRelknuTYUhqwqE7PlicKa5Wg,1166
  langtrace_python_sdk/langtrace.py,sha256=T-DsDrwWaL4gAUK1lkTRRpmvoO7F2WtO5hQZdyrVAxE,13791
- langtrace_python_sdk/version.py,sha256=mb3dZLLIE3dKNa7hv1kuERgx4o1UEUlj7DsxZRc2A38,22
+ langtrace_python_sdk/version.py,sha256=zMoy0NhWOOCYaBDKkUvjVGd7u0NZBhM_7SModsbbTxQ,22
  langtrace_python_sdk/constants/__init__.py,sha256=3CNYkWMdd1DrkGqzLUgNZXjdAlM6UFMlf_F-odAToyc,146
  langtrace_python_sdk/constants/exporter/langtrace_exporter.py,sha256=EVCrouYCpY98f0KSaKr4PzNxPULTZZO6dSA_crEOyJU,106
  langtrace_python_sdk/constants/instrumentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -222,12 +222,12 @@ langtrace_python_sdk/instrumentation/ollama/__init__.py,sha256=g2zJsXnDHinXPzTc-
  langtrace_python_sdk/instrumentation/ollama/instrumentation.py,sha256=jdsvkqUJAAUNLVPtAkn_rG26HXetVQXWtjn4a6eWZro,2029
  langtrace_python_sdk/instrumentation/ollama/patch.py,sha256=w99r9wCCVDdJnZQEezYE2lW_iNFEtrldt9vq3ISAsag,5375
  langtrace_python_sdk/instrumentation/openai/__init__.py,sha256=VPHRNCQEdkizIVP2d0Uw_a7t8XOTSTprEIB8oboJFbs,95
- langtrace_python_sdk/instrumentation/openai/instrumentation.py,sha256=PZxI0qfoud1VsKGmJu49YDp0Z9z9TzCR8qxR3uznOMA,2810
- langtrace_python_sdk/instrumentation/openai/patch.py,sha256=mRE150sHGL2dq4oOKTYWek1vup7HuRW_sqP6nNOd3N8,27862
+ langtrace_python_sdk/instrumentation/openai/instrumentation.py,sha256=kvbRr5DrcP23ka4mLZpwr7W0ZxeTGHilkOlc_h5V5s8,3226
+ langtrace_python_sdk/instrumentation/openai/patch.py,sha256=I_Gzp8ULbOQzlG9PltrembOiBvfp0FQHDRwR5q5tmZg,31358
  langtrace_python_sdk/instrumentation/openai/types.py,sha256=aVkoa7tmAbYfyOhnyMrDaVjQuwhmRNLMthlNtKMtWX8,4311
  langtrace_python_sdk/instrumentation/openai_agents/__init__.py,sha256=ElRfFIQYXD2-eRyL3fZnjIsDJLTrDolh5cZHPnZv0q8,107
  langtrace_python_sdk/instrumentation/openai_agents/instrumentation.py,sha256=6M4FHXfem7pazrNgsimebrEfMb2FxI8lHrdEMbVf75Y,1860
- langtrace_python_sdk/instrumentation/openai_agents/patch.py,sha256=zXuIjqnPvWNEu8nCDTRxxvTAi8PwAOsBJEtsFn3PXaI,24012
+ langtrace_python_sdk/instrumentation/openai_agents/patch.py,sha256=-53a317SCmwnI5s1vgZZBs3RkmRpVjwaTQSZKiwR5Vs,24772
  langtrace_python_sdk/instrumentation/phidata/__init__.py,sha256=q2v6luvqp9gFf1AJX6YrBvuyMC_q6cEnB5syl2HNPlU,97
  langtrace_python_sdk/instrumentation/phidata/instrumentation.py,sha256=S639XMVOGmpIK9jug9NWrUBLqs1G5ywBZiIhVuCkwGk,2697
  langtrace_python_sdk/instrumentation/phidata/patch.py,sha256=-Jf_20wLLRGRM6sY3RreS-ocXjdq5m33-gxNtl_eUdQ,12133
@@ -249,7 +249,7 @@ langtrace_python_sdk/instrumentation/weaviate/patch.py,sha256=Lqixz32uAvDA2VLU3z
  langtrace_python_sdk/types/__init__.py,sha256=SJSJzkgPjGGTVJXUZ_FyR3p9DJ5kWGx7iAnJfY4ZYHU,4669
  langtrace_python_sdk/utils/__init__.py,sha256=VVDOG-QLd59ZvSHp0avjof0sbxlZ1QQOf0KoOF7ofhQ,3310
  langtrace_python_sdk/utils/langtrace_sampler.py,sha256=BupNndHbU9IL_wGleKetz8FdcveqHMBVz1bfKTTW80w,1753
- langtrace_python_sdk/utils/llm.py,sha256=giJU33LvMPaRjPAjUwBCehgHj_ei1HwM7gLJSVWYLnI,23238
+ langtrace_python_sdk/utils/llm.py,sha256=UGfCfOcQ6NiGKsA1_aJt9Pt7yi4iGKq26TJQroOwktg,23463
  langtrace_python_sdk/utils/misc.py,sha256=LaQr5LOmZMiuwVdjYh7aIu6o2C_Xb1wgpQGNOVmRzfE,1918
  langtrace_python_sdk/utils/prompt_registry.py,sha256=n5dQMVLBw8aJZY8Utvf67bncc25ELf6AH9BYw8_hSzo,2619
  langtrace_python_sdk/utils/sdk_version_checker.py,sha256=F-VVVH7Fmhr5LcY0IIe-34zIi5RQcx26uuxFpPzZesM,1782
@@ -300,8 +300,8 @@ tests/pinecone/cassettes/test_query.yaml,sha256=b5v9G3ssUy00oG63PlFUR3JErF2Js-5A
  tests/pinecone/cassettes/test_upsert.yaml,sha256=neWmQ1v3d03V8WoLl8FoFeeCYImb8pxlJBWnFd_lITU,38607
  tests/qdrant/conftest.py,sha256=9n0uHxxIjWk9fbYc4bx-uP8lSAgLBVx-cV9UjnsyCHM,381
  tests/qdrant/test_qdrant.py,sha256=pzjAjVY2kmsmGfrI2Gs2xrolfuaNHz7l1fqGQCjp5_o,3353
- langtrace_python_sdk-3.8.5.dist-info/METADATA,sha256=oM9Ya7m7DDYBDT1VtPe3uguAKkvdJImtz2WGgSxmLbo,15844
- langtrace_python_sdk-3.8.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- langtrace_python_sdk-3.8.5.dist-info/entry_points.txt,sha256=1_b9-qvf2fE7uQNZcbUei9vLpFZBbbh9LrtGw95ssAo,70
- langtrace_python_sdk-3.8.5.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
- langtrace_python_sdk-3.8.5.dist-info/RECORD,,
+ langtrace_python_sdk-3.8.7.dist-info/METADATA,sha256=au8c2l_qdpKK8lgICNgotmeqr6K6mD4r_nhOS5X9pJo,15844
+ langtrace_python_sdk-3.8.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ langtrace_python_sdk-3.8.7.dist-info/entry_points.txt,sha256=1_b9-qvf2fE7uQNZcbUei9vLpFZBbbh9LrtGw95ssAo,70
+ langtrace_python_sdk-3.8.7.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+ langtrace_python_sdk-3.8.7.dist-info/RECORD,,